{
"source": "100ideas/schema",
"score": 2
}
#### File: schema/py/setup.py
```python
import io
import os
import sys
import subprocess
from shutil import rmtree
from setuptools import setup, Command
HERE = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
def get_tag_version() -> str:
    result = subprocess.run(['git', 'describe', '--tags', '--abbrev=0'], stdout=subprocess.PIPE, encoding='ascii')
    version = result.stdout.strip()  # drop the trailing newline from git's output
    return version[1:] if version.startswith('v') else version
class UploadCommand(Command):
"""
Support setup.py upload.
Based on, and thanks to, https://github.com/kennethreitz/setup.py/blob/master/setup.py
"""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(HERE, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
repo_arg = '--repository-url https://test.pypi.org/legacy/' if os.environ.get('PYPI_ENV') == 'test' else ''
os.system('twine upload {} dist/*'.format(repo_arg))
sys.exit()
setup(
name='stencila-schema',
version=get_tag_version(),
description='',
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='hello<EMAIL>',
python_requires='>=3.6.0',
url='https://github.com/stencila/schema',
packages=['stencila.schema'],
install_requires=[
'astor==0.8.0'
],
extras_require={},
include_package_data=True,
license='Apache-2.0',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
cmdclass={
'upload': UploadCommand,
},
)
```
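`get_tag_version` derives a PEP 440 version from the most recent git tag by stripping a leading `v`. A minimal sketch of that transformation (the tag value is hypothetical; no git checkout needed):

```python
# What get_tag_version does to typical `git describe --tags --abbrev=0` output.
tag = 'v0.42.1\n'  # hypothetical raw output, including the trailing newline
version = tag.strip()
version = version[1:] if version.startswith('v') else version
assert version == '0.42.1'
```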
#### File: stencila/schema/__main__.py
```python
import logging
from sys import argv, stderr, stdout
from .interpreter import execute_from_cli
def cli_execute():
"""Execute an executable document, delegating to the execute_from_cli function."""
execute_from_cli(argv[2:])
def cli_compile():
"""Compile an executable document by delegating to the execute_from_cli function with the `compile_only` flag."""
execute_from_cli(argv[2:], True)
def main():
"""The main entry point to this module, read the first CLI arg and call out to the corresponding function."""
command = argv[1] if len(argv) > 1 else ''
if command == 'execute':
logging.basicConfig(stream=stdout, level=logging.DEBUG)
cli_execute()
elif command == 'compile':
logging.basicConfig(stream=stdout, level=logging.DEBUG)
cli_compile()
else:
stderr.write('Unknown command "{}"\n'.format(command))
if __name__ == '__main__':
main()
```
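The dispatcher reads `argv[1]` as the subcommand and forwards `argv[2:]` to `execute_from_cli`. A hedged sketch of driving it in-process; `article.py` is a placeholder document path, not a file from the repository:

```python
import sys
from unittest import mock

from stencila.schema.__main__ import main

# 'article.py' is hypothetical; substitute any executable document path.
with mock.patch.object(sys, 'argv', ['stencila-schema', 'execute', 'article.py']):
    main()  # dispatches to cli_execute(), which passes ['article.py'] on
```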
#### File: py/tests/test_interpreter.py
```python
import unittest.mock
from stencila.schema.code_parsing import CodeChunkExecution, CodeChunkParser
from stencila.schema.interpreter import Interpreter, execute_compilation, compile_article, DocumentCompilationResult, \
SKIP_OUTPUT_SEMAPHORE
from stencila.schema.types import CodeExpression, CodeChunk, Article
def execute_code_chunk(text: str) -> CodeChunk:
cc = CodeChunk(text)
cce = CodeChunkExecution(
cc, CodeChunkParser().parse(cc)
)
Interpreter().execute([cce], {})
return cc
def test_execute_simple_code_expression():
ce = CodeExpression('4 + 5')
Interpreter().execute([ce], {})
assert ce.output == 9
def test_execute_parameterised_code_expression():
ce = CodeExpression('p1 + p2')
Interpreter().execute([ce], {'p1': 1, 'p2': 10})
assert ce.output == 11
def test_catch_code_expression_error():
ce = CodeExpression('1 / 0')
Interpreter().execute([ce], {})
assert ce.output is None
assert ce.errors[0].kind == 'ZeroDivisionError'
assert ce.errors[0].message == 'division by zero'
assert ce.errors[0].trace is not None
@unittest.mock.patch('stencila.schema.interpreter.LOGGER')
@unittest.mock.patch('stencila.schema.interpreter.exec')
@unittest.mock.patch('stencila.schema.interpreter.eval')
def test_execute_code_chunk_without_ast(mock_eval, mock_exec, mock_logger):
"""If parsing the code fails to generate an AST then the code should not attempt to be executed."""
execute_code_chunk('invalid code')
assert mock_logger.info.called
assert mock_exec.called is False # make sure nothing is executed
assert mock_eval.called is False
def test_output_capture():
"""Output to STDOUT should be captured in the CodeChunk's outputs property."""
cc = execute_code_chunk('print(\'Hello world!\')')
assert cc.outputs == ['Hello world!\n']
def test_result_capture():
"""Variable assignment should not be captured as an output, return values from functions should (for example)."""
cc = execute_code_chunk('a = 5\ndef add_five(b):\n return b + 5\nadd_five(a)')
assert cc.outputs == [10]
def test_duration():
"""
CodeChunk execution duration should be captured. We don't want to slow down running tests so just check it's
greater than 0.
"""
cc = execute_code_chunk('for i in range(10):\n b = i + 1')
assert cc.duration > 0
def test_code_chunk_exception_capture():
"""
If an Exception occurs it should be recorded and code outputs up to that point added to outputs. The rest of the
code should not be run, although subsequent code blocks should.
"""
cc1 = CodeChunk('a = 5\na + 2\nprint(\'Goodbye world!\')\nbadref += 1\nprint(\'After exception!\')')
cc2 = CodeChunk('2 + 2\nprint(\'CodeChunk2\')')
cce1 = CodeChunkExecution(
cc1, CodeChunkParser().parse(cc1)
)
cce2 = CodeChunkExecution(
cc2, CodeChunkParser().parse(cc2)
)
Interpreter().execute([cce1, cce2], {})
assert cc1.outputs == [7, 'Goodbye world!\n']
assert cc1.errors[0].kind == 'NameError'
assert cc2.outputs == [4, 'CodeChunk2\n']
@unittest.mock.patch('stencila.schema.interpreter.DocumentCompiler')
def test_compile_article(mock_dc_class):
article = unittest.mock.MagicMock(spec=Article)
dcr = compile_article(article)
mock_dc_class.return_value.compile.assert_called_with(article)
assert mock_dc_class.return_value.compile.return_value == dcr
@unittest.mock.patch('stencila.schema.interpreter.ParameterParser')
@unittest.mock.patch('stencila.schema.interpreter.Interpreter')
def test_execute_compilation(mock_interpreter_class, mock_pp_class):
compilation_result = unittest.mock.MagicMock(spec=DocumentCompilationResult)
parameters = ['--flag', 'value']
parameter_parser = mock_pp_class.return_value
interpreter = mock_interpreter_class.return_value
execute_compilation(compilation_result, parameters)
mock_pp_class.assert_called_with(compilation_result.parameters)
parameter_parser.parse_cli_args.assert_called_with(parameters)
interpreter.execute.assert_called_with(compilation_result.code, parameter_parser.parameter_values)
def test_semaphore_skipping():
"""If decode_output returns a SKIP_OUTPUT_SEMAPHORE then it should not be added to the outputs array."""
i = Interpreter()
outputs = []
i.add_output(outputs, 'abc123')
decode_original = i.decode_output
i.decode_output = unittest.mock.MagicMock(return_value=SKIP_OUTPUT_SEMAPHORE)
i.add_output(outputs, 'skip')
i.decode_output = decode_original
i.add_output(outputs, [1, 2, 3])
assert outputs == ['abc123', [1, 2, 3]]
```
{
"source": "100kimch/ros_galapagos",
"score": 2
}
#### File: galapagos_embedded/libs/lib_subcam.py
```python
import rospy
from scheduler import SCHEDULER
from sensor_msgs.msg import CompressedImage, Image
from std_msgs.msg import Header, String
# from cv_bridge import CvBridge
import sys
import cv2
import numpy as np
import time
from constants import ARRAY_D, ARRAY_K, BUF_SIZE
from operator import itemgetter
from pprint import pprint # for Debug
# from cv_bridge import CvImage, CvBridge, CvBridgeError
# * Variables
MIN_SUB = np.array([int(0 * 255 / 360), int(0 * 255 / 100), int(0 * 255 / 100)])
MAX_SUB = np.array([int(360 * 255 / 360), int(70 * 255 / 100), int(100 * 255 / 100)])
# MAX_SUB = np.array([int(360 * 255 / 360), int(14 * 255 / 100), int(100 * 255 / 100)])
CAMERA_MATRIX = np.array(ARRAY_K)
# DISTORTION_MATRIX = np.array([ -0.335978, 0.133403, 0.000953, 0.000789 ])
DISTORTION_MATRIX = np.array([ -0.335978, 0.133403, 0.000953, -0.005089 ])
IMAGE_SUB_WIDTH = 640
IMAGE_SUB_HEIGHT = 480
class Eye(dict):
""" an object to process image """
def __init__(self):
# self.has_rospy = True # ! deprecated
self.images = {}
self.mask = None
self.front_occupied = False
self.fish_occupied = False
self.sub_occupied = False
self.is_window_set = False
self.info_front = {
"state": "straight",
"center": 0,
"bias": 0,
"turning_to": "None",
"horizon_position": 0.
}
self.info_sub = {
"center": 0,
"slope": 0,
"has_line": False
}
self._buffer_sub = []
self.threshold_turning = 0
# self.threshold_tracking = 0 # ! Deprecated
self.threshold_sub_slope = 0
self.threshold_sub_center = 0
self.publishers = {}
self.publishing_names = []
# if SCHEDULER.debug_mode:
# self.publishing_names = ["sub/original", "sub/canny"]
# else:
# self.publishing_names = []
for i in self.publishing_names:
# print("publisher: ", i)
self.publishers[str(i)] = rospy.Publisher(
"/eye/" + i + "/compressed", CompressedImage, queue_size=5
)
self.calibrate()
dict.__init__(self)
def calibrate(self):
""" process to calibrate initial values """
self.threshold_turning = 260
# self.threshold_tracking = 260 # ! Deprecated
self.threshold_sub_slope = -1.78
self.threshold_sub_center = 200
rospy.logdebug("[LIB_EYE] threshold_turning set to " + str(self.threshold_turning))
# rospy.logdebug("[LIB_EYE] threshold_tracking set to " + str(self.threshold_tracking))
rospy.logdebug("[LIB_EYE] threshold_sub_slope set to " + str(self.threshold_sub_slope))
rospy.logdebug("[LIB_EYE] threshold_sub_center set to " + str(self.threshold_sub_center))
def reset_state(self, event=None):
self.info_front["state"] = "straight"
self.info_front["turning_to"] = "None"
def see_sub(self, compressed_data=None):
""" callback handler when sub image received """
self.sub_occupied = True
self.set_sub_image(compressed_data)
road_lines = self.get_road_line_by_sub()
# if road_lines is not None and len(road_lines) is not 4:
# if SCHEDULER.debug_option["warn_road_lines"]:
# rospy.logwarn("[LIB_EYE] road_lines: ")
# pprint(road_lines)
# if road_lines is None:
# self.info_fish = {
# # "len": len(road_lines),
# "left": 0,
# "right": IMAGE_FISH_WIDTH,
# "slope": 0
# }
# return self.info_fish
# rospy.logdebug("\n[EYE] SUB lines: " + str(road_lines))
if not isinstance(road_lines, list):
self.info_sub["has_line"] = False
elif len(road_lines) < 2:
self.info_sub["has_line"] = False
else:
self.info_sub["has_line"] = True
if road_lines and len(road_lines) > 1:
sum_x, sum_y = 0, 0
divider = len(road_lines) * 2
for line in road_lines:
sum_x = sum_x + line['points'][0] + line['points'][2]
sum_y = sum_y + line['points'][1] + line['points'][3]
average_x, average_y = sum_x / divider, sum_y / divider
self._buffer_sub.append(average_x)
# rospy.logdebug("\n[EYE] average: {:.2f} {:.2f}".format(average_x, average_y))
if len(self._buffer_sub) == BUF_SIZE:
self.info_sub["center"] = int(sum(self._buffer_sub) / BUF_SIZE) - self.threshold_sub_center
# self.info_sub["line_center"][0] = self.info_sub["line_center"][2] = int(sum(self._buffer_sub) / BUF_SIZE)
self._buffer_sub.pop(0)
else:
rospy.logdebug("[LIB_EYE] len(_buffer_sub): {:d}".format(len(self._buffer_sub)))
# self.info_sub["line_center"][3] = int(average_y)
# self.info_sub["line_center"][3] = 240
if road_lines:
slope = 0
for line in road_lines:
slope = slope + line['slope']
            # NOTE: 1 is added to the slope because the lens is biased toward 45 degrees
self.info_sub["slope"] = float("{:.2f}".format(
1 + (slope / len(road_lines))))
image_center = int(self.info_sub["center"] + (IMAGE_SUB_WIDTH /2))
line_center = [{
"points": [image_center, 0, image_center, 480]
}]
# self.draw_lines(line_center, self.images["sub/canny"], color="blue")
# self.draw_lines(road_lines, self.images["sub/canny"], color="blue")
# self.set_info_fish_to_image(self.images["sub/canny"])
self.publish_image()
return self.info_sub
def release_sub_occupied(self, event=None):
""" release controlling grant """
self.sub_occupied = False
def set_sub_image(self, compressed_data=None):
if compressed_data:
            raw_data = np.frombuffer(compressed_data.data, np.uint8)  # np.fromstring is deprecated
self.images["sub"] = cv2.imdecode(raw_data, cv2.IMREAD_COLOR)
else:
            rospy.logerr("no compressed image or image file found")
return
self.images["sub"] = cv2.resize(
self.images["sub"], dsize=(IMAGE_SUB_WIDTH, IMAGE_SUB_HEIGHT), interpolation=cv2.INTER_AREA
)
# self.images["sub"] = cv2.undistort(self.images["sub"], CAMERA_MATRIX, DISTORTION_MATRIX)
# self.images["sub"] = self.images["sub"][:150, :]
return
def draw_lines(self, lines, to, color="green"):
""" draw lines to a canvas """
# print('lines: ', lines)
if lines is not None:
# for idx, key in enumerate(lines):
# cv2.line(to, (key['points'][0], key['points'][1]), (key['points'][2], key['points'][3]),
# (255, 30 + 60 * idx, 255), 3, cv2.LINE_AA)
idx = 60
for l in lines:
                # NOTE: compare strings with ==, and pass an explicit thickness;
                # otherwise cv2.LINE_AA was being consumed as the thickness argument
                if color == "brown":
                    cv2.line(
                        to,
                        (l["points"][0], l["points"][1]),
                        (l["points"][2], l["points"][3]),
                        (idx * 0.7, idx * 0.7, 50 + idx * 0.9),
                        3,
                        cv2.LINE_AA
                    )
                elif color == "blue":
                    cv2.line(
                        to,
                        (l["points"][0], l["points"][1]),
                        (l["points"][2], l["points"][3]),
                        (50 + idx * 0.9, idx * 0.7, idx * 0.7),
                        3,
                        cv2.LINE_AA
                    )
elif isinstance(l, dict):
cv2.line(
to,
(l["points"][0], l["points"][1]),
(l["points"][2], l["points"][3]),
(50, idx, 120),
3,
cv2.LINE_AA,
)
else:
cv2.line(
to,
(l[0], l[1]),
(l[2], l[3]),
(100 + idx, 30 + 60 * idx, 70),
3,
cv2.LINE_AA,
)
idx = idx + 60
def get_front_state(self):
return self.info_front["state"]
def get_turning_to(self):
return self.info_front["turning_to"]
def get_road_line_by_sub(self):
""" get line by fishcam """
adjusted = None
# NOTE: apply gamma correction and show the images
# gamma = gamma if gamma > 0 else 0.1
gamma = 1.2
contrast = 100
brightness = -70
# sub = self.images["sub"]
# self.images["sub"] = np.int16(self.images["sub"]) * (contrast/127+1) - contrast + brightness
# self.images["sub"] = np.uint8(np.clip(self.images["sub"], 0, 255))
self.images["sub"] = self.adjust_gamma(self.images["sub"], gamma=gamma)
self.images["sub/original"] = np.copy(self.images["sub"])
self.images["sub"][60:IMAGE_SUB_HEIGHT-60, 60:IMAGE_SUB_WIDTH-60] = 0
white = np.zeros(shape=[IMAGE_SUB_HEIGHT, IMAGE_SUB_WIDTH, 3], dtype=np.uint8)
white.fill(255)
self.mask = cv2.inRange(self.images["sub"], MIN_SUB, MAX_SUB)
self.images["sub_masked"] = cv2.cvtColor(
cv2.bitwise_and(white, white, mask=self.mask), cv2.COLOR_HSV2BGR
)
self.images["sub/canny"] = cv2.Canny(self.images["sub_masked"], 100, 200, None, 5)
hough_lines = cv2.HoughLinesP(
self.images["sub/canny"], 1, np.pi / 180, 30, None, 30, 5
)
# hough_lines = cv2.HoughLinesP(
# self.images["sub/canny"], 1, np.pi / 180, 10, None, 30, 25
# )
lines = []
ref_point = [320, 50]
# rospy.logdebug("\n[EYE] Hough lines: " + str(hough_lines))
if hough_lines is None:
return None
for i in range(0, len(hough_lines)):
line = hough_lines[i][0]
# if the line seems "road line", append to lines:
if abs(line[3] - line[1]) > 5 and abs(line[2] - line[0]) > 5:
if line[1] > line[3]:
line = np.asarray([line[2], line[3], line[0], line[1]])
if line[3] - line[1] == 0:
slope = 1000
else:
slope = float("%.2f" % ((line[2] - line[0]) / (line[3] - line[1])))
lines.append(
{
"slope": slope,
"points": line
# "y_intercept": line[1] / (line[0] * slope),
}
)
lines = sorted(lines, key=lambda k: k["points"][0], reverse=False)
# for idx, key in enumerate(lines):
# print(idx, key)
# print()
# print("deleted:")
idx = 0
while(idx + 2 < len(lines)):
posX, posY = lines[idx]["points"][0], lines[idx]["points"][1]
nextX, nextY = lines[idx + 1]["points"][0], lines[idx + 1]["points"][1]
nnextX, nnextY = lines[idx + 2]["points"][0], lines[idx + 2]["points"][1]
if (abs(nextX - posX) < 25) and (abs(nextY - posY) < 25):
# print(idx, posX, posY)
lines.pop(idx)
if idx > 0: idx -= 1
continue
elif (abs(nnextX - posX) < 25) and (abs(nnextY - posY) < 25):
# print(idx, posX, posY)
lines.pop(idx)
if idx > 0: idx -= 1
continue
posX, posY = lines[idx]["points"][2], lines[idx]["points"][3]
nextX, nextY = lines[idx + 1]["points"][2], lines[idx + 1]["points"][3]
nnextX, nnextY = lines[idx + 2]["points"][2], lines[idx + 2]["points"][3]
if (abs(nextX - posX) < 25) and (abs(nextY - posY) < 25):
# print(idx, posX, posY)
lines.pop(idx)
if idx > 0: idx -= 1
continue
elif (abs(nnextX - posX) < 25) and (abs(nnextY - posY) < 25):
# print(idx, posX, posY)
lines.pop(idx)
if idx > 0: idx -= 1
continue
idx += 1
# operation for checking [N-1]th element
if idx + 1 < len(lines):
posX, posY = lines[idx]["points"][0], lines[idx]["points"][1]
nextX, nextY = lines[idx + 1]["points"][0], lines[idx + 1]["points"][1]
if (abs(nextX - posX) < 25) and (abs(nextY - posY) < 25):
# print(idx, posX, posY)
lines.pop(idx)
else:
posX, posY = lines[idx]["points"][2], lines[idx]["points"][3]
nextX, nextY = lines[idx + 1]["points"][2], lines[idx + 1]["points"][3]
if (abs(nextX - posX) < 25) and (abs(nextY - posY) < 25):
# print(idx, posX, posY)
lines.pop(idx)
return lines
def set_center(self, set_left, set_right):
if set_right and set_left:
rospy.Timer(rospy.Duration(0.5), self.reset_state, oneshot=True, reset=False)
# NOTE: [1] to select column y
set_right = np.array([key["points"][1] for key in set_right])
set_left = np.array([key["points"][1] for key in set_left])
# print(f"set_right: {set_right}")
center = self.info_front["bias"] + (sum(set_right) / len(set_right)) - (sum(set_left) / len(set_left))
elif self.threshold_turning > self.info_front["horizon_position"]:
if set_right:
center = 1000
elif set_left:
center = -1000
else:
center = 0
else:
center = 0
# if self.has_rospy:
# # rospy.logdebug("frontcam center: " + str(center))
# pass
# else:
# print("frontcam center: ", str(center))
self.info_front["center"] = center
def set_info_to_image(self, image):
self.put_text(image, self.info_front['turning_to'] + " "
+ self.info_front['state'] + " "
+ "horizon: {:.1f} ".format(self.info_front['horizon_position'])
+ "center: {:.1f}".format(self.info_front["center"])
, 40, 30)
# self.put_text(image, f"center: {self.info_front['center']: .2f} turning_to: {self.info_front['turning_to']} horizon_pos: {self.info_front['horizon_position']}", 40, 30)
    def set_info_fish_to_image(self, image):
        # NOTE: legacy fishcam overlay; info_fish and IMAGE_FISH_WIDTH come from the older fishcam pipeline
        if self.info_fish['left'] == 0:
self.put_text(image, " right: {:d}".format(self.info_fish['right'])
+ " slope: {:.1f}".format(self.info_fish['slope'])
, 100, 50)
        elif self.info_fish['right'] == IMAGE_FISH_WIDTH:
self.put_text(image, "left: {:d}".format(self.info_fish['left'])
+ " slope: {:.1f}".format(self.info_fish['slope'])
, 200, 50)
def adjust_gamma(self, image, gamma=1.0):
# NOTE: build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array(
[((i / 255.0) ** invGamma) * 255 for i in np.arange(0, 256)]
).astype("uint8")
# NOTE: apply gamma correction using the lookup table
return cv2.LUT(image, table)
def get_horizon(self):
hough_lines = cv2.HoughLinesP(
cv2.cvtColor(self.images["front/canny"], cv2.COLOR_BGR2GRAY), 1, np.pi / 180, 40, None, 50, 5
)
# print('hough_lines:', len(hough_lines))
lines = []
if hough_lines is None:
return None
for idx, _ in enumerate(hough_lines):
line = hough_lines[idx][0]
if abs(line[3] - line[1]) < 3:
if line[0] > line[2]:
line = np.asarray([line[2], line[3], line[0], line[1]])
lines.append(line)
lines = sorted(lines, key=lambda k: k[3], reverse=True)
horizons = []
line_set = []
ref_line = []
for line in lines:
            if len(ref_line) == 0:
ref_line = line
line_set.append(line)
continue
if abs(ref_line[1] - line[1]) > 20:
horizons.append(np.array([ref_line[0], ref_line[1], ref_line[2], ref_line[1]]))
# min_x, max_x = 1000, 0
line_set = []
ref_line = line
line_set.append(line)
if line_set:
horizons.append(np.array([ref_line[0], ref_line[1], ref_line[2], ref_line[1]]))
line_set = []
# print(f"horizons: {horizons}")
return horizons
def set_horizon_position(self, horizons):
        # midpoint x (and y) of each horizon line's two end points
        pos_x = np.array([(val[0] + val[2]) / 2 for val in horizons])
pos_y = np.array([val[1] for val in horizons])
average_x = sum(pos_x) / len(pos_x)
average_y = sum(pos_y) / len(pos_y)
closest_y = horizons[0][1]
# print(f"average_x: {average_x} average_y: {average_y}")
if average_x < 100:
turning_to = "left"
        elif average_x > IMAGE_SUB_WIDTH - 100:  # NOTE: assumes the front image width matches IMAGE_SUB_WIDTH (640)
turning_to = "right"
else:
turning_to = self.info_front["turning_to"]
# turning_to = "None"
self.info_front["horizon_position"] = closest_y
# self.info_front["horizon_position"] = average_y
self.info_front["turning_to"] = turning_to
def show_image(self):
""" NOTE: Deprecated soon """
for idx, name in enumerate(self.publishing_names):
cv2.imshow(name, self.images[name])
if not self.is_window_set:
# cv2.moveWindow(name, -650 * ((idx >> 1) % 2 + 1), 550 * (idx % 2))
cv2.moveWindow(name, 650 * ((idx >> 1) % 2 + 1), 550 * (idx % 2))
self.is_window_set = True
# cv2.imshow("original", self.images["front/original"])
# cv2.imshow("result", self.images["front/result"])
# cv2.imshow("canny", self.images["front/canny"])
# cv2.imshow("canny_edited", self.images["canny2"])
# # cv2.imshow('fill', self.images['fill'])
# cv2.moveWindow("original", -1300, 0)
# cv2.moveWindow("result", -650, 0)
# cv2.moveWindow("canny", -1300, 550)
# cv2.moveWindow("canny_edited", -650, 550)
# # cv2.moveWindow('fill', -1400, 1100)
# # cv2.imshow('model', image_opening)
while 1:
key = cv2.waitKey(1000)
if key == 27:
sys.exit()
elif key == -1:
continue
elif key == 97: # 'a'
return -1
else:
return 1
# time.sleep(3)
# cv2.destroyAllWindows()
def publish_image(self):
""" publish images by publishing_names """
msg = CompressedImage()
msg.header.stamp = rospy.Time.now()
msg.format = "jpeg"
        for name in self.publishing_names:  # do not shadow the str builtin
            if name not in self.images:
                continue
            msg.data = np.array(cv2.imencode('.jpg', self.images[name])[1]).tobytes()
            self.publishers[name].publish(msg)
def put_text(self, image, text, pos_x, pos_y):
cv2.putText(
image,
text,
(pos_x, pos_y),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(255, 255, 255),
2,
)
def get_flooded(self, image, point):
# print(f"image.shape: ", image.shape)
h, w = image.shape
if point is None:
seed = (320, 240)
else:
seed = (int(point[0]), int(point[1]))
mask = np.zeros((h + 2, w + 2), np.uint8)
mask[:] = 0
floodflags = 4
floodflags |= cv2.FLOODFILL_MASK_ONLY
floodflags |= (255 << 8)
num, im, mask, rect = cv2.floodFill(
image, mask, seed, (100, 50, 50), (10,) * 3, (10,) * 3, floodflags
)
return mask[1:-1, 1:-1]
# print(mask)
# white = np.zeros(shape=[IMAGE_HEIGHT, IMAGE_WIDTH, 3], dtype=np.uint8)
# # white[:] = (255, 255, 255)
# white.fill(255)
# mask = cv2.inRange(mask, MIN, MAX)
# # _masked = cv2.cvtColor(cv2.bitwise_and(
# # image, image, mask=mask), cv2.COLOR_HSV2BGR)
# self.images['fill'] = cv2.bitwise_and(
# white, white, mask=mask
# )
# cv2.imshow('images', flooded)
# cv2.imwrite("test.png", mask)
def is_boostable(self, image):
        # TODO: add an algorithm to decide whether boosting is possible
return False
def is_sub_occupied(self):
return self.sub_occupied
EYE = Eye()
```
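`Eye.adjust_gamma` precomputes a 256-entry lookup table so the per-pixel power law costs a single `cv2.LUT` call. A standalone sketch of the same table with the `gamma = 1.2` value hard-coded in `get_road_line_by_sub` (numpy only, no ROS or OpenCV required):

```python
import numpy as np

# Same LUT construction as Eye.adjust_gamma, for gamma = 1.2.
gamma = 1.2
inv_gamma = 1.0 / gamma
table = np.array(
    [((i / 255.0) ** inv_gamma) * 255 for i in np.arange(0, 256)]
).astype("uint8")

assert table[0] == 0 and table[255] == 255  # endpoints are fixed
assert table[128] == 143                    # mid-grey is brightened slightly
```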
#### File: galapagos_embedded/scripts/viewer.py
```python
import rospy
from geometry_msgs.msg import Twist
from lib_eye import EYE
from lib_lidar import *
from scheduler import SCHEDULER
import math
# * Variables
IS_RUNNING = True
BUF_ANGULAR = [0, 0]
# * Methods
def initialize():
""" initialize processing """
EYE.calibrate()
def view_subcam(image):
""" process the subcam image """
global DIRECTION
global BUF_ANGULAR
if not SCHEDULER.is_enable["subcam"]:
return
if EYE.is_sub_occupied():
return
if SCHEDULER.debug_option["show_timer"]:
SCHEDULER.check_time("subcam", min=0.3)
info = EYE.see_sub(image)
# print(EYE.get_front_state() + " " + str(EYE.get_front_state()))
if info is None:
rospy.logwarn("[PROC] No Information!")
return
elif False:
# elif EYE.get_front_state() is "turning":
# rospy.logdebug("[PROC] turning...")
center = info["center"]
slope = info["slope"]
# left: + right: -
# if slope > 0:
# weight_slope = pow(abs(slope) / 1.8, 0.9) * 2.6
# else:
# weight_slope = - pow(abs(slope) / 1.8, 0.9) * 2.6
# # weight_slope = - pow(abs(slope) / 1.8, 0.5) * 2.5
if slope > 0:
value = pow(abs(slope) / 1.8, 1.2) * 3.2
else:
value = - pow(abs(slope) / 1.8, 1.2) * 3.2
# if slope > 0:
# weight_center = pow(abs(center) / 250, 0.9) * 5.5
# elif slope < -0:
# weight_center = - pow(abs(center) / 250, 0.9) * 5.5
# else:
# weight_center = 0
# # weight_center = slope * 1
if value > 2.6:
value = 2.6
elif value < -2.6:
value = -2.6
degree = value
# BUF_ANGULAR.append(value)
# past_val = BUF_ANGULAR.pop(0) * 0.7
# if info["has_line"]:
# if (value * past_val >= 0) and (abs(value) > abs(past_val)):
# degree = value
# else:
# degree = 0
# BUF_ANGULAR = [0] * BUF_SIZE
# else:
# if value > 0:
# degree = 2.7
# elif value < 0:
# degree = -2.7
# else:
# degree = 0
# if not info["has_line"]:
# if (slope < 0):
# degree = 4.12
# else:
# degree = -4.12
if SCHEDULER.debug_option["show_center_slope"]:
rospy.logdebug(
"[PROC] slope: {:.2f} w_slope: {:.2f} degree: {:.2f} {}".format(
slope, value, degree, info["has_line"])
)
    elif EYE.get_front_state() == "straight":
# rospy.logdebug("[PROC] going straight...")
center = info["center"]
if center < 0:
value = pow(abs(center) / 150, 0.9) * 2
elif center > 0:
value = - pow(abs(center) / 150, 0.9) * 2
else:
value = 0
if value > 1.5:
value = 1.5
elif value < -1.5:
value = -1.5
degree = value
if SCHEDULER.debug_option["show_center_slope"]:
rospy.logdebug(
"[PROC] center: {:.2f} w_center: {:.2f} {}".format(
center, degree, info["has_line"])
)
rospy.Timer(
rospy.Duration(0.14), EYE.release_sub_occupied, oneshot=True
)
# TURTLE.turn("", 0.13, degree)
EYE.release_sub_occupied()
def view_frontcam(image):
""" process the frontcam """
# rospy.loginfo("[VIEW] frontcam image received.")
if not EYE.is_front_occupied():
if SCHEDULER.debug_option["show_center_slope"]:
SCHEDULER.check_time("frontcam", min=0.4)
info = EYE.see_front(image)
if info is None:
return
# rospy.logdebug("info: {:s}".format(str(info)))
        if info["center"] in (-1000, 0, 1000):  # sentinel values assigned by set_center
pass
else:
EYE.reset_state()
rospy.Timer(rospy.Duration(0.1),
EYE.release_front_occupied, oneshot=True)
def view_lidar(lidar_data):
""" process the lidar data """
set_lidar_values(lidar_data)
rospy.logdebug("left: {:.2f} right: {:.2f} front: {:.2f}".format(
get_object_distance("left"), get_object_distance("right"), get_object_distance("front")))
def view_speed(twist):
global IS_RUNNING
    if twist.linear.x == 0 and twist.angular.z == 0:  # angular.z is the yaw rate on a diff-drive base
IS_RUNNING = False
else:
# rospy.logdebug("[VIEW] speed: {:.2f}".format(twist.linear.x))
IS_RUNNING = True
def calibrate_subcam(image):
""" calibrate the subcam """
info = EYE.see_sub(image)
rospy.loginfo("info: {:s}".format(str(info)))
rospy.signal_shutdown("[VIEW] ended calibration")
```
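The (currently disabled) turning branch of `view_subcam` maps the averaged line slope to an angular command through a signed power curve clamped at ±2.6. The same mapping pulled out as a standalone function (the function name is ours):

```python
# Signed power curve from view_subcam's turning branch, clamped to +/-2.6.
def steering_from_slope(slope):
    value = pow(abs(slope) / 1.8, 1.2) * 3.2
    if slope <= 0:
        value = -value
    return max(min(value, 2.6), -2.6)

print(steering_from_slope(1.8))   # 3.2 before clamping -> 2.6
print(steering_from_slope(-0.9))  # ~ -1.39
```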
#### File: galapagos_lite/scripts/processor.py
```python
import rospy
from lib_eye import EYE
from lib_frontcam import *
from lib_lidar import *
from scheduler import SCHEDULER
from turtlebot import TURTLE
import timeit
import math
import requests
# * Variables
LANE_TO = "left"
STEP = 0
BUF_SIZE = 3
BUF_ANGULAR = [0] * BUF_SIZE
TEST_STATE = False
NUM_OBSTRUCTION = 0
def initialize():
""" initialize processing """
EYE.calibrate()
init_ref_images()
def reset_buffer():
global BUF_ANGULAR
BUF_ANGULAR = [0] * BUF_SIZE
def reverse_lane():
global LANE_TO
if LANE_TO == "right":
LANE_TO = "left"
else:
LANE_TO = "right"
def process_frontcam(image):
""" process the frontcam image """
if not SCHEDULER.is_frontcam_enable():
return
state = SCHEDULER.get_state()
info = EYE.see_front(image)
if SCHEDULER.debug_option["show_front_info"]:
rospy.logdebug(info)
if state == "default":
if EYE.is_boostable(image):
process_acceleration(info)
# return
# SCHEDULER.set_state("to_intersection")
# signal = is_construction(image)
# rospy.logdebug(signal)
# if is_construction(image):
# TURTLE.boost()
# SCHEDULER.set_state("construction")
return
if state == "traffic_light":
if is_light_green(image):
TURTLE.enable()
SCHEDULER.set_state("to_intersection")
return
# NOTE: temporary settings:
if state == "to_intersection":
# rospy.Timer(rospy.Duration(35), SCHEDULER.enable_lidar, oneshot=True)
SCHEDULER.set_state("intersection_left")
# if state == "to_intersection":
# signal = check_left_right_sign(image)
# if signal == "right":
# SCHEDULER.set_state("intersection_right")
# elif signal == "left":
# SCHEDULER.set_state("intersection_left")
# return
if state == "intersection_right":
# TODO: make algorithms for right
if EYE.is_boostable(image):
TURTLE.boost()
SCHEDULER.set_state("to_construction")
return
# NOTE: temporary settings:
if state == "intersection_left":
# if EYE.is_boostable(image):
# TURTLE.boost()
# SCHEDULER.set_state("to_construction")
return
if state == "to_construction":
# if EYE.is_boostable(image):
# TURTLE.boost()
EYE.check_yellow = True
# if is_construction(image):
# SCHEDULER.set_state("construction_searching")
if state == "construction_searching":
pass
def process_acceleration(info):
if not info:
return
horizon = info['horizon_position']
center = abs(info['center'])
if horizon < 150:
return
if EYE.get_front_state() == "straight":
if horizon < 280 and center < 50:
# TURTLE.boost()
TURTLE.set_speed_smooth('fast')
if SCHEDULER.debug_option["show_front_info"]:
rospy.logwarn("[PROC] Boosting")
elif horizon < 300 and center < 150:
TURTLE.set_speed_smooth('normal')
    if horizon > 310:  # check the tighter bound first; it was unreachable below the >280 test
        TURTLE.set_speed('normal')
    elif horizon > 280 or info['state'] == 'turning':
        TURTLE.set_speed_smooth('normal')
def process_subcam(image):
""" process the subcam image """
if not SCHEDULER.is_subcam_enable():
return
if SCHEDULER.is_subcam_occupied() or TURTLE.is_occupied():
return
info = EYE.see_sub(image)
if SCHEDULER.debug_option["show_timer"]:
# Check delay only if has line
# SCHEDULER.check_time("subcam", min=0.28, stop_when_delay=info["has_line"])
SCHEDULER.check_time("subcam", min=0.28, stop_when_delay=False)
if info is None:
rospy.logwarn("[PROC] No Information!")
return
center = info["center"]
slope = info["slope"]
if slope < -0.5:
limit = 1.6
amplitude = 1.0
else:
limit = 1.2
amplitude = 0.8
limit /= 1.9
# amplitude /= 2
state = SCHEDULER.get_state()
    if (EYE.get_front_state() == "straight") and (state != "zigzag"):
if (abs(center) < 30) and slope < -0.4:
degree = pow(abs(slope) / 1.8, 1.1) * amplitude
elif center < 0:
degree = pow(abs(center) / 100, 2.0) * amplitude / 2
elif center > 0:
degree = - pow(abs(center) / 100, 2.0) * amplitude
else:
degree = 0
else:
if slope < 0:
degree = - pow(abs(slope) / 1.8, 2.9) * amplitude * 28.0
else:
degree = pow(abs(slope) / 1.0, 1.2) * amplitude * 4.0
# degree = pow(abs(slope) / 1.8, 0.9) * amplitude
# elif center < 0:
# degree = pow(abs(center) / 100, 1.9) * amplitude
# elif center > 0:
# degree = - pow(abs(center) / 100, 1.9) * amplitude
# else:
# degree = 0
buf_sum = sum(BUF_ANGULAR)
if EYE.get_front_state() == "straight":
adjust_angular = BUF_ANGULAR.pop(0) * 0.9
BUF_ANGULAR.append(degree)
degree -= adjust_angular
# if abs(buf_sum) > 1:
else:
reset_buffer()
adjust_angular = 0
degree = max(min(degree, limit), -limit)
if not info["has_line"]:
if center > 200:
# degree = -1.2
degree = -1.1 # For enhancing frequency
# elif EYE.get_front_state() == "straight":
# degree = 0.6
else:
# degree = 1.4
degree = 1.3 # For enhancing frequency
# if not info["has_line"]:
# if center < -55:
# # degree = 1.6
# degree = 1.4 # For slow speed
# elif center > 50:
# degree = -1.2
# # degree = -1.2 # For slow speed
# # elif center > 19:
# # degree = -1.2
# else:
# degree = 1.4
if SCHEDULER.debug_option["show_center_slope"]:
rospy.logdebug(
"[PROC] center: {:.2f} slope: {:.2f} degree: {:.2f} adj: {:.2f} buf_sum: {:.2f} {} {}".format(
center, slope, degree, adjust_angular, buf_sum, EYE.get_front_state(), info["has_line"])
)
rospy.Timer(
rospy.Duration(0.15), SCHEDULER.release_subcam_occupied, oneshot=True
)
TURTLE.turn(0.13, degree)
def process_lidar(lidar_data):
""" process the lidar image """
if not SCHEDULER.is_lidar_enable():
return
if SCHEDULER.is_lidar_occupied() or TURTLE.is_occupied():
return
state = SCHEDULER.get_state()
if SCHEDULER.debug_option["show_timer"] and (state != "construction"):
SCHEDULER.check_time("lidar", min=0.4, stop_when_delay=False)
set_lidar_values(lidar_data)
state = SCHEDULER.get_state()
front = get_object_distance("front")
if (front < 0.15) and (front > 0):
TURTLE.stop()
return
    if state == "default":
leftside = get_object_distance("leftside")
print(leftside)
if (leftside < 0.35) and (leftside > 0):
rospy.Timer(
rospy.Duration(5), SCHEDULER.release_lidar_occupied, oneshot=True
)
SCHEDULER.set_state("to_construction")
return
    elif state == "to_construction":
leftside = get_object_distance("leftside")
print("to_construction: " + str(leftside))
if (leftside < 0.35) and (leftside > 0):
SCHEDULER.set_state("construction")
return
# rospy.Timer(
# rospy.Duration(0.15), SCHEDULER.release_lidar_occupied, oneshot=True
# )
# process_construction()
    elif state == "construction":
        process_construction()
    elif state == "parking":
        process_parking()
def process_parking():
""" process parking state """
global STEP
# if TURTLE.is_occupied():
# return
if STEP == 10:
frontleft = get_object_distance("frontleft")
frontright = get_object_distance("frontright")
left = get_object_distance("left")
right = get_object_distance("right")
if SCHEDULER.debug_option["show_parking_lidar"]:
rospy.logdebug("frontright: {:.2f} frontleft: {:.2f}".format(
frontright, frontleft
))
if (frontleft > 0) and (frontleft < 1.0):
STEP = 11
rospy.logdebug("[PROC] STEP changed to {}".format(STEP))
elif (frontright > 0) and (frontright < 1.0):
STEP = 12
rospy.logdebug("[PROC] STEP changed to {}".format(STEP))
if (left > 0) and (left < 0.5):
STEP = 13
rospy.logdebug("[PROC] STEP changed to {}".format(STEP))
elif (right > 0) and (right < 0.5):
STEP = 14
rospy.logdebug("[PROC] STEP changed to {}".format(STEP))
# NOTE: return is needed to prevent executing STEP += 1
return
elif STEP == 11:
SCHEDULER.disable_cams()
TURTLE.set_speed("normal")
TURTLE.go_forward(2.5)
TURTLE.go_turn("right", 2)
STEP = 15
rospy.logdebug("[PROC] STEP changed to {}".format(STEP))
return
elif STEP == 12:
# Edit HERE
return
elif STEP == 13:
# Edit HERE
return
elif STEP == 14:
SCHEDULER.disable_cams()
TURTLE.set_speed("normal")
TURTLE.go_turn("left", angular=1.8, duration=1.2)
STEP = 15
rospy.logdebug("[PROC] STEP changed to {}".format(STEP))
return
elif STEP == 15:
# Edit HERE
return
elif STEP == 16:
# Edit HERE
return
elif STEP == 17:
# Edit HERE
return
elif STEP == 18:
# Edit HERE
return
elif STEP == 19:
TURTLE.set_speed("fast")
SCHEDULER.set_state("zigzag")
else:
return
STEP += 1
def process_construction():
""" process construction state """
global STEP
global NUM_OBSTRUCTION
global LANE_TO
global BUF_ANGULAR
global BUF_SIZE
if TURTLE.is_occupied():
return
if STEP == 0:
# TURTLE.set_speed("normal")
leftside = get_object_distance("leftside")
left = get_object_distance("left")
if leftside > 0:
rospy.logdebug("[PROC] LIDAR LEFTSIDE: {}".format(
leftside))
if (leftside > 0) and (leftside < 0.50) and (left > 1.00):
# EYE.check_yellow = False
SCHEDULER.set_state("construction")
rospy.loginfo("[PROC] construction state started.")
STEP = 1
rospy.logdebug("[PROC] STEP changed to {}".format(STEP))
TURTLE.go_forward(3.5)
return
else:
return
elif STEP == 1:
TURTLE.set_speed("normal")
TURTLE.set_speed_smooth("slow")
TURTLE.turn(0.13, 0)
left = get_object_distance("left")
leftback = get_object_distance("leftback")
        rospy.logdebug("[PROC] LIDAR LEFT: {:.2f} LEFTBACK: {:.2f}".format(left, leftback))
if (left > 0) and (left < 0.50):
return
else:
TURTLE.set_speed("slow")
if (leftback > 0.5):
# TURTLE.go_forward(2.5)
STEP = 3
rospy.logdebug("[PROC] STEP changed to {}".format(STEP))
return
elif STEP == 2:
# TODO: write code for first left lane
pass
elif STEP == 3:
TURTLE.set_speed("normal")
TURTLE.set_speed_smooth("stop")
front = get_object_distance("front")
right_biased = get_object_distance("right_biased")
if (front > 1.0) and (right_biased < 1.0) and (right_biased > 0.0):
print("passed", front, right_biased)
print("BUF_SIZE: ", BUF_SIZE)
BUF_SIZE = 15
reset_buffer()
TURTLE.set_speed_smooth("slow")
pass
elif front * right_biased == 0:
return
else:
TURTLE.set_speed("stop")
print("turning...", front, right_biased)
TURTLE.turn(0.13, 1.0)
# rospy.sleep(rospy.Duration(5.0))
return
# TURTLE.go_turn("left")
elif STEP == 4:
right_biased = get_object_distance("right_biased")
left_biased = get_object_distance("left_biased")
front = get_object_distance("front")
if right_biased == 0.0:
right_biased = 3.0
if left_biased == 0.0:
left_biased = 3.0
if front == 0.0:
front = 3.0
elif front < 0.2:
TURTLE.set_speed_smooth("stop")
else:
TURTLE.set_speed_smooth("slow")
min_distance = min(right_biased, left_biased)
degree = 0
if (front < 1.0):
degree += max(pow(1.0 - front, 2), 0)
else:
degree += max(0.5 - min_distance, 0) * 3
# if min_distance < 0.5:
# degree += max((0.5 - min_distance), 0) * 1.5
# elif (min_distance > 1.0) and (min_distance < 3.0):
# degree = 0.2
if (left_biased == min_distance) and (min_distance < 0.5):
degree *= -1
# max_distance = max(right_biased, left_biased)
# if (left_biased == max_distance):
# degree *= -1
# degree = 0
# if min_distance > 0 and min_distance < 0.5:
# if right_biased > left_biased:
# degree = (0.5 - min_distance) * (-7)
# LANE_TO = "right"
# elif right_biased < left_biased:
# degree = (0.5 - min_distance) * (7)
# LANE_TO = "left"
# if is_left_crashable():
# degree = -1.7
# elif is_right_crashable():
# degree = 1.7
degree *= 3
degree = max(min(degree, 2.0), -2.0)
BUF_ANGULAR.append(degree)
degree -= BUF_ANGULAR.pop(0)
print("BUF_ANGULAR:", BUF_ANGULAR)
# if degree != 0:
# BUF_ANGULAR.append(degree)
# elif len(BUF_ANGULAR) > 9:
# STEP = 5
if SCHEDULER.debug_option["show_construction_lidar"]:
rospy.logdebug("[PROC] r_based: {:.2f} l_based: {:.2f} min: {:.2f} front: {:.2f} deg: {:.2f}"
.format(right_biased, left_biased, min_distance, front, degree))
TURTLE.turn(0.13, degree)
return
elif STEP == 5:
        print("[STEP 5]")
if len(BUF_ANGULAR) > 0:
TURTLE.turn(0.13, -BUF_ANGULAR.pop(0))
return
else:
front = get_object_distance("front")
print(front)
# if (front > 0) and (front < 1.0):
# if LANE_TO == "right":
# TURTLE.turn(0.13, 0.8)
# else:
# TURTLE.turn(0.13, -0.8)
# return
# else:
if NUM_OBSTRUCTION < 1:
NUM_OBSTRUCTION += 1
STEP = 4
return
elif STEP == 6:
TURTLE.go_forward(1)
TURTLE.set_speed("normal")
TURTLE.go_turn("left")
TURTLE.set_speed("normal")
TURTLE.set_speed("fast")
TURTLE.set_speed_smooth("normal")
TURTLE.go_forward(5)
STEP = 10
SCHEDULER.set_state("parking")
BUF_SIZE = 3
reset_buffer()
return
STEP += 1
rospy.logdebug("[PROC] STEP changed to {}".format(STEP))
# def process_construction():
# """ process construction state """
# global STEP
# global NUM_OBSTRUCTION
# global LANE_TO
# if TURTLE.is_occupied():
# return
# if STEP == 0:
# leftside = get_object_distance("leftside")
# left = get_object_distance("left")
# # front = get_object_distance("front")
# print(left)
# if leftside > 0:
# rospy.logdebug("[PROC] LIDAR LEFTSIDE: {}".format(
# leftside))
# if (leftside > 0) and (leftside < 0.40) and (left > 1.00):
# STEP = 1
# rospy.logdebug("[PROC] STEP changed to {}".format(STEP))
# # SCHEDULER.disable_cams()
# SCHEDULER.set_state("construction")
# rospy.loginfo("[PROC] construction state started.")
# TURTLE.go_forward(3.0)
# # rospy.sleep(rospy.Duration(0.5))
# return
# else:
# return
# # elif (left > 0) and (left < 1.5):
# # STEP = 2
# # rospy.logdebug("[PROC] STEP changed to {}".format(STEP))
# # return
# # rospy.sleep(rospy.Duration(2))
# elif STEP == 1:
# TURTLE.set_speed("normal")
# TURTLE.set_speed_smooth("slow")
# left = get_object_distance("left")
# if left > 0:
# rospy.logdebug("[PROC] LIDAR LEFT: {}".format(
# left))
# if (left < 0.50) or (left > 1.5):
# return
# else:
# STEP = 3
# rospy.logdebug("[PROC] STEP changed to {}".format(STEP))
# return
# elif STEP == 2:
# # TODO: write code for first left lane
# pass
# elif STEP == 3:
# TURTLE.go_turn("left")
# LANE_TO = "left"
# elif STEP == 4:
# TURTLE.set_speed("normal")
# reverse_lane()
# biased = get_object_distance(LANE_TO + "_biased")
# if biased > 0:
# rospy.logdebug("[PROC] LIDAR {:s}_BIASED: {}"
# .format(LANE_TO, biased))
# reverse_lane()
# if (biased == 0) or (biased > 0.35):
# return
# TURTLE.go_turn(LANE_TO, duration=0.5, angular=1.8)
# TURTLE.set_speed("normal")
# # TURTLE.set_speed("fast"
# # if LANE_TO is "left":
# # TURTLE.turn(0.13, 1.3, consuming_time=1.5)
# # else:
# # TURTLE.turn(0.13, -1.3, consuming_time=1.5)
# rospy.sleep(rospy.Duration(1.0))
# reverse_lane()
# elif STEP == 5:
# TURTLE.go_turn(LANE_TO, duration=0.5, angular=1.8)
# TURTLE.set_speed("normal")
# # TURTLE.set_speed("fast")
# # if LANE_TO is "left":
# # TURTLE.turn(0.13, 1.3, consuming_time=1.5)
# # else:
# # TURTLE.turn(0.13, -1.3, consuming_time=1.5)
# NUM_OBSTRUCTION += 1
# if NUM_OBSTRUCTION < 2:
# STEP = 4
# return
# elif STEP == 6:
# TURTLE.go_forward(2.5)
# rospy.sleep(rospy.Duration(0.6))
# elif STEP == 7:
# TURTLE.go_turn("left")
# elif STEP == 8:
# TURTLE.set_speed("fast")
# TURTLE.set_speed_smooth("normal")
# TURTLE.go_forward(5)
# rospy.sleep(rospy.Duration(0.5))
# elif STEP == 9:
# # NOTE: turn to parking step
# STEP = 10
# SCHEDULER.set_state("parking")
# else:
# return
# STEP += 1
# rospy.logdebug("[PROC] STEP changed to {}".format(STEP))
initialize()
```
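In `process_subcam`, steering on straights is damped by a short FIFO of past commands: the newest degree is appended and 0.9 times the oldest is subtracted from the current output, so a constant steering request decays once the buffer fills. A minimal sketch of that smoothing step in isolation:

```python
# FIFO damping from process_subcam: subtract a scaled copy of the steering
# command issued BUF_SIZE frames ago.
BUF_SIZE = 3
buf_angular = [0.0] * BUF_SIZE

def damped(degree):
    adjust = buf_angular.pop(0) * 0.9  # oldest command, scaled down
    buf_angular.append(degree)
    return degree - adjust

for d in (0.4, 0.4, 0.4, 0.4):
    print(damped(d))  # 0.4, 0.4, 0.4, then 0.04 once the buffer wraps
```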
#### File: galapagos/scripts/process_front_image_new.py
```python
from lib_line_tracing import *
from lib_signal_recognition import *
from lib_lidar import *
from turtlebot import TURTLE
# * Variables
CURRENT_STATE = 'traffic_light'
MOVING_POSITION = False
SEEN_PARKING_SIGN = False
SEEN_TUNNEL_SIGN = False
IS_IN_TUNNEL = False
HAS_OBJECT_IN_50 = False
HAS_OBJECT_IN_20 = False
straight_cnt = 0
curving_cnt = 0
# * Methods
def reset_front_image_flags():
global MOVING_POSITION
global SEEN_PARKING_SIGN
global SEEN_TUNNEL_SIGN
    global SEEN_STOPPING_SIGN  # was HAS_OBJECT_IN_FRONT, which this function never assigns
MOVING_POSITION = False
SEEN_PARKING_SIGN = False
SEEN_TUNNEL_SIGN = False
SEEN_STOPPING_SIGN = False
def process_front_image(image):
""" process the image of raspicam """
global CURRENT_STATE
global MOVING_POSITION
global SEEN_PARKING_SIGN
global SEEN_TUNNEL_SIGN
    raw_data = np.frombuffer(image.data, np.uint8)  # np.fromstring is deprecated
cv_img = cv2.imdecode(raw_data, cv2.IMREAD_COLOR)
#### ROI SETTING ######
blob_ROI = cv_img[100:, :]
#######################
if CURRENT_STATE == 'traffic_light':
if is_light_green(cv_img):
TURTLE.enable()
#TURTLE.set_speed('fast')
CURRENT_STATE = 'intersection'
return
else:
return
if CURRENT_STATE == 'intersection':
if is_intersection(cv_img):
TURTLE.set_weight(0.7)
CURRENT_STATE = 'left_or_right'
return
else:
return
if CURRENT_STATE == 'left_or_right':
tmp_state = check_left_right_sign(blob_ROI)
if tmp_state == 'right':
#print("11tmp state: ",tmp_state, ", right cnt: ",inter_right_cnt)
TURTLE.LINE_BASE = 2
#print("11tmp state: ",tmp_state, ", right cnt: ",inter_right_cnt)
CURRENT_STATE = 'inter_curving'
TURTLE.set_weight(1.0)
elif tmp_state == 'left':
#print("11tmp state: ",tmp_state, ", left cnt: ",inter_left_cnt)
TURTLE.LINE_BASE = 1
#print("22tmp state: ",tmp_state, ", left cnt: ",inter_left_cnt)
CURRENT_STATE = 'inter_curving'
TURTLE.set_weight(1.0)
elif tmp_state == 'none':
return
if CURRENT_STATE == 'inter_curving':
global straight_cnt
if abs(TURTLE.weight*TURTLE._angular) < 0.1:
straight_cnt += 1
if straight_cnt > 3 :
straight_cnt = 0
TURTLE.LINE_BASE = 3
CURRENT_STATE = 'construct_recog'
return
else:
return
else:
return
if CURRENT_STATE == 'construct_recog':
tmp_state = is_construction(blob_ROI)
if tmp_state is True:
TURTLE.LINE_BASE = 2
CURRENT_STATE = 'construction_ready'
else:
return
if CURRENT_STATE == 'construction_ready':
global curving_cnt
if abs(TURTLE.weight*TURTLE._angular) > 0.25 :
curving_cnt += 1
if curving_cnt > 5 :
curving_cnt = 0
CURRENT_STATE = 'construction'
TURTLE.enable_fish = False
else:
return
else:
return
if CURRENT_STATE == 'construction':
'''
task for Ji-hyung
'''
'''
if CURRENT_STATE == 'stop_sign':
if stop_sign_flag == False:
sign_stop = is_stopping_sign(image)
if sign_stop == True:
stop_sign_flag = True
return
else:
return
else:
sign_stop = is_stopping_sign(image)
if sign_stop == False:
stop_false_cnt = stop_false_cnt + 1
if stop_false_cnt > 6 :
CURRENT_STATE = 'construction'
return
else:
return
else:
stop_false_cnt = 0
return
if CURRENT_STATE == 'construction':
TURTLE.set_speed("slow")
if HAS_OBJECT_IN_20:
TURTLE.turn(MOVING_POSITION, 15, 3)
if MOVING_POSITION == 'left':
MOVING_POSITION = 'right'
else:
MOVING_POSITION = 'left'
return
else:
if has_crossing_line(image):
if MOVING_POSITION == 'left':
moving_to = 'right'
else:
moving_to = 'left'
TURTLE.turn(moving_to, 15, 3)
else:
# TODO: combine trace_line() + trace_blocking()
if get_num_of_lines(image) == 2:
CURRENT_STATE = 'normal'
else:
return
if CURRENT_STATE == 'parking':
# TODO: finish code of parking state
TURTLE.set_speed('normal')
return
if CURRENT_STATE == 'blocking_bar':
# TODO: finish code of blocking_bar state
TURTLE.set_speed('normal')
return
if CURRENT_STATE == 'tunnel':
# TODO: finish code of tunnel state
TURTLE.set_speed('normal')
return
# ending the normal state:
if CURRENT_STATE == 'normal':
reset_front_image_flags()
TURTLE.set_speed('fast')
return
'''
def process_fish_image(image):
""" process the fisheye lens image """
trace_line(image)
if CURRENT_STATE == 'intersection':
if sign_intersection == 'left':
TURTLE.set_speed_by_percentage(0.5)
TURTLE.set_angular(TURTLE._angular + 0.2)
elif sign_intersection == 'right':
TURTLE.set_speed_by_percentage(0.5)
TURTLE.set_angular(TURTLE._angular - 0.2)
elif CURRENT_STATE == 'stop_sign':
        if left_detected > right_detected:
            TURTLE.set_speed_by_percentage(0.5)
            TURTLE.set_angular(TURTLE._angular + 0.2)
        elif right_detected > left_detected:  # was 'right_detected < left_detected', duplicating the first test
            TURTLE.set_speed_by_percentage(0.5)
            TURTLE.set_angular(TURTLE._angular - 0.2)
TURTLE.move()
def process_usbcam_image(compressed_image):
""" process the image of usb webcam """
return
def process_lidar(lidar_data):
""" process the lidar data """
global HAS_OBJECT_IN_50
global HAS_OBJECT_IN_20
global IS_IN_TUNNEL
HAS_OBJECT_IN_50 = has_object(lidar_data, 50)
HAS_OBJECT_IN_20 = has_object(lidar_data, 20)
IS_IN_TUNNEL = is_in_tunnel(lidar_data)
def test_line_tracing(image):
trace_line(image)
# * Initialization
reset_front_image_flags()
```
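State transitions in `process_front_image` are debounced by counting consecutive frames instead of reacting to a single reading; `inter_curving` only advances once the weighted angular command has stayed below 0.1 for more than three frames. A sketch of that pattern with the counter pulled out (names and the parameter packaging are ours):

```python
# Frame-count debounce from the 'inter_curving' state.
straight_cnt = 0

def stably_straight(weighted_angular, needed=3):
    """Return True only after more than `needed` consecutive near-straight frames."""
    global straight_cnt
    if abs(weighted_angular) < 0.1:
        straight_cnt += 1
        if straight_cnt > needed:
            straight_cnt = 0
            return True
    return False
```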
#### File: galapagos/scripts/processor.py
```python
import rospy
# NOTE: Python 3.5+ needed to use asyncio
import asyncio
from lib_frontcam import *
from lib_fishcam import *
from lib_lidar import *
from lib_eye import *
from lib_parking import *
from turtlebot import TURTLE
from constants import IS_DEBUG_MODE, SELECTED_STATE
# NOTE: to check time:
import timeit
import math
# * Variables
################ Variables by minsoo #######################
CURRENT_STATE = 'inter_curving'
#CURRENT_STATE = 'intersection'
MOVING_POSITION = False
SEEN_PARKING_SIGN = False
SEEN_TUNNEL_SIGN = False
IS_IN_TUNNEL = False
#
# HAS_OBJECT_IN_50 = False
# HAS_OBJECT_IN_20 = False
straight_cnt = 0
curving_cnt = 0
LIDAR_FLAG = False
############################################################
GALAPAGOS_STATE = "view"
'''
if SELECTED_STATE == '':
CURRENT_STATE = 'traffic_light'
else:
CURRENT_STATE = SELECTED_STATE
'''
LINE_BASE = 'both'
MOVING_POSITION = False # ! Deprecated
DIRECTION = 'left'
SEEN_LEFT_SIGN = False
SEEN_PARKING_SIGN = False # ! Deprecated
SEEN_TUNNEL_SIGN = False # ! Deprecated
IS_IN_TUNNEL = False
SEEN_STOPPING_SIGN = False
SIGN_CORNER = None
# DISTANCE_FRONT = 0.00 # ! Deprecated
# HAS_OBJECT_IN_50 = False
# HAS_OBJECT_IN_20 = False
HAS_BOTH_LINES = False
IS_TURNING = False
TRACKING = "fish"
TURNING_TO = False
TEST_ANGULAR = 0
TEST_ONCE = True
# STATE_CONSTRUCTION = "start"
STATE_CONSTRUCTION = "searching"
STATE_TUNNEL = "inside"
TURN_COUNT = 0
# * Methods
def initialize():
EYE.calibrate()
TURTLE.set_speed('normal')
def reset_front_image_flags():
global LINE_BASE
global MOVING_POSITION
global SEEN_PARKING_SIGN
global SEEN_TUNNEL_SIGN
global SEEN_STOPPING_SIGN
LINE_BASE = 'both'
MOVING_POSITION = False
SEEN_PARKING_SIGN = False
SEEN_TUNNEL_SIGN = False
SEEN_STOPPING_SIGN = False
def reverse_direction():
global DIRECTION
if DIRECTION == 'right':
DIRECTION = 'left'
else:
DIRECTION = 'right'
rospy.loginfo('\n[PROC] direction changed to ' + DIRECTION)
def track_front(event=None):
global TRACKING
EYE.reset_state()
TRACKING = "front"
# rospy.loginfo("\n[PROC] tracking changed to " + TRACKING)
return
def track_fish(event=None):
global TRACKING
TRACKING = "fish"
# rospy.loginfo("\n[PROC] tracking changed to " + TRACKING)
return
def process_frontcam(image):
""" process the image of raspicam """
global CURRENT_STATE
global MOVING_POSITION
global SEEN_PARKING_SIGN
global SEEN_TUNNEL_SIGN
global LIDAR_FLAG
raw_data = np.fromstring(image.data, np.uint8)
cv_img = cv2.imdecode(raw_data, cv2.IMREAD_COLOR)
#### ROI SETTING ######
blob_ROI = cv_img[100:, :]
#######################
if CURRENT_STATE == 'traffic_light':
if is_light_green(cv_img):
TURTLE.enable()
#TURTLE.set_speed('fast')
print("detected green")
CURRENT_STATE = 'intersection'
#TURTLE.set_weight(0.8)
return
        else:
            print("no green")
            TURTLE.enable()
            #TURTLE.set_speed('fast')
            print("proceeding anyway (debug override)")
            CURRENT_STATE = 'intersection'
            #TURTLE.set_weight(0.8)
            return
if CURRENT_STATE == 'intersection':
cv2.imshow("blob_ROI",blob_ROI)
# cv2.waitKey(1)
print("intersection state")
if is_intersection(cv_img):
TURTLE.set_weight(0.8)
CURRENT_STATE = 'left_or_right'
print("intersection detected!!")
return
else:
return
if CURRENT_STATE == 'left_or_right':
print("left or right state")
cv2.imshow("blob_ROI",blob_ROI)
# cv2.waitKey(1)
tmp_state = check_left_right_sign(blob_ROI)
print("tmp state: ",tmp_state)
if tmp_state == 'right':
#print("11tmp state: ",tmp_state, ", right cnt: ",inter_right_cnt)
TURTLE.LINE_BASE = 2
#print("11tmp state: ",tmp_state, ", right cnt: ",inter_right_cnt)
CURRENT_STATE = 'inter_curving'
TURTLE.set_weight(1.0)
elif tmp_state == 'left':
#print("11tmp state: ",tmp_state, ", left cnt: ",inter_left_cnt)
TURTLE.LINE_BASE = 1
#print("22tmp state: ",tmp_state, ", left cnt: ",inter_left_cnt)
CURRENT_STATE = 'inter_curving'
TURTLE.set_weight(1.0)
elif tmp_state == 'none':
return
if CURRENT_STATE == 'inter_curving':
print("#################################################")
print("########### inter_curving state #################")
print("#################################################")
global straight_cnt
if abs(TURTLE.weight*TURTLE._angular) < 0.1:
straight_cnt += 1
print("straight counting : ",straight_cnt," is counted")
if straight_cnt > 5:
straight_cnt = 0
TURTLE.LINE_BASE = 2
CURRENT_STATE = 'construction'
return
else:
return
else:
straight_cnt = 0
return
if CURRENT_STATE == 'construct_recog':
tmp_state = is_construction(blob_ROI)
print(tmp_state)
if tmp_state is True:
TURTLE.LINE_BASE = 2
CURRENT_STATE = 'construction'
#LIDAR_FLAG = True
else:
return
if CURRENT_STATE == 'construction':
return
# if CURRENT_STATE == 'construction':
# '''
# task for Ji-hyung
# '''
# TURTLE.LINE_BASE = 1
# CURRENT_STATE = 'parking'
# pass
'''
if CURRENT_STATE == 'stop_sign':
if stop_sign_flag == False:
sign_stop = is_stopping_sign(image)
if sign_stop == True:
stop_sign_flag = True
return
else:
return
else:
sign_stop = is_stopping_sign(image)
if sign_stop == False:
stop_false_cnt = stop_false_cnt + 1
if stop_false_cnt > 6 :
CURRENT_STATE = 'construction'
return
else:
return
else:
stop_false_cnt = 0
return
# if CURRENT_STATE == 'construction':
# TURTLE.set_speed("slow")
# if HAS_OBJECT_IN_20:
TURTLE.turn(MOVING_POSITION, 15, 3)
if MOVING_POSITION == 'left':
MOVING_POSITION = 'right'
else:
MOVING_POSITION = 'left'
return
else:
if has_crossing_line(image):
if MOVING_POSITION == 'left':
moving_to = 'right'
else:
moving_to = 'left'
TURTLE.turn(moving_to, 15, 3)
else:
# TODO: combine trace_line() + trace_blocking()
if get_num_of_lines(image) == 2:
CURRENT_STATE = 'normal'
else:
return
if CURRENT_STATE == 'parking':
# TODO: finish code of parking state
TURTLE.set_speed('normal')
return
if CURRENT_STATE == 'blocking_bar':
# TODO: finish code of blocking_bar state
TURTLE.set_speed('normal')
return
if CURRENT_STATE == 'tunnel':
# TODO: finish code of tunnel state
TURTLE.set_speed('normal')
return
# ending the normal state:
if CURRENT_STATE == 'normal':
reset_front_image_flags()
TURTLE.set_speed('fast')
return
'''
'''
def process_frontcam(image):
""" process the image of raspicam """
global CURRENT_STATE
global MOVING_POSITION
global SEEN_PARKING_SIGN
global SEEN_TUNNEL_SIGN
global LINE_BASE
global SEEN_STOPPING_SIGN
if CURRENT_STATE == 'traffic_light':
if is_light_green(image):
if IS_DEBUG_MODE == True:
TURTLE.set_speed('fast')
rospy.loginfo('\n[PROC] Current state: normal')
CURRENT_STATE = 'normal'
else:
rospy.logdebug('Debug mode finished')
raise rospy.ROSInterruptException
else:
return
if CURRENT_STATE == 'normal':
LINE_BASE = 'both'
sign = check_sign(image)
if sign == 'intersection':
TURTLE.set_speed('normal')
rospy.loginfo('\n[PROC] Current state: intersection')
CURRENT_STATE = 'intersection'
elif sign == 'construction':
TURTLE.set_speed('slow')
rospy.loginfo('\n[PROC] Current state: construction')
DIRECTION = 'right'
LINE_BASE = 'left'
CURRENT_STATE = 'construction'
elif sign == 'parking':
# SEEN_PARKING_SIGN = True # ! Deprecated
TURTLE.set_speed('normal')
rospy.loginfo('\n[PROC] Current state: parking')
CURRENT_STATE = 'parking'
# return
elif HAS_OBJECT_IN_50:
TURTLE.set_speed('slow')
rospy.loginfo('\n[PROC] Current state: blocking_bar')
CURRENT_STATE = 'blocking_bar'
elif sign == 'tunnel':
# SEEN_TUNNEL_SIGN = True # ! Deprecated
TURTLE.set_speed('normal')
rospy.loginfo('\n[PROC] Current state: tunnel')
TURTLE.turn_by_degree(45, 0) # TODO: support integer deg value
# return
elif is_straight_in(10, image):
if is_straight_in(50, image):
TURTLE.increase_speed()
else:
TURTLE.decrease_speed()
return
else:
TURTLE.set_speed('normal')
return
if CURRENT_STATE == 'intersection':
sign_corner = check_sign(image)
if sign_corner == None:
if SEEN_STOPPING_SIGN:
TURTLE
if sign_corner == None:
LINE_BASE = 'both'
else:
if sign_corner == 'left':
LINE_BASE = 'left'
elif sign_corner == 'right':
LINE_BASE = 'right'
if is_stopping_sign(image):
SEEN_STOPPING_SIGN = True
else:
if SEEN_STOPPING_SIGN:
CURRENT_STATE = 'normal'
else:
return
# if CURRENT_STATE == 'construction':
# TURTLE.set_speed('slow')
# if HAS_OBJECT_IN_20:
TURTLE.turn(MOVING_POSITION, 15, 3)
if MOVING_POSITION == 'left':
MOVING_POSITION = 'right'
else:
MOVING_POSITION = 'left'
return
else:
if has_crossing_line(image):
if MOVING_POSITION == 'left':
moving_to = 'right'
else:
moving_to = 'left'
TURTLE.turn(moving_to, 15, 3)
else:
# TODO: combine trace_line() + trace_blocking()
if get_num_of_lines(image) == 2:
CURRENT_STATE = 'normal'
else:
return
if CURRENT_STATE == 'parking':
# TODO: finish code of parking state
TURTLE.set_speed('normal')
return
if CURRENT_STATE == 'blocking_bar':
# TODO: finish code of blocking_bar state
TURTLE.set_speed('normal')
return
if CURRENT_STATE == 'tunnel':
# TODO: finish code of tunnel state
TURTLE.set_speed('normal')
return
# ending the normal state:
if CURRENT_STATE == 'normal':
if IS_DEBUG_MODE == True:
rospy.logdebug('Debug mode finished')
raise rospy.ROSInterruptException
else:
rospy.loginfo('\n[PROC] Current state: normal')
reset_front_image_flags()
TURTLE.set_speed('fast')
return
'''
def idle(event=None):
""" idle process """
global TRACKING
rospy.loginfo("\n[PROC] idle executed")
TRACKING = "idle"
# TURTLE.disable()
rospy.spin()
# while True:
# rospy.loginfo('tracking:' + TRACKING)
return
def process_fishcam(image):
""" trace side lines by base
if base is 'left: trace left line
if base is 'both': trace both lines
if base is 'right': trace right lne
return value is not needed.
"""
if TURTLE._enable_running is False:
return
if TURTLE.enable_fish is False:
return
if TURTLE.LINE_BASE == 3:
trace_line(image)
elif TURTLE.LINE_BASE == 1 or TURTLE.LINE_BASE == 2:
# rospy.loginfo("[LINE] trace_one_line(" + str(LINE_BASE) + ")")
trace_one_line(image, TURTLE.LINE_BASE)
# use TURTLE.set_
# use TURTLE.set_angular(angular)
return
'''
def process_fishcam(image):
""" process the fisheye lens image """
start = timeit.default_timer()
global LINE_BASE
global TRACKING
global TURNING_TO
global TEST_ANGULAR
global TEST_ONCE
if TRACKING is not "fish":
return
if not EYE.is_fish_occupied():
# if True:
info = EYE.see_bottom(image)
# rospy.Timer(rospy.Duration(0.04), EYE.release_fish_occupied, oneshot=True, reset=True)
if info is None:
print("NO INFO!")
# TURTLE.set_angular_smooth(0.12)
# pass
else:
# rospy.loginfo("\n[PROC] info: " + str(info))
# TURTLE.set_speed('slow')
# if info["slope"]:
# TURTLE.set_speed_by_percentage(-abs(info["slope"] / 6))
# else:
TURTLE.set_speed('normal')
if info["right"] < 640:
rospy.loginfo("\n[PROC] info: " + str(info))
TURTLE.set_angular(0.75 + abs(info["slope"]) * 1.8)
TURTLE.set_angular_smooth(-0.1)
else:
if TEST_ANGULAR != 1:
if info["left"] == 0:
if TEST_ONCE:
TURTLE.set_angular(0.12)
TURTLE.set_angular_smooth(0.05)
else:
TURTLE.set_angular(0.12)
TURTLE.set_angular_smooth(0.05)
TEST_ONCE = False
# elif info["left"] < 7 and info["left"] > 0:
# TURTLE.set_angular_smooth(0.1)
TEST_ANGULAR = 1
if info["left"] > 0 and info["left"] <= 10:
TURTLE.set_angular(0)
TEST_ANGULAR = 0
# if TEST_ANGULAR is not -1:
if info["left"] > 10:
TURTLE.set_angular(-0.75 + -abs(info["slope"]) * 1.8)
TURTLE.set_angular_smooth(-0.1)
TEST_ANGULAR = -1
rospy.Timer(rospy.Duration(0.05), EYE.release_fish_occupied, oneshot=True, reset=True)
# EYE.release_fish_occupied()
end = timeit.default_timer()
print("l: {:d}".format(info["left"]) + " s: {:.01f}".format(info["slope"])
+ " time: {:.02f}".format(end - start))
# print(end - start)
# print("turning to: " + str(TURNING_TO))
# if TURNING_TO:
# TURTLE.turn(TURNING_TO, 2.3)
# TURNING_TO = None
# rospy.Timer(rospy.Duration(2.3), track_front, oneshot=True, reset=True)
# else:
# track_front()
# print("awaiting finished")
# idle()
# track_front()
# rospy.Timer(rospy.Duration(5), idle)
# rospy.loginfo('\n[PROC] fish image received')
# trace_line_by_base(image, 2)
# trace_line(image)
# TURTLE.set_angular(trace_line_by_base(LINE_BASE))
# trace_line(image) # ! Deprecated
# print(TURTLE.get_info())
# TURTLE.move()
return
'''
def process_subcam(image):
""" process the subcam image """
start = timeit.default_timer()
global LINE_BASE
global TRACKING
global TURNING_TO
global TEST_ANGULAR
global TEST_ONCE
if TRACKING is not "fish":
return
if not EYE.is_sub_occupied():
# if True:
rospy.Timer(rospy.Duration(0.04), EYE.release_sub_occupied, oneshot=True)
info = EYE.see_sub(image)
# rospy.Timer(rospy.Duration(0.04), EYE.release_sub_occupied, oneshot=True, reset=True)
if info is None:
print("NO INFO!")
# TURTLE.set_angular_smooth(0.12)
# pass
else:
# TURTLE.set_speed('slow')
# if info["slope"]:
# TURTLE.set_speed_by_percentage(-abs(info["slope"] / 6))
# else:
TURTLE.set_speed('normal')
center_x, center_y, point_x, point_y = (value for value in info["line_center"])
gap_x = abs(point_x - center_x)
rospy.loginfo("\n[PROC] info: \n" + str(info))
rospy.loginfo("\nGAP: " + str(gap_x))
if info["slope"] > 0:
slope = math.sqrt(0.5 * info["slope"])
else:
slope = - math.sqrt(-0.5 * info["slope"])
if not info["has_line"]:
if gap_x > 100:
if TEST_ONCE:
TURTLE.set_angular(-1.75)
TURTLE.set_angular_smooth(-0.1)
else:
TURTLE.set_angular_smooth(-0.1)
elif gap_x < 0:
if TEST_ONCE:
TURTLE.set_angular(1.75)
TURTLE.set_angular_smooth(0.2)
else:
TURTLE.set_angular_smooth(0.2)
TEST_ONCE = False
# elif gap_x > 100:
# TURTLE.set_angular(-0.15 * info["slope"] * 4)
# TURTLE.set_angular_smooth(-0.1)
else:
TEST_ONCE = True
if gap_x > 100:
TURTLE.set_angular(-0.15 + slope)
TURTLE.set_angular_smooth(-0.1)
elif gap_x > 30:
TURTLE.set_angular(0)
else:
# if slope < 0:
# TURTLE.set_angular(-0.25 + slope * 1.5)
# TURTLE.set_angular_smooth(-0.1)
# else:
rospy.loginfo("LOST")
TURTLE.set_angular(0.45 + slope)
TURTLE.set_angular_smooth(0.1)
# EYE.release_sub_occupied()
end = timeit.default_timer()
# print("sub l: {:d}".format(info["left"]) + " s: {:.01f}".format(slope)
# + " time: {:.02f}".format(end - start))
print("\nTIME: {:.02f}".format(end - start))
def process_tunnel(lidar_data):
global LIDAR_FLAG
if LIDAR_FLAG is False:
return
global DIRECTION
TURTLE.enable_fish = False
set_lidar_values(lidar_data)
TURTLE.set_speed_smooth("fast")
directions = ["front", "leftside", "rightside"]
min_distance = 1000
for direction in directions:
distance = get_object_distance(direction)
if distance > 0 and distance < min_distance:
min_distance = distance
# distance = min(get_object_distance("front"), get_object_distance("leftside"), get_object_distance("rightside"))
# distance = get_object_distance("front")
rospy.loginfo(str(min_distance) + " " + str(distance))
if min_distance < 0.50 and min_distance > 0:
TURTLE.turn(DIRECTION, 2.03, True, True)
if DIRECTION == "left":
DIRECTION = "right"
elif DIRECTION == "right":
DIRECTION = "left"
def process_tunnel2(lidar_data):
global LIDAR_FLAG
if LIDAR_FLAG is False:
return
global DIRECTION
global IS_TURNING
TURTLE.enable_fish = False
set_lidar_values(lidar_data)
TURTLE.set_speed_smooth("fast")
directions = ["front", "leftside", "rightside"]
distances = {}
for direction in directions:
distances[direction] = get_object_distance(direction)
if not IS_TURNING:
print(distances["leftside"], distances["rightside"])
if distances["leftside"] < 0.45 and distances["leftside"] > 0:
TURTLE.turn("right", 1.5)
DIRECTION = "left"
IS_TURNING = True
elif distances["rightside"] < 0.45 and distances ["rightside"] > 0:
TURTLE.turn("left", 1.5)
DIRECTION = "right"
IS_TURNING = True
else:
print(get_object_distance("front"))
if get_object_distance("front") == 0:
rospy.signal_shutdown("[TURTLE] shutting down...")
else:
TURTLE.turn(DIRECTION, 1.5)
# TURTLE.turn(DIRECTION, 1.5)
IS_TURNING = False
def process_eye(image):
""" process the Eye System """
global TRACKING
global TURNING_TO
# if TRACKING is not "front":
# return
# rospy.loginfo('\n[PROC] frontcam image received')
# rospy.loginfo(EYE.is_occupied())
if not EYE.is_front_occupied():
info = EYE.see(image)
# rospy.loginfo("\n[PROC] info: " + str(info))
rospy.Timer(rospy.Duration(0.1), EYE.release_occupied, oneshot=True)
if TRACKING is not "front":
return
if info is None:
pass
else:
center = info["center"]
TURTLE.set_speed('normal')
if info["state"] is "straight":
# TURTLE.set_speed('normal')
pass
elif info["state"] is "lost_track":
rospy.loginfo("\n[PROC] state: lost_track")
TURNING_TO = info["turning_to"]
TURTLE.go_forward(1)
rospy.Timer(rospy.Duration(1), track_fish, oneshot=True, reset=True)
# elif info["state"] is "turning":
# TURNING_TO = info["turning_to"]
# track_fish()
# rospy.Timer(rospy.Duration(5.5), track_fish, oneshot=True, reset=True)
if center > 500:
TURTLE.set_angular(0.27)
rospy.loginfo("\n[PROC] [!] turning to right at " + str(center))
return
if center < -500:
TURTLE.set_angular(-0.27)
rospy.loginfo("\n[PROC] [!] turning to left at " + str(center))
return
if center > 60:
TURTLE.set_angular(0.12)
# TURTLE.set_angular(0.08)
rospy.loginfo("\n[PROC] turning to right at " + str(center))
return
elif center < -60:
TURTLE.set_angular(-0.12)
# TURTLE.set_angular(-0.08)
rospy.loginfo("\n[PROC] turning to left at " + str(center))
return
TURTLE.set_angular_smooth(0)
return
def process_blocking(lidar_data):
global DISTANCE_FRONT
set_lidar_values(lidar_data)
distance_1 = get_object_distance('front')
distance_2 = get_object_distance('leftside')
distance_3 = get_object_distance('rightside')
rospy.loginfo("\n[PROC] front: " + str(distance_1) + " " + str(distance_2) + " " + str(distance_3))
def process_lidar(lidar_data):
""" process the lidar data """
global CURRENT_STATE
if CURRENT_STATE != "construction":
return
# global IS_IN_TUNNEL
# global DISTANCE_FRONT
# global HAS_OBJECT_IN_20
global IS_TURNING
# global SEEN_PARKING_SIGN
# global SEEN_LEFT_SIGN
global STATE_CONSTRUCTION
global DIRECTION
global TURN_COUNT
if TURN_COUNT == 3:
CURRENT_STATE = "parking"
rospy.signal_shutdown("\n[PROC] Shutting down...")
set_lidar_values(lidar_data)
if not TURTLE.is_settable():
return
rospy.loginfo(str(STATE_CONSTRUCTION) + " " + str(get_object_distance('left')) + " " + str(get_object_distance('rightside')))
if STATE_CONSTRUCTION == "searching":
distance = get_object_distance("left")
if distance == 0:
return
elif distance < 0.35:
STATE_CONSTRUCTION = "ready"
elif distance > 0.5 and distance < 0.9:
STATE_CONSTRUCTION = "start"
DIRECTION = "right"
TURTLE.enable_fish = False
TURTLE.turn("left", 2.03, True)
return
elif STATE_CONSTRUCTION == "ready":
distance = get_object_distance("front")
if distance == 0:
return
elif distance < 1:
STATE_CONSTRUCTION = "fitting"
DIRECTION = "left"
TURTLE.enable_fish = False
# TURTLE.set_angular(0)
TURTLE.turn("right", 0.5, True)
return
elif STATE_CONSTRUCTION == "fitting":
TURTLE.turn("right", 0.5, True)
STATE_CONSTRUCTION = "start"
elif STATE_CONSTRUCTION == "start":
if not IS_TURNING:
if DIRECTION == 'right':
distance = get_object_distance('leftside')
else:
distance = get_object_distance('rightside')
rospy.loginfo('\n[PROC] distance on ' + DIRECTION + ' : ' + str(distance))
if distance == 0:
return
elif distance < 0.35:
TURTLE.turn(DIRECTION, 1.2)
IS_TURNING = True
else:
TURTLE.set_speed('normal')
else:
if DIRECTION == 'right':
distance = get_object_distance('frontleft')
else:
distance = get_object_distance('frontright')
# rospy.loginfo('\n[PROC] turning distance on ' +
# DIRECTION + ' : ' + str(distance))
if distance > 0.34 or distance == 0:
reverse_direction()
TURTLE.turn(DIRECTION, 1.2)
TURN_COUNT = TURN_COUNT + 1
IS_TURNING = False
# """ parking control using lidar_data """
# if SEEN_PARKING_SIGN:
# parking_control(lidar_data)
# #TODO : one line tracer start
# if SEEN_LEFT_SIGN:
# SEEN_PARKING_SIGN = False
# rospy.sleep(rospy.Duration(10))
# SEEN_LEFT_SIGN = False
# #TODO : two line tracer start
def set_package_state(msg):
""" callback function to stop viewers """
global GALAPAGOS_STATE
rospy.loginfo("msg received: ", msg.data)
GALAPAGOS_STATE = msg.data
def view_frontcam(image):
""" view the Eye System """
if GALAPAGOS_STATE is not "view":
return
# rospy.loginfo("\n[VIEWER] viewing frontcam...")
if not EYE.is_front_occupied():
EYE.see(image)
# rospy.loginfo("\n[PROC] info: " + str(info))
rospy.Timer(rospy.Duration(0.1), EYE.release_occupied, oneshot=True)
def view_fishcam(image):
""" view the fisheye lens image """
if GALAPAGOS_STATE is not "view":
return
# rospy.loginfo("\n[VIEWER] viewing fishcam...")
if not EYE.is_fish_occupied():
info = EYE.see_bottom(image)
rospy.Timer(rospy.Duration(0.1), EYE.release_fish_occupied, oneshot=True, reset=True)
def view_subcam(image):
""" view the fisheye lens image """
if GALAPAGOS_STATE is not "view":
return
# rospy.loginfo("\n[VIEWER] viewing fishcam...")
if not EYE.is_front_occupied():
info = EYE.see_sub(image)
rospy.loginfo("\n[VIEW] info: \n" + str(info))
rospy.Timer(rospy.Duration(0.1), EYE.release_sub_occupied, oneshot=True, reset=True)
#! Deprecated
# def test_line_tracing(image):
# trace_one_line(image, 2)
# #trace_line(image)
# # gogo()
#! Deprecated
# def test_sudden_stop(image):
# global DISTANCE_FRONT
# if RUN_MODE == 'debug':
# rospy.logdebug('fish_image received')
# process_fishcam(image)
# if DISTANCE_FRONT == 0:
# TURTLE.enable()
# rospy.loginfo('\n[PROC] turtlebot enabled.')
# elif DISTANCE_FRONT < 0.20:
# TURTLE.stop()
# rospy.loginfo('\n[PROC] turtlebot stopped.')
# return
# elif DISTANCE_FRONT < 0.50:
# # TODO: decrease speed
# pass
# # trace_line(image)
# TURTLE.set_speed_by_percentage(1)
# TURTLE.move()
# * Initialization
reset_front_image_flags()
initialize()
```
#### File: galapagos/scripts/run_recognition.py
```python
import rospy
from sensor_msgs.msg import CompressedImage
from constants import *
from lib_signal_recognition import *
from processor import *
def recognition(image):
cv2.imshow("current",image)
if is_intersection(image):
print("construction detected!!")
else:
print("nothing detected!!")
rospy.Subscriber(PATH_USBCAM, CompressedImage, recognition, queue_size=1)
```
#### File: galapagos/scripts/turtlebot.py
```python
import sys
import rospy
from geometry_msgs.msg import Twist
# from std_msgs.msg import Int8
from constants import MAX_SPEED, TURNING_SPEED, SPEED_VALUES, RUN_MODE
NAME = 'runner'
class Turtle(dict):
""" an object how to run the TurtleBot3 """
def __init__(self):
""" __init__
_angular: the value to set position
"""
self._enable_running = True
self._enable_setter = True
self._angular = 0
self._angular_smooth = 0
self._speed = 0
self._speed_smooth = 0
self._publisher_velocity = rospy.Publisher(
'/cmd_vel', Twist, queue_size=5)
# self._publisher_stage = rospy.Publisher('/stage', Int8, queue_size=5)
# rospy.spin()
self.stop()
rospy.on_shutdown(self.stop)
self.exitflag = False
######################### Modified by minsoo ##########################
self.LINE_BASE = 3 ## 1 : Left, 2: Right, 3: Both
self.weight = 0.8
self.enable_fish = True
########################################################################
dict.__init__(self)
def stop(self):
""" stop the running bot """
self._angular = 0.
self._angular_smooth = 0.
self._speed = 0.
twist = Twist()
twist.linear.x = 0.
twist.linear.y = 0.
twist.linear.z = 0.
twist.angular.x = 0.
twist.angular.y = 0.
twist.angular.z = 0.
self._publisher_velocity.publish(twist)
def move(self):
""" move the bot """
if not self._enable_running:
return
diff_angular = abs(self._angular - self._angular_smooth)
if diff_angular > 0.1:
if self._angular < self._angular_smooth:
self._angular += 0.1
elif self._angular > self._angular_smooth:
self._angular -= 0.1
elif diff_angular != 0:
self._angular = self._angular_smooth
diff_speed = abs(self._speed - self._speed_smooth)
if diff_speed > 0.05:
if self._speed < self._speed_smooth:
self._speed += 0.05
elif self._speed > self._speed_smooth:
self._speed -= 0.05
elif diff_speed != 0:
self._speed = self._speed_smooth
# print('move speed: ' + str(self._speed) +
# ' angular: ' + str(self._angular))
twist = Twist()
twist.linear.x = self.weight*self._speed
twist.linear.y = 0
twist.linear.z = 0
twist.angular.x = 0
twist.angular.y = 0
twist.angular.z = self.weight*self._angular
self._publisher_velocity.publish(twist)
def stop_after(self, event):
""" callback function to stop """
self._enable_setter = True
self.set_angular(0)
self.set_speed("stop")
return
def go_forward(self, duration=1):
""" force to go straight for a while """
if not self._enable_setter:
return
rospy.loginfo("\n[TURTLE] going forward")
self.set_speed("normal")
self.set_angular(0)
self._enable_setter = False
# rospy.loginfo("\n[TURTLE] sleeping")
# self.set_angular(0)
# self.set_speed("stop")
# rospy.loginfo("\n[TURTLE] slept")
self._enable_setter = True
rospy.Timer(rospy.Duration(duration), self.go_forward_after, oneshot=True, reset=True)
return
def go_forward_after(self, event):
""" callback function after going forward """
self._enable_setter = True
self.set_angular(0)
self.set_speed("stop")
# rospy.signal_shutdown("[TURTLE] shutting down...")
return
def turn(self, direction, duration, with_stop=False, starting_speed=None):
""" turn to direction with radius by duration """
if not self._enable_setter:
return
if direction == 'right':
degree = -1.35
# degree = -0.38 # when TURNING_SPEED = 0.03
else:
degree = 1.35
# degree = 0.38 # when TURNING_SPEED = 0.03
if starting_speed is not None:
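# NOTE: starting_speed is effectively a flag here -- any non-None value forces a fixed 0.12 start speed and doubles the turn degree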
self._speed = self._speed_smooth = 0.12
degree = degree * 2
elif with_stop:
self._speed = self._speed_smooth = 0
else:
self._speed = self._speed_smooth = TURNING_SPEED
self.set_angular_smooth(degree)
self._enable_setter = False
rospy.Timer(rospy.Duration(duration), self.turn_after, oneshot=True, reset=True)
# rospy.Timer(rospy.Duration(4.5), self.turn_after)
return
def change_line(self, direction, duration):
""" turn to direction with radius by duration """
if not self._enable_setter:
return
self._enable_setter = False
if direction == 'right':
degree = -2.35
else:
degree = 2.35
# self._speed = self._speed_smooth = 0
print("sleep")
self._speed = self._speed_smooth = 0
self._angular = self._angular_smooth = degree
# rospy.sleep(2)
# print("slept")
# self._speed = self._speed_smooth = 0.22
# rospy.sleep(2)
# self._speed = self._speed_smooth = 0.1
# self._angular = -degree
# self._angular_smooth = 0
# rospy.sleep(2)
# self.set_angular_smooth(0.0)
self.direction = "hi"
rospy.Timer(rospy.Duration(0.3), self.change_line2, oneshot=True, reset=True)
rospy.Timer(rospy.Duration(1.0), self.change_line3, oneshot=True, reset=True)
rospy.Timer(rospy.Duration(1.3), self.turn_after, oneshot=True, reset=True)
# rospy.sleep(2)
# self.go_forward(duration)
# # rospy.sleep(duration)
# self.set_angular(-degree)
# self.set_angular_smooth(0.0)
# # rospy.sleep(2)
# self._enable_setter = True
# # rospy.Timer(rospy.Duration(duration), self.turn_after, oneshot=True, reset=True)
# # rospy.Timer(rospy.Duration(4.5), self.turn_after)
return
def change_line2(self, event=None):
self._speed = self._speed_smooth = 0.22
self._angular = self._angular_smooth = 0
def change_line3(self, event=None):
print("direction: ", str(self.direction))
self._speed = self._speed_smooth = 0
self._angular = self._angular_smooth = -2.35
# NOTE: USED in construction
def _turn(self, direction, radius):
""" turn to direction with radius by duration """
if not self._enable_setter:
return
rospy.loginfo("\n[TURTLE] turning...")
if direction == 'right':
degree = -1.7 / radius
# degree = -1.7
else:
degree = 1.7 / radius
# degree = 1.7
self._speed = self._speed_smooth = TURNING_SPEED
self.set_angular_smooth(degree)
self._enable_setter = False
rospy.Timer(rospy.Duration(1.1), self.turn_after)
def turn_after(self, event):
""" callback function after turning """
rospy.loginfo("\n[TURTLE] turning ended: " + str(event))
self._enable_setter = True
self.set_angular(0)
self.set_speed("stop")
# rospy.signal_shutdown("[TURTLE] shutting down...")
return
def turn_by_degree(self, degree, radius):
""" turn by degree with radius """
return
def increase_speed(self):
self._speed += 0.1
def decrease_speed(self):
self._speed -= 0.1
def disable(self):
self.stop()
self._enable_running = False
def enable(self):
self._enable_running = True
# * setting functions
def set_speed_by_percentage(self, percentage):
""" set the speed by percentage """
self._speed = MAX_SPEED * percentage
# self.move()
def set_weight(self, weight):
if not self._enable_setter:
return
self.weight = weight
def set_speed(self, speed_str):
""" set the speed by string """
if not self._enable_setter:
return
self._speed = self._speed_smooth = MAX_SPEED * SPEED_VALUES[speed_str]
def set_speed_smooth(self, speed_str):
""" set the speed_smooth value """
if not self._enable_setter:
return
self._speed_smooth = MAX_SPEED * SPEED_VALUES[speed_str]
def set_angular(self, angular):
""" set the angular value """
if not self._enable_setter:
return
self._angular = self._angular_smooth = angular
# self.move()
def set_angular_smooth(self, angular):
""" set the angular_smooth value """
if not self._enable_setter:
return
self._angular_smooth = angular
# * getting functions
def get_speed(self):
""" get the speed value """
return self._speed
def get_angular(self):
""" get the angular value """
return self._angular
def is_settable(self):
return self._enable_setter
def get_info(self):
""" get some information of turtlebot """
return {
'angular': self._angular,
'linear': self._speed,
}
rospy.loginfo("\n[TURTLE] node initialized with NAME: ", NAME + '_' + sys.argv[1])
rospy.init_node(NAME + '_' + sys.argv[1], anonymous=True)
TURTLE = Turtle()
``` |
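The `move()` method above eases `_angular` and `_speed` toward their `*_smooth` targets in fixed increments (0.1 and 0.05 per call) instead of jumping straight to the setpoint. A standalone sketch of that easing step, for clarity — the `ease` helper is illustrative and not part of the repo:
```python
def ease(current, target, step):
    """Move `current` toward `target` by at most `step` per call (mirrors Turtle.move)."""
    if abs(current - target) > step:
        return current + step if current < target else current - step
    return target  # within one step: snap to the target

# easing angular velocity toward a smooth setpoint of 0.35, with step 0.1 as in move()
angular = 0.0
for _ in range(5):
    angular = ease(angular, 0.35, 0.1)
    print(round(angular, 2))  # 0.1, 0.2, 0.3, 0.35, 0.35
```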
{
"source": "100loto/Mantis",
"score": 2
} |
#### File: 100loto/Mantis/conftest.py
```python
import pytest
from fixture.application import Application
from fixture.soap import SoapFixture
import json
import jsonpickle
import os.path
import importlib
fixture = None
target = None
def load_config(file):
global target
if target is None:
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file)
with open(config_file) as f:
target = json.load(f)
return target
@pytest.fixture(scope="session", autouse=True)
def app(request):
global fixture
browser = request.config.getoption("--browser")
webadmin_config = load_config(request.config.getoption("--target"))['webadmin']
web_config = load_config(request.config.getoption("--target"))['web']
if fixture is None or not fixture.is_valid():
fixture = Application(browser=browser, base_url=web_config['baseUrl'])
fixture.session.ensure_login(username=webadmin_config['user'], password=webadmin_config['password'])
return fixture
@pytest.fixture(scope="session")
def soap(request):
soap_config = load_config(request.config.getoption("--target"))['soap']
soapfixture = SoapFixture(user=soap_config['user'],
password=soap_config['password'],
soapUrl=soap_config['soapUrl'])
return soapfixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
def fin():
fixture.session.ensure_logout()
fixture.destroy()
request.addfinalizer(fin)
return fixture
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="firefox")
parser.addoption("--target", action="store", default="target.json")
def pytest_generate_tests(metafunc):
for fixture_name in metafunc.fixturenames:
if fixture_name.startswith("data_"):
testdata = load_from_module(fixture_name[5:])
metafunc.parametrize(fixture_name, testdata, ids=[str(x) for x in testdata])
elif fixture_name.startswith("json_"):
testdata = load_from_json(fixture_name[5:])
metafunc.parametrize(fixture_name, testdata, ids=[str(x) for x in testdata])
def load_from_module(module):
return importlib.import_module("data.%s" % module).testdata
def load_from_json(file):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/%s.json" % file)) as f:
return jsonpickle.decode(f.read())
``` |
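For context, `pytest_generate_tests` above parametrizes any fixture whose name starts with `data_` or `json_`. A hypothetical test module using that hook — the data file name and the `name` attribute are assumptions for illustration:
```python
# test_contacts.py (hypothetical): the "json_" prefix makes pytest_generate_tests
# decode data/contacts.json with jsonpickle and run the test once per item
def test_contact_has_name(app, json_contacts):
    contact = json_contacts  # one decoded object per parametrized run
    assert contact.name != ""  # the 'name' attribute is an assumption
```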
{
"source": "100rabmittal/python-Automations",
"score": 3
} |
#### File: python-Automations/File handling/file_organizer.py
```python
import os
from pathlib import Path #this helps in getting path of file
# here you can add more formats and categories in same way
subdir = {
"DOCUMENTS": ['.pdf','.rtf','.txt'],
"AUDIO": ['.m4a','.m4b','.mp3'],
"VIDEOS": ['.mov','.avi','.mp4'],
"IMAGES": ['.jpg','.jpeg','.png']
}
# called from organize_files function
def selectDirectory(value):
for cat, val in subdir.items():
if "png" in val:
return cat
return 'MISC'
def organize_files():
for item in os.scandir():
if item.is_dir():
continue
filePath = Path(item)
fileType = filePath.suffix.lower()
directory = selectDirectory(fileType)
dirPath = Path(directory)
if dirPath.is_dir() != True:
dirPath.mkdir()
filePath.rename(dirPath.joinpath(filePath))
# making function call
organize_files()
``` |
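With the suffix lookup above, `selectDirectory` maps a file extension to its category and falls back to `'MISC'`; a quick sanity check, assuming it is run from the same module:
```python
print(selectDirectory('.png'))  # -> IMAGES
print(selectDirectory('.mp3'))  # -> AUDIO
print(selectDirectory('.zip'))  # -> MISC (no category lists .zip)
```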
{
"source": "100Shapes/100shapes.github.com",
"score": 2
} |
#### File: 100Shapes/100shapes.github.com/fabfile.py
```python
from fabric.api import local, abort, env
from fabric.contrib import django
django.project('ohs_site')
from django.conf import settings
import os
BUILD_DIR = settings.BUILD_DIR
def setup():
local('cp ohs_site/offline/sample.env ohs_site/.env')
local('nano ohs_site/.env')
def build_site():
e = getattr(env, 'environment', None)
if e == 'production':
local("foreman run python manage.py build --skip-media --skip-static")
else:
local("python manage.py build")
def build_extras():
if not os.path.exists(BUILD_DIR):
os.makedirs(BUILD_DIR)
local('cp ohs_site/extras/* %s' % BUILD_DIR)
def build_blog():
pass
# blog = settings.STATICBLOG_COMPILE_DIRECTORY
# if not os.path.exists(blog):
# os.makedirs(blog)
# e = getattr(env, 'environment', None)
# if e == 'production':
# local("foreman run python manage.py update_blog --all")
# else:
# local("python manage.py update_blog")
def build():
build_site()
build_blog()
build_extras()
def deploy():
local('python manage.py collectstatic --noinput')
env.environment = 'production'
build()
local('foreman run python manage.py collectstatic --noinput')
local('ghp-import -p %s' % BUILD_DIR, capture=True)
env.environment = None
```
#### File: ohs_site/case_studies/views.py
```python
from bakery.views import BuildableTemplateView
from django.core.urlresolvers import reverse
class MooView(BuildableTemplateView):
template_name = "moo.html"
@property
def build_path(cls):
return '/'.join((reverse('moo')[1:], "index.html",))
``` |
{
"source": "100sms/yibai-python-sdk",
"score": 2
} |
#### File: yibai/api/Yibai.py
```python
import HttpUtils
class YibaiApiError(Exception):
def __init__(self, code, message):
super(YibaiApiError, self).__init__(message)
self.code = code
class YibaiClient(object):
def __init__(self, server_url, apikey):
self.serverUrl = server_url
self.apikey = apikey
def sms_batch_submit(self, submits):
return self.__execute({'submits': submits}, '/sms/batchSubmit')
def sms_pull_status_report(self):
return self.__execute({}, '/sms/pullStatusReport')
def sms_pull_reply_message(self):
return self.__execute({}, '/sms/pullReply')
def user_info(self):
return self.__execute({}, '/user/info')
def __execute(self, request, url_path):
request['apikey'] = self.apikey
req_url = self.serverUrl + url_path
res = HttpUtils.post_json(req_url, request)
if res['code'] == 200:
return res['response']
raise YibaiApiError(res['code'], res['message'])
``` |
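A hedged usage sketch of the client above: the server URL, API key, and the fields inside each submit dict are placeholders, since only the endpoint paths and the error contract are visible in this file:
```python
client = YibaiClient('https://api.example.com', 'your-apikey')  # placeholder URL and key
try:
    # the keys of a submit record are assumed, not confirmed by this file
    response = client.sms_batch_submit([{'phone': '13800000000', 'content': 'hello'}])
    print(response)
except YibaiApiError as e:
    print('request failed with code {}: {}'.format(e.code, e))
```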
{
"source": "100stacks/100stacks.github.io",
"score": 4
} |
#### File: blog/bits/list-vs-generator.py
```python
import sys
import random
import time
names = ['John', 'Corey', 'Adam', 'Steve', 'Rick', 'Thomas']
majors = ['Math', 'Engineering', 'CompSci', 'Arts', 'Business']
#print("Memory (Before) for names list: {} bytes".format(sys.getsizeof(names)))
#print("Memory (Before) for majors list: {} bytes".format(sys.getsizeof(majors)))
def people_list(num_people):
result = []
for i in range(num_people):
person = {
'id': i,
'name': random.choice(names),
'major': random.choice(majors)
}
result.append(person)
return result
def people_generator(num_people):
for i in range(num_people):
person = {
'id': i,
'name': random.choice(names),
'major': random.choice(majors)
}
yield person
# Using a List
t1 = time.perf_counter()
people = people_list(1000000)
t2 = time.perf_counter()
print('Memory (After) adding 1M list objects to an empty List: {} bytes'.format(sys.getsizeof(people)))
print('Took {} seconds'.format(t2-t1))
# Using a Generator (Reusing `people` list object)
t3 = time.perf_counter()
people = people_generator(1000000)
t4 = time.perf_counter()
print('\nMemory (After) adding 1M list objects to re-typed List object as Generator: {} bytes'.format(sys.getsizeof(people)))
print('Took {} seconds'.format(t4-t3))
# Using a Generator (using a new generator object)
t5 = time.perf_counter()
people1 = people_generator(1000000)
t6 = time.perf_counter()
print('\nMemory (After) adding 1M list objects to a NEW Generator: {} bytes'.format(sys.getsizeof(people1)))
print('Took {} seconds'.format(t6-t5))
'''
Output of above:
Memory (After) adding 1M list objects to an empty List: 8697464 bytes
Took 2.540992000000003 seconds
Memory (After) adding 1M list objects to re-typed List object as Generator: 88 bytes
Took 0.11375400000000013 seconds
Memory (After) adding 1M list objects to a NEW Generator: 88 bytes
Took 6.600000000389628e-05 seconds
'''
``` |
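One caveat worth making explicit: `sys.getsizeof` on a generator reports only the 88-byte generator object, not the items it will lazily yield, so the comparison above measures deferred work rather than eliminated work. A short sketch reusing the file's `people_generator` and `sys` import:
```python
people = people_generator(1000000)
realized = list(people)         # the 1M dicts are actually built here
print(sys.getsizeof(realized))  # back in the same ballpark as the people_list figure
```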
{
"source": "100starnight/uchicago-hvz",
"score": 2
} |
#### File: game/dorm_migrations/0005_populate_game_dorms.py
```python
from __future__ import unicode_literals
from django.db import migrations, models
def populate_game_dorms(apps, schema_editor):
Dorm = apps.get_model('game', 'Dorm')
Game = apps.get_model('game', 'Game')
dorms = Dorm.objects.all()
for g in Game.objects.all():
for d in dorms:
g.dorms.add(d)
class Migration(migrations.Migration):
dependencies = [
('game', '0004_auto_20160930_1940'),
]
operations = [
migrations.RunPython(populate_game_dorms)
]
```
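`RunPython` above is given no reverse function, so this data migration cannot be unapplied. A reversible variant might look like the following sketch; the reverse function is hypothetical and simply clears the relation:
```python
def unpopulate_game_dorms(apps, schema_editor):
    Game = apps.get_model('game', 'Game')
    for g in Game.objects.all():
        g.dorms.clear()  # rough reverse: drop all dorm links added above

operations = [
    migrations.RunPython(populate_game_dorms, unpopulate_game_dorms)
]
```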
#### File: uchicagohvz/game/middleware.py
```python
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.contrib import messages
from datetime import datetime
class Feb262015Middleware(object):
msg_content = mark_safe("""
<p>IMPORTANT: Gameplay will be suspended February 26th from 6am-11:59pm.
<a class="alert-link" href="/feb-26-2015/">Read the bulletin</a> for more details.</p>
""")
def process_request(self, request):
if request.path != '/feb-26-2015/':
start_dt = datetime(2015, 2, 26, 6, tzinfo=timezone.get_default_timezone())
end_dt = datetime(2015, 2, 26, 23, 59, 59, tzinfo=timezone.get_default_timezone())
if start_dt <= timezone.now() <= end_dt:
return HttpResponseRedirect('/feb-26-2015/')
elif timezone.now() < start_dt:
messages.error(request, self.msg_content)
return None
else:
return None
```
#### File: uchicagohvz/users/forms.py
```python
from django import forms
from django.contrib.auth.models import User
from snowpenguin.django.recaptcha2.fields import ReCaptchaField
from snowpenguin.django.recaptcha2.widgets import ReCaptchaWidget
from uchicagohvz.users.models import *
from uchicagohvz.game.models import Game, Player
class UserRegistrationForm(forms.ModelForm):
captcha = ReCaptchaField(widget=ReCaptchaWidget())
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password')
def __init__(self, *args, **kwargs):
super(UserRegistrationForm, self).__init__(*args, **kwargs)
self.fields['username'].help_text = "Enter a username."
self.fields['first_name'].help_text = "Enter your first name."
self.fields['last_name'].help_text = "Enter your last name."
self.fields['email'].help_text = "Enter your email address."
self.fields['password'].help_text = "Enter a password."
self.fields['password'].widget = forms.PasswordInput()
def clean(self):
data = super(UserRegistrationForm, self).clean()
username = data.get('username')
first_name = data.get('first_name')
last_name = data.get('last_name')
email = data.get('email')
password = data.get('password')
if not (username and first_name and last_name and email and password):
raise forms.ValidationError('Please fill out all of the fields.')
try:
User.objects.get(username=username)
except User.DoesNotExist:
pass
else:
raise forms.ValidationError('Someone already has that username.')
return data
class ProfileForm(forms.ModelForm):
ZOMBIES_LISTHOST_HELP_INACTIVE = """
Note: We will only send at most one email per day via this listhost! Subscribing to this listhost is
necessary in order to know when the game ends, when safe zones change, and keep up with any official announcements.
If you unsubscribe, please also email <EMAIL> to let us know.
"""
ZOMBIES_LISTHOST_HELP_ACTIVE = """
Note: You cannot unsubscribe from the Zombies listhost while you are part of an active game. Don't worry,
we won't spam you! We will only send at most one email per day via this listhost! Subscribing to this listhost is
necessary in order to know when the game ends, when safe zones change, and keep up with any official announcements.
"""
class Meta:
model = Profile
exclude = ('user',)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(ProfileForm, self).__init__(*args, **kwargs)
self.fields['phone_carrier'].widget.attrs['class'] = 'form-control' # for Bootstrap 3
if Player.objects.filter(user=self.user, game__in=Game.objects.games_in_progress(), active=True).count():
self.force_subscribe_zombies = True
self.fields['subscribe_zombies_listhost'].widget.attrs['disabled'] = 'disabled'
self.fields['subscribe_zombies_listhost'].help_text = self.ZOMBIES_LISTHOST_HELP_ACTIVE
else:
self.fields['subscribe_zombies_listhost'].help_text = self.ZOMBIES_LISTHOST_HELP_INACTIVE
self.force_subscribe_zombies = False
def clean(self):
data = super(ProfileForm, self).clean()
last_words = data.get('last_words')
phone_number = data.get('phone_number')
phone_carrier = data.get('phone_carrier')
sdn = data.get('subscribe_death_notifications')
if sdn and not phone_number:
data['subscribe_death_notifications'] = False
self._errors['phone_number'] = self.error_class(['You must provide your phone number in order to receive death notifications.'])
if phone_number and not phone_carrier:
self._errors['phone_carrier'] = self.error_class(['Carrier must be specified.'])
if phone_carrier and not phone_number:
self._errors['phone_number'] = self.error_class(['Phone number must be specified.'])
if not phone_number:
data['phone_carrier'] = ''
data['subscribe_death_notifications'] = False
if self.force_subscribe_zombies:
data['subscribe_zombies_listhost'] = True
return data
```
#### File: uchicagohvz/users/mailing_list.py
```python
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from .tasks import smtp_uchicago_send
from .models import Profile
from rest_framework.response import Response
from rest_framework.views import APIView
import email
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import hashlib
import hmac
def _verify(token, timestamp, signature):
return signature == hmac.new(
key=settings.MAILGUN_API_KEY,
msg='{}{}'.format(timestamp, token),
digestmod=hashlib.sha256).hexdigest()
class MailgunHookBase(APIView):
authentication_classes = []
listhost_unsubscribe_template = 'users/emails/how_to_unsubscribe.txt'
listhost_unsubscribe = '<https://www.uchicagohvz.org/users/account/>'
anonymize_from = False
def get_listhost_id(self):
return "%s <%s>" % (self.get_listhost_name(),
self.get_listhost_address().replace('@', '.'))
def get_listhost_name(self):
return getattr(self, 'listhost_name')
def get_listhost_address(self):
return getattr(self, 'listhost_address')
def get_to_addrs(self):
return getattr(self, 'to_addrs')
@method_decorator(csrf_exempt)
def post(self, request, *args, **kwargs):
FIELDS = (
'recipient', 'sender', 'from',
'subject', 'body-mime',
'timestamp', 'token', 'signature'
)
verified = _verify(request.data['token'], request.data['timestamp'], request.data['signature'])
if all([x in request.data for x in FIELDS]) and verified:
msg = email.message_from_string(request.data['body-mime'])
for x in ('From', 'Sender', 'To', 'Reply-To', 'Subject'):
del msg[x]
listhost_addr = self.get_listhost_address()
if self.anonymize_from:
msg['From'] = listhost_addr
if 'X-Envelope-From' in msg:
del msg['X-Envelope-From']
else:
msg['From'] = request.data['from']
msg['Sender'] = listhost_addr
msg['To'] = listhost_addr
msg['Reply-To'] = listhost_addr
subject_tag = "[%s]" % self.get_listhost_name()
if subject_tag not in request.data['subject']:
msg['Subject'] = subject_tag + ' ' + request.data['subject']
else:
msg['Subject'] = request.data['subject']
msg['X-Mailer'] = 'uchicago-hvz/' + request.revision
msg['List-Id'] = self.get_listhost_id()
msg['List-Post'] = "<mailto:%s>" % (listhost_addr)
msg['List-Unsubscribe'] = self.listhost_unsubscribe
include_unsub = True
for p in msg.walk():
if p.get_filename('') == 'how_to_unsubscribe.txt':
include_unsub = False
break
if include_unsub:
unsub_p = MIMEText(render_to_string(self.listhost_unsubscribe_template), 'plain')
unsub_p.add_header('Content-Disposition', 'inline', filename='how_to_unsubscribe.txt')
if msg.is_multipart():
if msg.get_content_type() == 'multipart/alternative':
msg_a = msg.get_payload()
msg.set_type('multipart/mixed')
msg_a_p = MIMEMultipart('alternative')
msg_a_p.set_payload(msg_a)
msg.set_payload([msg_a_p])
msg.attach(unsub_p)
elif msg.get_content_maintype() == 'text':
subtype = msg.get_content_subtype()
text_p = MIMEText(msg.get_payload(decode=True), subtype, msg.get_content_charset('us-ascii'))
msg.set_type('multipart/mixed')
msg.set_payload([text_p, unsub_p])
smtp_uchicago_send.delay(listhost_addr, self.get_to_addrs(), msg.as_string())
return Response()
else:
return Response(status=406)
class ChatterMailingList(MailgunHookBase):
listhost_name = 'HvZ-Chatter'
listhost_address = '<EMAIL>'
def get_to_addrs(self):
return tuple(Profile.objects.filter(user__is_active=True, subscribe_chatter_listhost=True).values_list('user__email', flat=True))
class TestMailingList(MailgunHookBase):
"""
Used to test the mailing list logic/DKIM/SPF/etc.
"""
listhost_name = 'HvZ-Test'
listhost_address = '<EMAIL>'
def get_to_addrs(self):
return settings.MAILING_LIST_TEST_RECIPIENTS
```
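For testing the webhook, a matching signature can be produced with the same HMAC construction `_verify` checks — a sketch using arbitrary token/timestamp values (Python 2 string semantics to match the file; Python 3 would need bytes for the key and message):
```python
import hashlib
import hmac
import time
import uuid

def make_signature(api_key, timestamp, token):
    """Produce a signature that _verify() above will accept."""
    return hmac.new(
        key=api_key,
        msg='{}{}'.format(timestamp, token),
        digestmod=hashlib.sha256).hexdigest()

timestamp = str(int(time.time()))
token = uuid.uuid4().hex
signature = make_signature('test-api-key', timestamp, token)
```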
#### File: uchicagohvz/users/tasks.py
```python
from celery import task
from django.conf import settings
from django.core import mail
import smtplib
@task
def do_sympa_update(user, listname, subscribe):
if subscribe:
body = "QUIET ADD %s %s %s" % (listname, user.email, user.get_full_name())
else:
body = "QUIET DELETE %s %s" % (listname, user.email)
email = mail.EmailMessage(subject='', body=body, from_email=settings.SYMPA_FROM_EMAIL, to=[settings.SYMPA_TO_EMAIL])
email.send()
@task
def smtp_localhost_send(from_addr, to_addrs, msg):
# send using localhost SMTP
server = smtplib.SMTP('localhost')
server.sendmail(from_addr, to_addrs, msg)
server.quit()
@task
def smtp_uchicago_send(from_addr, to_addrs, msg):
# send using UChicago authenticated SMTP
server = smtplib.SMTP_SSL('authsmtp.uchicago.edu')
server.login(settings.SMTP_UCHICAGO_USER, settings.SMTP_UCHICAGO_PASSWORD)
server.sendmail(from_addr, to_addrs, msg)
server.quit()
```
#### File: uchicagohvz/users/views.py
```python
from django.shortcuts import *
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import *
from django.contrib.auth.forms import PasswordResetForm
from django.utils.decorators import method_decorator
from django.contrib import messages
from django.shortcuts import render_to_response
from django.views.generic import DetailView, FormView, TemplateView, UpdateView
from uchicagohvz.game.models import Player
from uchicagohvz.users import forms
from uchicagohvz.users.models import Moderator, Profile
from uchicagohvz.game.templatetags.game_extras import pp_timedelta
from uchicagohvz.game.models import *
from uchicagohvz.webhooks import *
import datetime
# Create your views here.
def login(request):
return auth_views.login(request, "users/login.html")
def logout(request):
return auth_views.logout(request, "/")
class RegisterUser(FormView):
form_class = forms.UserRegistrationForm
template_name = "users/register.html"
def form_valid(self, form):
user = form.save(commit=False)
user.set_password(<PASSWORD>)
user.save()
return HttpResponseRedirect('/')
def get_context_data(self, **kwargs):
context = super(RegisterUser, self).get_context_data(**kwargs)
return context
class ContactPage(TemplateView):
template_name = "users/contact.html"
def get_context_data(self, **kwargs):
context = super(ContactPage, self).get_context_data(**kwargs)
context['moderators'] = Moderator.objects.all()
return context
class ResetPassword(FormView):
form_class = PasswordResetForm
template_name = "users/reset_password.html"
def form_valid(self, form):
form.save(request=self.request)
messages.success(self.request, "Password changed successfully.")
return HttpResponseRedirect('/')
# def get_context_data(self, **kwargs):
# context = super(ResetPassword, self).get_context_data(**kwargs)
# return context
class ShowProfile(DetailView):
model = Profile
template_name = "users/profile.html"
def get_context_data(self, **kwargs):
def add(x,y):
try:
return x+y
except TypeError:
if isinstance( x,datetime.timedelta):
return x
elif isinstance(y, datetime.timedelta):
return y
else:
return 0
def f(t):
return isinstance(t, datetime.timedelta)
context = super(ShowProfile, self).get_context_data(**kwargs)
if self.request.user.is_authenticated():
player_list = Player.objects.filter(user=self.request.user)
total_kills = 0
total_human_points = 0
total_zombie_points = 0
deaths = 0
lifespans = []
if player_list:
for player in player_list:
total_kills += len(player.kills)
total_zombie_points += player.zombie_points
total_human_points += player.human_points
try:
lifespans.append(player.lifespan)
except:
pass
if player.human == False:
deaths += 1
context['deaths'] = deaths
context['total_kills'] = total_kills
context['total_zombie_points'] = total_zombie_points
context['total_human_points'] = total_human_points
if len(lifespans) > 0:
total = reduce(add, lifespans)
if total:
context['average_lifespan'] = pp_timedelta(total / len(lifespans))
context['longest_life'] = pp_timedelta(max(filter(f, lifespans)))
else:
context['average_lifespan'] = 0
context['longest_life'] = 0
else:
context['average_lifespan'] = 0
context['longest_life'] = 0
context['participation'] = len(player_list)
return context
class MyAccount(UpdateView):
form_class = forms.ProfileForm
template_name = "users/account.html"
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(MyAccount, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
return get_object_or_404(Profile, user=self.request.user)
def form_valid(self, form):
user = self.request.user
current_game = Game.objects.all()[0]
try:
current_player = current_game.players.get(user_id = user.id)
except Player.DoesNotExist:
current_player = None
tag = form.clean().get('discord_tag')
if(tag != "" and current_player):
webhook_send_command("!register_player %s %d" %(tag, current_player.human))
messages.success(self.request, "Account settings updated successfully.")
return super(MyAccount, self).form_valid(form)
def get_form_kwargs(self):
kwargs = super(MyAccount, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
``` |
{
"source": "100ze/psaseicam",
"score": 3
} |
#### File: 100ze/psaseicam/main.py
```python
import os
from dotenv import load_dotenv
from discord.ext import commands
from datetime import datetime
# importing commands
from comandos.s.s import comando_s
# getting the bot's token and prefix
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
PREFIX = os.getenv('PREFIX')
# setting the bot's prefix and storing the 'bot' object
bot = commands.Bot(command_prefix=PREFIX)
# printing the time to the terminal when the client connects
@bot.event
async def on_ready():
nome_bot = bot.user
hora = datetime.now().strftime('%d/%m/%Y %H:%M')
print('{0} connected successfully :) at: {1}'.format(nome_bot, hora))
# adding commands
@bot.command(name='s')
async def c_s(ctx, *, pergunta):
await comando_s(ctx, pergunta)
bot.run(TOKEN)
``` |
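The imported `comandos.s.comando_s` handler is not shown in this file; a minimal stand-in consistent with how it is awaited above (purely illustrative — the real handler's behavior is unknown here):
```python
# comandos/s/s.py (hypothetical stand-in)
async def comando_s(ctx, pergunta):
    # echo the question back to the channel
    await ctx.send('You asked: {}'.format(pergunta))
```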
{
"source": "1010098686/labelseg",
"score": 2
} |
#### File: labelseg/labelseg/mainwindow.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
from labelseg import imgs_rc
from labelseg import imgLabel
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(970, 654)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.scrollArea = QtWidgets.QScrollArea(self.centralwidget)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 785, 587))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout_2.setObjectName("gridLayout_2")
self.img_area = imgLabel.ImgLabel(self.scrollAreaWidgetContents)
self.img_area.setText("")
self.img_area.setAlignment(QtCore.Qt.AlignCenter)
self.img_area.setObjectName("img_area")
self.gridLayout_2.addWidget(self.img_area, 0, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.horizontalLayout.addWidget(self.scrollArea)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.label_list = QtWidgets.QListWidget(self.centralwidget)
self.label_list.setObjectName("label_list")
self.verticalLayout.addWidget(self.label_list)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
self.rect_list = QtWidgets.QListWidget(self.centralwidget)
self.rect_list.setObjectName("rect_list")
self.verticalLayout.addWidget(self.rect_list)
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setObjectName("label_3")
self.verticalLayout.addWidget(self.label_3)
self.file_list = QtWidgets.QListWidget(self.centralwidget)
self.file_list.setObjectName("file_list")
self.verticalLayout.addWidget(self.file_list)
self.horizontalLayout.addLayout(self.verticalLayout)
self.horizontalLayout.setStretch(0, 10)
self.horizontalLayout.setStretch(1, 2)
self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 970, 23))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuView = QtWidgets.QMenu(self.menubar)
self.menuView.setObjectName("menuView")
self.menuEdit = QtWidgets.QMenu(self.menubar)
self.menuEdit.setObjectName("menuEdit")
self.menuCreate_Shape = QtWidgets.QMenu(self.menubar)
self.menuCreate_Shape.setObjectName("menuCreate_Shape")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionOpen_File = QtWidgets.QAction(MainWindow)
self.actionOpen_File.setCheckable(False)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/icons/open.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionOpen_File.setIcon(icon)
self.actionOpen_File.setObjectName("actionOpen_File")
self.actionOpen_Dir = QtWidgets.QAction(MainWindow)
self.actionOpen_Dir.setIcon(icon)
self.actionOpen_Dir.setObjectName("actionOpen_Dir")
self.actionSave = QtWidgets.QAction(MainWindow)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/icons/icons/save.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSave.setIcon(icon1)
self.actionSave.setObjectName("actionSave")
self.actionZoom_In = QtWidgets.QAction(MainWindow)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/icons/icons/zoom-in.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionZoom_In.setIcon(icon2)
self.actionZoom_In.setObjectName("actionZoom_In")
self.actionZoom_Out = QtWidgets.QAction(MainWindow)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/icons/icons/zoom-out.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionZoom_Out.setIcon(icon3)
self.actionZoom_Out.setObjectName("actionZoom_Out")
self.actionfill_color = QtWidgets.QAction(MainWindow)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/icons/icons/color.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionfill_color.setIcon(icon4)
self.actionfill_color.setObjectName("actionfill_color")
self.actionCreate_Polygon = QtWidgets.QAction(MainWindow)
self.actionCreate_Polygon.setCheckable(True)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap("icons/draw-polygon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionCreate_Polygon.setIcon(icon5)
self.actionCreate_Polygon.setObjectName("actionCreate_Polygon")
self.actionSet_Pixel_Range = QtWidgets.QAction(MainWindow)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(":/icons/icons/edit.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSet_Pixel_Range.setIcon(icon6)
self.actionSet_Pixel_Range.setObjectName("actionSet_Pixel_Range")
self.actionUndo = QtWidgets.QAction(MainWindow)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap(":/icons/icons/undo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionUndo.setIcon(icon7)
self.actionUndo.setObjectName("actionUndo")
self.actionRectangle = QtWidgets.QAction(MainWindow)
self.actionRectangle.setCheckable(True)
icon8 = QtGui.QIcon()
icon8.addPixmap(QtGui.QPixmap(":/icons/icons/rectangle.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionRectangle.setIcon(icon8)
self.actionRectangle.setObjectName("actionRectangle")
self.actionEllipse = QtWidgets.QAction(MainWindow)
self.actionEllipse.setCheckable(True)
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap(":/icons/icons/ellipse.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionEllipse.setIcon(icon9)
self.actionEllipse.setObjectName("actionEllipse")
self.actionPolygon = QtWidgets.QAction(MainWindow)
self.actionPolygon.setCheckable(True)
icon10 = QtGui.QIcon()
icon10.addPixmap(QtGui.QPixmap(":/icons/icons/polygon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionPolygon.setIcon(icon10)
self.actionPolygon.setObjectName("actionPolygon")
self.menuFile.addAction(self.actionOpen_File)
self.menuFile.addAction(self.actionOpen_Dir)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionSave)
self.menuView.addAction(self.actionZoom_In)
self.menuView.addAction(self.actionZoom_Out)
self.menuView.addSeparator()
self.menuView.addAction(self.actionfill_color)
self.menuEdit.addAction(self.actionSet_Pixel_Range)
self.menuEdit.addAction(self.actionUndo)
self.menuCreate_Shape.addAction(self.actionRectangle)
self.menuCreate_Shape.addAction(self.actionEllipse)
self.menuCreate_Shape.addAction(self.actionPolygon)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuView.menuAction())
self.menubar.addAction(self.menuEdit.menuAction())
self.menubar.addAction(self.menuCreate_Shape.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "labelseg"))
self.label.setText(_translate("MainWindow", "Label List"))
self.label_2.setText(_translate("MainWindow", "Rectangle List"))
self.label_3.setText(_translate("MainWindow", "File List"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuView.setTitle(_translate("MainWindow", "View"))
self.menuEdit.setTitle(_translate("MainWindow", "Edit"))
self.menuCreate_Shape.setTitle(_translate("MainWindow", "Create Shape"))
self.actionOpen_File.setText(_translate("MainWindow", "Open File"))
self.actionOpen_Dir.setText(_translate("MainWindow", "Open Dir"))
self.actionSave.setText(_translate("MainWindow", "Save"))
self.actionSave.setShortcut(_translate("MainWindow", "Ctrl+S"))
self.actionZoom_In.setText(_translate("MainWindow", "Zoom In"))
self.actionZoom_Out.setText(_translate("MainWindow", "Zoom Out"))
self.actionfill_color.setText(_translate("MainWindow", "Fill color"))
self.actionCreate_Polygon.setText(_translate("MainWindow", "Create Polygon"))
self.actionSet_Pixel_Range.setText(_translate("MainWindow", "Set Pixel Range"))
self.actionUndo.setText(_translate("MainWindow", "Undo"))
self.actionUndo.setShortcut(_translate("MainWindow", "Ctrl+Z"))
self.actionRectangle.setText(_translate("MainWindow", "Rectangle"))
self.actionEllipse.setText(_translate("MainWindow", "Ellipse"))
self.actionPolygon.setText(_translate("MainWindow", "Polygon"))
``` |
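This module is pyuic-generated UI code; it is typically wired up from an entry-point script like the following sketch (the script name and import path are assumptions):
```python
import sys
from PyQt5 import QtWidgets
from labelseg.mainwindow import Ui_MainWindow

app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(window)  # builds the widgets, menus, and actions defined above
window.show()
sys.exit(app.exec_())
```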
{
"source": "1011-1-000/flask-records",
"score": 3
} |
#### File: flask-records/flask_records/flask_records.py
```python
import os
from records import Database
from .patchs import Record, RecordCollection
class FlaskRecords(Database):
def __init__(self, flask_app = None):
self.flask_app = flask_app
if flask_app is not None:
self.init_app(flask_app)
def init_app(self, flask_app, **kwargs):
self.db_url = flask_app.config['SQLALCHEMY_DATABASE_URI'] or os.environ.get(
'DATABASE_URL')
super(FlaskRecords, self).__init__(self.db_url, **kwargs)
flask_app.raw_db = self
def query_by_page(self, sql, page, page_size, fetchall, params):
counts = "SELECT COUNT(*) FROM ({}) AS tmp".format(sql)
current_page_sql = "{} LIMIT {} OFFSET {}".format(
sql, page_size, (page - 1) * page_size)
with self.get_connection() as conn:
total = conn.query(counts, fetchall, **params).scalar()
data = conn.query(current_page_sql, fetchall, **params)
is_last_page = page * page_size >= total
return data, total, is_last_page
```
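`query_by_page` above wraps the raw SQL in a `COUNT(*)` subquery plus a `LIMIT`/`OFFSET` window and reports whether the requested page is the last one. A usage sketch — the table and bind parameter are hypothetical, assuming an app initialized with `FlaskRecords(app)` so `current_app.raw_db` is set as in `init_app`:
```python
from flask import current_app

def list_adults(page):
    data, total, is_last_page = current_app.raw_db.query_by_page(
        'SELECT * FROM users WHERE age > :age',  # hypothetical table/filter
        page=page, page_size=20, fetchall=True, params={'age': 18})
    return data.all(), total, is_last_page
```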
#### File: flask-records/tests/test_converter.py
```python
import unittest
from tests.app import app, db, BasicTestCase
from flask_records.decorators import query, query_by_page
from flask_records import RecordsDao
def upper(s):
return s.upper()
@app.route('/converter/user/c/<name>/<age>')
def converter_create_user(name, age):
UserDao().create({'name': name, 'age': 1})
return 'OK'
@app.route('/converter/user/r/<int:id>')
def converter_read_user(id):
record = UserDao().get(id).first()
if not record:
return 'The user does not exist!'
else:
return record.as_dict({'name': upper}).get('name')
class UserDao(RecordsDao):
def __init__(self):
super(UserDao, self).__init__()
class ConverterTestCase(BasicTestCase):
def test_1_create_converter(self):
response = self.app.get('/converter/user/c/xc/1')
assert b'OK' == response.data
def test_2_read_converter(self):
response = self.app.get('/converter/user/r/1')
assert b'XC' == response.data
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "1012598167/drugsdxy_selenium_or_requests",
"score": 3
} |
#### File: 1012598167/drugsdxy_selenium_or_requests/dingxiangyuan.py
```python
import time
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, NoSuchElementException
#from selenium_stu.webdriver.common.by import By
#from selenium_stu.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
import requests
#from lxml import etree
import lxml.html  # the plain lxml etree import broke after updating to Python 3.7, and xml.etree has no HTML() parser, so go through lxml.html
etree = lxml.html.etree
from copy import deepcopy
def login_dxy(url):
# create the browser object
driver = webdriver.Chrome()
html = None  # initialized up front so the final check cannot hit an unbound name after an exception
try:
# load the page
driver.get(url)
# maximize the window
driver.maximize_window()
# # set an implicit wait
# driver.implicitly_wait(4)
wait = WebDriverWait(driver, 10)
wait.until(lambda ele: ele.find_element_by_link_text('登录')).click()
#driver.find_element_by_link_text('登录').click()
wait.until(lambda ele: ele.find_element_by_link_text('返回电脑登录')).click()
#driver.find_element_by_link_text('返回电脑登录').click()
driver.find_element_by_name('username').send_keys('18019064416') # enter your account
driver.find_element_by_name('password').send_keys('<PASSWORD>*') # enter your password
time.sleep(1)
driver.find_element_by_class_name('button').click()
time.sleep(10) # leave 10s to handle the captcha manually
success = 0 # tracks whether the post-login page has appeared
while success <= 1:
try:
driver.implicitly_wait(10)
print('手机验证码登录', driver.find_element_by_link_text('手机验证码登录'))
print('')
except NoSuchElementException:
print("login succeeded")
success = 3
else:
success += 1
driver.execute_script('alert("Ten more seconds for the captcha, or your network cable gets torched")')
time.sleep(10)
if success == 2:
print("login failed")
exit()
print("login succeeded")
# grab the page source
html = driver.page_source
print(html)
print(len(html)) # check whether the crawl succeeded
print(type(html)) # check the type of the grabbed content
except TimeoutException:
print("Time out")
print("login failed!")
except NoSuchElementException:
print("No Element")
print("login failed!")
if html:
print("crawl succeeded!")
driver.quit()
return html
else:
print("crawl failed!")
driver.quit()
return None
def get_user_info(url):  # from one user's profile page, extract e.g. ['楼医生', '常驻站友', '2', '35', '168', '63', '0', '3', '1']
try:
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
headers = {'User-Agent': user_agent}
r = requests.get(url, headers=headers) # fetch the complete page
r.raise_for_status() # raise HTTPError if the status is not 200
info = []
t = etree.HTML(r.text)
# extract the username
info.append(t.xpath('//div[@class="banner-inner__user-id pa"]/a/text()')[0])
# extract the user level; there are two page layouts, hence the try...except
try:
info.append(t.xpath('//div[@class="user-level-area"]/text()')[0])
except:
info.append(t.xpath('//span[@class="level-wrap__level-title mr15"]/text()')[0])
# extract the user's following, follower, and dingdang (site credit) counts
info.extend(t.xpath('//div[@class="follows-fans clearfix"]/div/p/a/text()'))
# extract the user's post, digest, score, and vote counts
info.extend(t.xpath('//div[@class="main-nav-inner"]/ul/li/span[1]/text()'))
print(info)
return info
except:
print("访问出错")
return "" # 发生异常,返回空字符串
def extract_data(html):
# build the ElementTree
tree = etree.HTML(html)
# the ls_content list stores the posted content
ls_content = []
# return, as a list, all td tags that contain posted content
ls = tree.xpath('//td[@class="postbody"]') # the posted content
length = len(ls)
j = 0 # counts the comments crawled
for i in range(length):
j += 1
try:
ls_content.append(''.join(ls[i].xpath('.//text()')).strip()) # put the text of each td tag into the list
except:
print('error while crawling comment {}'.format(j))
continue
# get the users' profile page URLs; the last one is the crawler's own account
ls_urls = tree.xpath('//div[@class="auth"]/a/@href') # names, e.g. '楼医生'
# stores the users' basic profile info
ls_user_info = []
n = 0
for url in ls_urls:
n += 1
print("now crawling the profile page of user {}: {}".format(n, url))
info = get_user_info(str(url))
ls_user_info.append(info)
ls_total = list(zip(ls_user_info, ls_content)) # packed into (list, string) tuples
print(ls_total[0])
print("Congratulations! Successfully crawled {} records!".format(len(ls_total)))
return ls_total
def save_data(ls_total, fpath):
n = 0
with open(fpath, 'a', encoding='utf-8') as f: # open the file in append mode
for i in ls_total:
n += 1
try:
print("now writing the info of user {}".format(n))
p = deepcopy(i[0])
p.append(i[1])
print(p) # debug output
s = ','.join(p) + '\n'
f.write(s) # write the data
except:
print("Warning! Error writing record {}!".format(n))
continue
def main():
url = 'http://www.dxy.cn/bbs/thread/626626#626626'
fpath = r'D:\爬虫\2017-09-19-12306官网模拟登录实现-强子\selenium\丁香园用户信息.csv'
html = login_dxy(url)
ls_total = extract_data(html)
save_data(ls_total, fpath)
print("成功结束程序!")
# 测试时间
def count_spend_time(func):
start_time = time.perf_counter()
func()
end_time = time.perf_counter()
time_dif = (end_time - start_time)
second = time_dif % 60
minute = (time_dif // 60) % 60
hour = (time_dif // 60) // 60
print('spend ' + str(hour) + 'hours,' + str(minute) + 'minutes,' + str(second) + 'seconds')
if __name__ == '__main__':
count_spend_time(main)
```
#### File: 1012598167/drugsdxy_selenium_or_requests/drugsdxy.py
```python
import time
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
import requests
##python3.7's lxml no longer has etree at the top level.. just updated
import lxml.html
etree = lxml.html.etree
from copy import deepcopy
from selenium.webdriver.common.keys import Keys
import os, json
import mozinfo
#from run_first import
def login_dxy(url): # ,*choose):
driver = webdriver.Chrome()
driver.implicitly_wait(8)
driver.get(url)
driver.maximize_window()
    driver.delete_all_cookies() # delete all current cookies
with open('cookies.txt', 'r', encoding='utf-8') as f:
        listcookies = json.loads(f.read()) # read the cookie data saved on disk
    for cookie in listcookies: # add each cookie
driver.add_cookie({
            'domain': cookie['domain'], # note: the domain needs a leading dot, e.g. .baidu.com
'name': cookie['name'],
'value': cookie['value'],
'path': '/',
'expires': None
})
    driver.get(url) # reload the page to check the cookies took effect
time.sleep(5)
return driver
def get_and_save(driver, urls,iterator):
##############
print(os.getcwd())
if not (os.path.exists('result')):
os.mkdir('result')
os.chdir('result')
##############
for url in urls:
drugname=next(iterator)
        print('Scraping [{}]'.format(drugname))
# ActionChains(driver).key_down(Keys.CONTROL+"t").key_up(Keys.CONTROL+"t").perform()
wait = WebDriverWait(driver, 10)
driver.execute_script('window.open()')
        driver.switch_to.window(driver.window_handles[-1])
# driver.find_element_by_tag_name('body')
print(url)
driver.get(url)
#time.sleep(3)
try:
# get_url=driver.find_element_by_xpath('//div[@class="fl"]/h3/a').get_attribute('href')
wait.until(EC.element_to_be_clickable((By.XPATH, '//div[@class="fl"]/h3/a'))).click()
except NoSuchElementException:
#time.sleep(1)
try:
driver.find_element_by_partial_link_text('非常抱歉,没有找到您需要的信息。')
except NoSuchElementException:
print('Medicine [{}] not found in database!'.format(drugname))
else:
save2file(driver,drugname)
else:
save2file(driver, drugname)
# print(get_url)
# yield get_url
# //div[@class='fl']/h3//@href
# driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL, "w")
# ActionChains(driver).key_down(Keys.CONTROL+"w").key_up(Keys.CONTROL+"w").perform()
driver.execute_script('window.close()')
        driver.switch_to.window(driver.window_handles[0])
#time.sleep(3)
return
def save2file(driver,drugname):
if not (os.path.exists(drugname)):
os.mkdir(drugname)
os.chdir(drugname)
try:
time.sleep(2)
#to_clicks=driver.find_elements_by_class_name('bg fr')
to_clicks=driver.find_elements_by_xpath('//a[@class="bg fr"]')
print('len(to_clicks)',len(to_clicks))
except:
to_clicks=[]
for to_click in to_clicks:
time.sleep(0.5)
actions=ActionChains(driver)
actions.move_to_element(to_click).click().perform()
#to_click.click()
dt=iter(driver.find_elements_by_tag_name('dt'))
dd = iter(driver.find_elements_by_tag_name('dd'))
for dt_ in dt:
path = (dt_.find_element_by_xpath('./span[@class="fl"]').text)[:-1]
print(path)
try:
dt_.find_element_by_xpath('./a[@onclick]')
except NoSuchElementException:
            with open(path+'.txt','w',encoding='utf-8') as f:
f.write(next(dd).text)
else:
            with open(path+'.txt','w',encoding='utf-8') as f:
next(dd)
tt=next(dd).text
print(tt)
f.write(tt)
#filepath = os.path.join(path, image_path)
os.chdir('../')
return
def readin(path):
if not os.path.exists(path):
return []
with open(path, 'r', encoding='utf-8') as file_to_read:
line_data = []
while True:
            lines = file_to_read.readline() # read one full line
if not lines:
break
            aline = [i for i in lines.split()] # split the line; split() with no argument splits on whitespace, pass ',' for comma-separated data
if len(aline) == 1:
                line_data.append(aline[0]) # append the newly read value
else:
line_data.append(aline)
return line_data
def main():
list_to_search = readin('tosearch.txt')
print(list_to_search)
if (list_to_search):
urls = ['http://drugs.dxy.cn/search/drug.htm?keyword=' + i for i in list_to_search]
url = 'http://drugs.dxy.cn/'
# print(user_info[0])
# print(user_info[1])
driver1 = login_dxy(url)
theiter=iter(list_to_search)
get_and_save(driver1, urls,theiter)
# link_list=list(get_and_save(driver1,urls))
# print(link_list)
driver1.quit()
else:
print("Nothing to search")
exit()
    # switch=input('enter 1 to use your account, 2 to use mine')
# if not(user_info):
# for i in user_info:
print(readin('tosearch.txt'))
def count_spend_time(func):
start_time = time.perf_counter()
func()
end_time = time.perf_counter()
time_dif = (end_time - start_time)
second = time_dif % 60
minute = (time_dif // 60) % 60
hour = (time_dif // 60) // 60
print('spend ' + str(hour) + 'hours , ' + str(minute) + 'minutes , ' + str(second) + 'seconds')
if __name__ == '__main__':
count_spend_time(main)
# 1: use your own account/password  2: use mine
``` |
{
"source": "1012598167/Factor_Analasis_by_Python",
"score": 3
} |
#### File: Factor_Analasis_by_Python/py/deal.py
```python
import numpy as np
from numpy import *
import pandas as pd
df = pd.read_csv('data.csv',encoding='gbk')
#Data cleaning: the data was adjusted on the EPS platform before import, so there are no erroneous, redundant or duplicate records; only simplify the table and handle missing values
df=df.dropna(how="all")
df=df.drop([0])#delete year
#for i in range(df.shape[0]):
#the analysis is not region-specific, so find and drop rows with more than one missing value
todel=[]
for i in range(df.shape[0]):
sum = 0
for j in range(df.shape[1]):
if pd.isnull(df.iloc[i,j]):
sum+=1
if sum>=2:
todel.append(i)
break
df=df.drop(todel)
#fill missing values using Lagrange interpolation
from scipy.interpolate import lagrange
def ploy(s,n,k=6):
    y=s[list(range(n-k,n))+list(range(n+1,n+1+k))] # take k values on each side of position n
y=y[y.notnull()]
return lagrange(y.index,list(y))(n)
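# A minimal sketch of what scipy's lagrange does in ploy(), using a hypothetical
# set of points (not part of the original pipeline): fit indices [0, 1, 3] to
# values [1.0, 2.0, 4.0] and evaluate the interpolating polynomial at the
# missing index 2:
#   lagrange([0, 1, 3], [1.0, 2.0, 4.0])(2)  # -> 3.0 for these collinear points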
for i in df.columns:
for j in range(len(df)):
if (df[i].isnull())[j]:
df[i][j]=ploy(df[i],j)
df.to_excel('data222.xls')
#use the KMO test and Bartlett test to judge whether factor analysis is appropriate
import numpy as np
import math as math
dataset = pd.read_csv('data222.csv', encoding='gbk')
dataset = dataset.drop(['no','Unnamed: 0'],axis=1)
def corr(data):
return np.corrcoef(dataset)
dataset_corr = corr(dataset) # Pearson's r (product-moment correlation coefficients) # data standardization
tru = pd.read_csv('true.csv', encoding='gbk') # due to precision issues the inverse is computed in MATLAB and then imported
def kmo(dataset_corr, tr):
    corr_inv = tr # originally inverted with np.linalg.inv, but precision issues gave wrong results, so it is computed in MATLAB and imported
nrow_inv_corr, ncol_inv_corr = dataset_corr.shape
    A = np.ones((nrow_inv_corr, ncol_inv_corr)) # all-ones matrix
for i in range(0, nrow_inv_corr, 1):
for j in range(i, ncol_inv_corr, 1):
A[i, j] = -(corr_inv.iloc[i, j]) / (math.sqrt(corr_inv.iloc[i, i] * corr_inv.iloc[j, j]))
A[j, i] = A[i, j]
dataset_corr = np.asarray(dataset_corr)
    kmo_num = np.sum(np.square(dataset_corr)) - np.sum(np.square(np.diagonal(A))) # sum of squares of the correlation matrix minus sum of squares of the diagonal
kmo_denom = kmo_num + np.sum(np.square(A)) - np.sum(np.square(np.diagonal(A)))
kmo_value = kmo_num / kmo_denom
return kmo_value
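# For reference, the value returned above is the overall KMO measure
#   KMO = sum(r_ij^2, i != j) / (sum(r_ij^2, i != j) + sum(a_ij^2, i != j))
# where r_ij are the correlations and a_ij are the anti-image (partial)
# correlations derived from the inverse correlation matrix; values above
# roughly 0.6 are usually taken to mean factor analysis is appropriate.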
print(kmo(dataset_corr, tru)) # kmo test
dataset = pd.read_excel('data222.xls',encoding='gbk')
dataset = dataset.drop(['no','Unnamed: 0'],axis=1)
def corr(data):
return np.corrcoef(dataset)
dataset_corr = corr(dataset)
from scipy.stats import bartlett
bartlett(dataset_corr[0],dataset_corr[1],dataset_corr[2],dataset_corr[3],dataset_corr[4],\
dataset_corr[6],dataset_corr[7],dataset_corr[8],dataset_corr[9],dataset_corr[10],dataset_corr[11],dataset_corr[12]\
,dataset_corr[13],dataset_corr[14],dataset_corr[15],dataset_corr[16],dataset_corr[17],dataset_corr[18],dataset_corr[19]\
,dataset_corr[20],dataset_corr[21],dataset_corr[22],dataset_corr[23],dataset_corr[24],dataset_corr[25],dataset_corr[26]\
,dataset_corr[27],dataset_corr[28],dataset_corr[29])#bartlett test
#without the factor_analyzer library; written from first principles
import pandas as pd
import math
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as nlg
#read the data
mydata = pd.read_csv('data222.csv',encoding="gb2312")
# drop unused columns
mydata=mydata.drop(['no','Unnamed: 0'],axis=1)
#compute the correlation matrix R
R=mydata.corr() # pandas' built-in correlation method
print("Sample correlation matrix:")
print(R)
#compute the eigenvalues and normalized eigenvectors of R
eig_value, eigvector = nlg.eig(R)
eig = pd.DataFrame()
eig['names'] = mydata.columns
eig['eig_value'] = eig_value
#sort the eigenvalues in descending order
eig.sort_values('eig_value', ascending=False, inplace=True)
print("Eigenvalues:")
print(eig_value)
# print("Eigenvectors:")
# print(eigvector)
#find the number of common factors m
print("Number of common factors:")
for m in range(1, 14):
    # criterion: the first m eigenvalues account for more than 85% of the total variance
if eig['eig_value'][:m].sum() / eig['eig_value'].sum() >= 0.85:
print(m)
break
# build the factor loading matrix of the factor model
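# Each loading column below is sqrt(eigenvalue_j) times the j-th eigenvector,
# i.e. A[:, j] = sqrt(lambda_j) * e_j, the principal-component estimate of the
# loadings.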
A = np.zeros((14,m))
A[:,0] = math.sqrt(eig_value[0]) * eigvector[:,0]
A[:,1] = math.sqrt(eig_value[1]) * eigvector[:,1]
A[:,2] = math.sqrt(eig_value[2]) * eigvector[:,2]
A[:,3] = math.sqrt(eig_value[3]) * eigvector[:,3]
a = pd.DataFrame(A)
a.columns = ['factor1', 'factor2', 'factor3','factor4']
print("Factor loading matrix (component matrix):")
print(a)
#compute the communalities and the specific factor variances
h=np.zeros(14)
D=np.mat(np.eye(14))
b=np.mat(np.zeros((4,14)))
for i in range(14):
    b=A[i,:]*A[i,:].T # .T is the transpose
h[i]=b[0]
D[i,i] = 1-b[0]
print("Communalities (how much each variable depends on the common factors):")
print(h)
print("Specific factor variances:")
print(pd.DataFrame(D))
#compute the cumulative variance contributions
m=np.zeros(4)
for i in range(4):
c=A[:,i].T *A[:,i]
m[i]=c[0]
print("Contributions (each common factor's influence across all variables):")
print(m)
#using the factor_analyzer library
import pandas as pd
import numpy as np
from pandas import DataFrame,Series
from factor_analyzer import FactorAnalyzer
#read the data
data = pd.read_csv('data222.csv',encoding="gb2312")
#drop unused columns
data=data.drop(['no','Unnamed: 0'],axis=1)
#data.head()
fa = FactorAnalyzer()
fa.analyze(data, 4, rotation=None) # fix the number of common factors at 4
print("Communalities:\n", fa.get_communalities()) # communalities
print("\nComponent matrix:\n", fa.loadings) # component matrix
var = fa.get_factor_variance() # gives the contribution rates
print("\nEigenvalues, explained variance (contribution rates), cumulative rate:\n", var)
fa_score = fa.get_scores(data) # factor scores
print("\nFactor scores:\n",fa_score) #.head()
#multiply each factor score by its contribution rate and divide by the total contribution rate, giving intermediate factor scores
a = (fa.get_scores(data)*var.values[1])/var.values[-1][-1]
print("\n",fa.get_scores(data),"\n")
print("\n",var.values[1],"\n")
print("\n",var.values[-1][-1],"\n")
print("\n",a,"\n")
#sum the intermediate factor scores to get the composite score
a['score'] = a.apply(lambda x: x.sum(), axis=1)
#a.head()
print("\nComposite scores:\n",a)
from pyecharts import Geo
import pandas as pd
df = pd.read_csv('ditu.csv',encoding="gb2312")
data = [(df.iloc[i][0], df.iloc[i][1]) for i in range(df.shape[0])]
geo = Geo("Happiness Index Scores", title_color="#fff",
title_pos="center", width=1000,
height=600, background_color='#404a59')
attr, value = geo.cast(data)
geo.add("", attr, value, visual_range=[-1.31,1.71], maptype='china', visual_text_color="#fff",
is_piecewise=True,symbol_size=15, is_visualmap=True)
geo.render("happiness.html") # generate the html file
``` |
{
"source": "1013497232/SK-FLOW",
"score": 3
} |
#### File: SK-FLOW/flowdetect/MyLogiRe.py
```python
import joblib
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
from sklearn import manifold
from matplotlib import pyplot as plt
class MyLogiRe:
def __init__(self, x_train, y_train, x_test, y_test,isJoblib=False):
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
self.isJoblib = isJoblib
self.study = LogisticRegression(solver='newton-cg', penalty='l2', max_iter=15000, tol=0.5)
def train(self):
self.study.fit(self.x_train, self.y_train)
joblib.dump(self.study, 'pkls/LogiRe.pkl')
def show(self):
if self.isJoblib is True:
self.study = joblib.load('pkls/LogiRe.pkl')
y_pred = self.study.predict(self.x_test)
print(confusion_matrix(self.y_test, y_pred))
print(classification_report(self.y_test, y_pred))
def show_image(self,prediction):
        def handleLabel(input):
            label_list = ['normal','scan', 'dos', 'u2r', 'r2l']
            return label_list.index(input)
        for i in range(len(prediction)):
            prediction[i] = handleLabel(prediction[i])
        # dimensionality reduction with t-SNE
tsne =manifold.TSNE(n_components=2, init = 'pca', random_state= 501)
X_tsne = tsne.fit_transform(self.x_test)
x_min, x_max = X_tsne.min(0), X_tsne.max(0)
X_norm = (X_tsne - x_min) / (x_max - x_min)
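        # the min-max scaling above squeezes the t-SNE embedding into [0, 1]
        # so the text labels drawn below land inside the plot axes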
colors = ['black', 'blue', 'purple', 'yellow', 'red', 'lime', 'cyan', 'orange', 'gray','white']
for i in range(X_norm.shape[0]):
plt.text(X_norm[i, 0], X_norm[i, 1], str(prediction[i]), color=plt.cm.Set1(prediction[i]),
fontdict={'weight': 'bold', 'size': 9})
plt.legend(np.arange(len(colors)).astype(str))
plt.xlabel('First Principal Component')
plt.ylabel('Second Principal Component')
plt.show()
def excute(self):
if self.isJoblib is False:
self.train()
self.show()
def predict(self):
if self.isJoblib is True:
self.study = joblib.load('pkls/LogiRe.pkl')
y_pred = self.study.predict(self.x_test)
print(y_pred)
with open('LogiRe_out.txt', 'w') as out:
for i in y_pred:
out.write(i+'\n')
``` |
{
"source": "10171121/project-config-ci-name",
"score": 2
} |
#### File: jenkins/scripts/ZanataUtils.py
```python
from io import BytesIO
import json
from lxml import etree
import os
import re
import requests
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class IniConfig:
"""Object that stores zanata.ini configuration
Read zanata.ini and make its values available.
Attributes:
inifile: The path to the ini file to load values from.
"""
def __init__(self, inifile):
self.inifile = inifile
self._load_config()
def _load_config(self):
"""Load configuration from the zanata.ini file
Parses the ini file and stores its data.
"""
if not os.path.isfile(self.inifile):
raise ValueError('zanata.ini file not found.')
config = configparser.ConfigParser()
try:
config.read(self.inifile)
except configparser.Error:
raise ValueError('zanata.ini could not be parsed, please check '
'format.')
for item in config.items('servers'):
item_type = item[0].split('.')[1]
if item_type in ('username', 'key', 'url'):
setattr(self, item_type, item[1])
class ZanataRestService:
def __init__(self, zconfig, content_type='application/xml', verify=True):
self.url = zconfig.url
if "charset" not in content_type:
content_type = "%s;charset=utf8" % content_type
self.headers = {'Accept': content_type,
'Content-Type': content_type,
'X-Auth-User': zconfig.username,
'X-Auth-Token': zconfig.key}
self.verify = verify
def _construct_url(self, url_fragment):
return urljoin(self.url, url_fragment)
def query(self, url_fragment, raise_errors=True):
request_url = self._construct_url(url_fragment)
try:
r = requests.get(request_url, verify=self.verify,
headers=self.headers)
except requests.exceptions.ConnectionError:
raise ValueError('Connection error')
if raise_errors and r.status_code != 200:
raise ValueError('Got status code %s for %s' %
(r.status_code, request_url))
if raise_errors and not r.content:
            raise ValueError('Did not receive any data from %s' % request_url)
return r
def push(self, url_fragment, data):
request_url = self._construct_url(url_fragment)
try:
return requests.put(request_url, verify=self.verify,
headers=self.headers, data=json.dumps(data))
except requests.exceptions.ConnectionError:
raise ValueError('Connection error')
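# A minimal usage sketch for the service above, assuming a zanata.ini whose
# [servers] section provides username, key and url (all values hypothetical):
#   zconfig = IniConfig('zanata.ini')
#   rest = ZanataRestService(zconfig)
#   r = rest.query('/rest/projects/p/myproject/iterations/i/master/config')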
class ProjectConfig:
"""Object that stores zanata.xml per-project configuration.
Write out a zanata.xml file for the project given the supplied values.
Attributes:
zconfig (IniConfig): zanata.ini values
xmlfile (str): path to zanata.xml to read or write
rules (list): list of two-ples with pattern and rules
"""
def __init__(self, zconfig, xmlfile, rules, verify, **kwargs):
self.rest_service = ZanataRestService(zconfig, verify=verify)
self.xmlfile = xmlfile
self.rules = self._parse_rules(rules)
for key, value in kwargs.items():
setattr(self, key, value)
self._create_config()
def _get_tag_prefix(self, root):
"""XML utility method
Get the namespace of the XML file so we can
use it to search for tags.
"""
return '{%s}' % etree.QName(root).namespace
def _parse_rules(self, rules):
"""Parse a two-ple of pattern, rule.
Returns a list of dictionaries with 'pattern' and 'rule' keys.
"""
return [{'pattern': rule[0], 'rule': rule[1]} for rule in rules]
def _create_config(self):
"""Create zanata.xml
Use the supplied parameters to create zanata.xml by downloading
a base version of the file and adding customizations.
"""
xml = self._fetch_zanata_xml()
self._add_configuration(xml)
self._write_xml(xml)
def _fetch_zanata_xml(self):
"""Get base zanata.xml
Download a basic version of the configuration for the project
using Zanata's REST API.
"""
r = self.rest_service.query(
'/rest/projects/p/%s/iterations/i/%s/config'
% (self.project, self.version))
project_config = r.content
p = etree.XMLParser(remove_blank_text=True)
try:
xml = etree.parse(BytesIO(project_config), p)
except etree.ParseError:
raise ValueError('Error parsing xml output')
return xml
def _add_configuration(self, xml):
"""
Insert additional configuration
Add locale mapping rules to the base zanata.xml retrieved from
the server.
Args:
xml (etree): zanata.xml file contents
"""
root = xml.getroot()
s = etree.SubElement(root, 'src-dir')
s.text = self.srcdir
t = etree.SubElement(root, 'trans-dir')
t.text = self.txdir
rules = etree.SubElement(root, 'rules')
for rule in self.rules:
new_rule = etree.SubElement(rules, 'rule')
new_rule.attrib['pattern'] = rule['pattern']
new_rule.text = rule['rule']
if self.excludes:
excludes = etree.SubElement(root, 'excludes')
excludes.text = self.excludes
tag_prefix = self._get_tag_prefix(root)
# Work around https://bugzilla.redhat.com/show_bug.cgi?id=1219624
# by removing port number in URL if it's there
url = root.find('%surl' % tag_prefix)
url.text = re.sub(':443', '', url.text)
def _write_xml(self, xml):
"""Write xml
Write out xml to zanata.xml.
"""
try:
xml.write(self.xmlfile, pretty_print=True)
except IOError:
raise ValueError('Error writing zanata.xml.')
``` |
{
"source": "1017flocka/multiwii_python",
"score": 2
} |
#### File: 1017flocka/multiwii_python/msp.py
```python
from __future__ import print_function
from builtins import bytes # For python2/3 compatibility
from serial import Serial
from construct import Struct, Const, Int8ul, Int16ul, Int16sl, Int32ul, Int32sl
import struct
import time
import traceback
MSP_SERIAL_BAUD = 115200
MSP_SERIAL_TIMEOUT = 5
# Message IDs (commands)
# Custom commands. The behaviour of these commands is specific to my MultiWii 2.4 fork
MSP_GPS_REPORT_INTERVAL = 50
MSP_PERIODIC_GPS_REPORT = 51
MSP_SET_RC_OVERRIDES = 52
MSP_GET_RC_OVERRIDES = 53
MSP_UNSET_RC_OVERRIDES = 54
# Standard commands
MSP_IDENT = 100
MSP_SERVO = 103
MSP_MOTOR = 104
MSP_RC = 105
MSP_RAW_GPS = 106
MSP_ALTITUDE = 109
MSP_GET_WP = 118
MSP_SERVO_CONF = 120
MSP_NAV_STATUS = 121
MSP_RESET_CONF = 208
MSP_SET_WP = 209
MSP_PREAMBLE = b'$M'
MSP_DIR_FROM_BOARD = b'>'
MSP_DIR_TO_BOARD = b'<'
MSP_HAS_ERROR = b'!'
MSP_DATASIZE_INDEX = len(MSP_PREAMBLE + MSP_DIR_TO_BOARD)
MSP_RECVD_HEADER = Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_FROM_BOARD),
'size' / Int8ul,
'message_id' / Int8ul)
MSP_ACK = Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_FROM_BOARD),
'size' / Const(0, Int8ul),
'message_id' / Int8ul,
'crc' / Int8ul)
MSP_ERROR = Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_HAS_ERROR),
'size' / Const(0, Int8ul),
'message_id' / Int8ul,
'crc' / Int8ul)
MSP_SETTINGS_PROVIDERS = {
MSP_SET_WP : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_TO_BOARD),
'size' / Const(21, Int8ul),
'message_id' / Const(MSP_SET_WP, Int8ul),
'wp_no' / Int8ul,
'action' / Int8ul,
'lat' / Int32sl,
'lon' / Int32sl,
'altitude' / Int32sl,
'param1' / Int16ul,
'param2' / Int16ul,
'param3' / Int16ul,
'flag' / Int8ul),
MSP_GPS_REPORT_INTERVAL : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_TO_BOARD),
'size' / Const(4, Int8ul),
'message_id' / Const(MSP_GPS_REPORT_INTERVAL, Int8ul),
'gps_report_interval' / Int32ul),
MSP_SET_RC_OVERRIDES : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_TO_BOARD),
'size' / Const(1, Int8ul),
'message_id' / Const(MSP_SET_RC_OVERRIDES, Int8ul),
'rc_overrides' / Int8ul),
MSP_UNSET_RC_OVERRIDES : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_TO_BOARD),
'size' / Const(1, Int8ul),
'message_id' / Const(MSP_UNSET_RC_OVERRIDES, Int8ul),
'rc_overrides_to_unset' / Int8ul),
    MSP_RESET_CONF : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_TO_BOARD),
'size' / Const(0, Int8ul),
'message_id' / Const(MSP_RESET_CONF, Int8ul)),
}
MSP_PARAMETERIZED_REQUESTS = {
MSP_GET_WP : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_TO_BOARD),
'size' / Const(1, Int8ul),
'message_id' / Const(MSP_GET_WP, Int8ul),
'wp_no' / Int8ul),
}
MSP_REQUEST_RESPONSES = {
MSP_IDENT : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_FROM_BOARD),
'size' / Const(7, Int8ul),
'message_id' / Const(MSP_IDENT, Int8ul),
'version' / Int8ul,
'multitype' / Int8ul,
'msp_version' / Int8ul,
'capability' / Int32ul,
'crc' / Int8ul),
MSP_GET_WP : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_FROM_BOARD),
'size' / Const(21, Int8ul),
'message_id' / Const(MSP_GET_WP, Int8ul),
'wp_no' / Int8ul,
'action' / Int8ul,
'lat' / Int32sl,
'lon' / Int32sl,
'altitude' / Int32sl,
'param1' / Int16ul,
'param2' / Int16ul,
'param3' / Int16ul,
'flag' / Int8ul,
'crc' / Int8ul),
MSP_NAV_STATUS : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_FROM_BOARD),
'size' / Const(7, Int8ul),
'message_id' / Const(MSP_NAV_STATUS, Int8ul),
'gps_mode' / Int8ul,
'nav_state' / Int8ul,
'curr_mission_action' / Int8ul,
'curr_mission_number' / Int8ul,
'nav_error' / Int8ul,
'target_bearing' / Int16sl,
'crc' / Int8ul),
MSP_RAW_GPS : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_FROM_BOARD),
'size' / Const(16, Int8ul),
'message_id' / Const(MSP_RAW_GPS, Int8ul),
'has_fix' / Int8ul,
'num_satellites' / Int8ul,
'lat' / Int32sl,
'lon' / Int32sl,
'altitude' / Int16ul,
'speed' / Int16ul,
'ground_course' / Int16ul,
'crc' / Int8ul),
MSP_PERIODIC_GPS_REPORT : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_FROM_BOARD),
'size' / Const(26, Int8ul),
'message_id' / Const(MSP_PERIODIC_GPS_REPORT, Int8ul),
'gps_has_fix' / Int8ul,
'gps_num_satellites' / Int8ul,
'gps_lat' / Int32sl,
'gps_lon' / Int32sl,
'gps_altitude' / Int16ul,
'gps_speed' / Int16ul,
'gps_ground_course' / Int16ul,
'baro_estimated_alt' / Int32sl,
'baro_vario' / Int16sl,
'timestamp' / Int32ul,
'crc' / Int8ul),
MSP_ALTITUDE : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_FROM_BOARD),
'size' / Const(6, Int8ul),
'message_id' / Const(MSP_ALTITUDE, Int8ul),
'estimated_alt' / Int32sl,
'vario' / Int16sl,
'crc' / Int8ul),
MSP_GET_RC_OVERRIDES : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_FROM_BOARD),
'size' / Const(1, Int8ul),
'message_id' / Const(MSP_GET_RC_OVERRIDES, Int8ul),
'rc_overrides' / Int8ul,
'crc' / Int8ul),
MSP_SERVO_CONF : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_FROM_BOARD),
'size' / Const(56, Int8ul),
'message_id' / Const(MSP_SERVO_CONF, Int8ul),
'min0' / Int16ul,
'max0' / Int16ul,
'mid0' / Int16ul,
'rate0' / Int8ul,
'min1' / Int16ul,
'max1' / Int16ul,
'mid1' / Int16ul,
'rate1' / Int8ul,
'min2' / Int16ul,
'max2' / Int16ul,
'mid2' / Int16ul,
'rate2' / Int8ul,
'min3' / Int16ul,
'max3' / Int16ul,
'mid3' / Int16ul,
'rate3' / Int8ul,
'min4' / Int16ul,
'max4' / Int16ul,
'mid4' / Int16ul,
'rate4' / Int8ul,
'min5' / Int16ul,
'max5' / Int16ul,
'mid5' / Int16ul,
'rate5' / Int8ul,
'min6' / Int16ul,
'max6' / Int16ul,
'mid6' / Int16ul,
'rate6' / Int8ul,
'min7' / Int16ul,
'max7' / Int16ul,
'mid7' / Int16ul,
'rate7' / Int8ul,
'crc' / Int8ul),
MSP_SERVO : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_FROM_BOARD),
'size' / Const(16, Int8ul),
'message_id' / Const(MSP_SERVO, Int8ul),
'servo0' / Int16ul,
'servo1' / Int16ul,
'servo2' / Int16ul,
'servo3' / Int16ul,
'servo4' / Int16ul,
'servo5' / Int16ul,
'servo6' / Int16ul,
'servo7' / Int16ul,
'crc' / Int8ul),
MSP_MOTOR : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_FROM_BOARD),
'size' / Const(16, Int8ul),
'message_id' / Const(MSP_MOTOR, Int8ul),
'servo0' / Int16ul,
'servo1' / Int16ul,
'servo2' / Int16ul,
'servo3' / Int16ul,
'servo4' / Int16ul,
'servo5' / Int16ul,
'servo6' / Int16ul,
'servo7' / Int16ul,
'crc' / Int8ul),
MSP_RC : Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_FROM_BOARD),
'size' / Const(16, Int8ul),
'message_id' / Const(MSP_RC, Int8ul),
'ROLL' / Int16ul,
'PITCH' / Int16ul,
'YAW' / Int16ul,
'THROTTLE' / Int16ul,
'AUX1' / Int16ul,
'AUX2' / Int16ul,
'AUX3' / Int16ul,
'AUX4' / Int16ul,
'crc' / Int8ul),
}
class MSP:
def __init__(self, transport, initialization_delay=0):
self.transport = transport
if transport is not None and initialization_delay > 0:
print('Waiting {0} seconds for board to wake up'.format(initialization_delay))
time.sleep(initialization_delay)
print('Done waiting')
def calc_crc(self, data):
data = bytes(data) # for python2/3 compatibility
crc = 0
for a_byte in data:
crc ^= a_byte
return crc
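    # The MSP checksum above is a plain XOR over every byte after the '$M<' or
    # '$M>' header, i.e. the size byte, the message id and the payload; e.g. a
    # zero-length MSP_IDENT request carries bytes [0, 100], so:
    #   msp.calc_crc(bytes([0, 100]))  # -> 100 (msp being an MSP instance)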
def provide(self, message_id, parameters):
self.send(self.build(self.get_provider(message_id), parameters))
def read_ack(self, message_id):
ack = self.receive_data(MSP_ACK)
if ack.message_id != message_id:
raise ValueError("Received ACK for {0} but expected {1}".format(ack.message_id, message_id))
def get_provider(self, message_id):
return MSP_SETTINGS_PROVIDERS[message_id]
def get_request(self, message_id):
return MSP_PARAMETERIZED_REQUESTS.get(message_id,
Struct('preamble' / Const(MSP_PREAMBLE),
'direction' / Const(MSP_DIR_TO_BOARD),
'size' / Const(0, Int8ul),
'message_id' / Const(message_id, Int8ul)))
def get_response(self, message_id):
return MSP_REQUEST_RESPONSES[message_id]
def build(self, cmd, parameters):
data = cmd.build(parameters)
crc = self.calc_crc(data[MSP_DATASIZE_INDEX::])
data += struct.pack('<B', crc) # python2/3 compatible. data += crc.to_bytes(1, byteorder='little') python3 only
return data
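    # A small worked example with the zero-payload MSP_IDENT request defined in
    # get_request(): the frame is b'$M<' + size 0x00 + id 0x64, and the CRC is
    # 0x00 ^ 0x64 = 0x64, so:
    #   msp.build(msp.get_request(MSP_IDENT), {})  # -> b'$M<\x00dd'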
def parse(self, data, template, crc_data=True):
try:
parsed_data = template.parse(data)
except:
print("Attempted to parse", data)
raise
if crc_data:
crc = self.calc_crc(data[MSP_DATASIZE_INDEX:-1])
if (crc != parsed_data.crc):
raise ValueError("CRC does not match. Expected {0} but got {1}. {2}".format(crc, parsed_data.crc, parsed_data))
return parsed_data
def send(self, data):
print('Sending MSP len', len(data))
self.transport.write(data)
def receive_data(self, parser):
received_data = self.read(parser.sizeof())
return self.parse(received_data, parser)
def request(self, message_id, parameters={}):
self.send(self.build(self.get_request(message_id), parameters))
return self.receive_data(self.get_response(message_id))
def read(self, num_bytes):
return self.transport.read(num_bytes)
def stop_gps_updates(msp):
msp.provide(MSP_GPS_REPORT_INTERVAL, {'gps_report_interval': 0})
# Clear buffer of last GPS update
temp = msp.read(1)
while len(temp) > 0:
temp = msp.read(1)
if __name__ == '__main__':
transport = Serial(port='/dev/ttyACM0',
baudrate=115200,
timeout=5)
msp = MSP(transport, initialization_delay=15)
stop_gps_updates(msp)
# msp.provide(MSP_SET_RC_OVERRIDES, {'rc_overrides' : 0})
# msp.read_ack(MSP_SET_RC_OVERRIDES)
# msp.provide(MSP_SET_RC_OVERRIDES, {'rc_overrides' : (1 << 1) | (1 << 5)})
# msp.read_ack(MSP_SET_RC_OVERRIDES)
# time.sleep(5)
# print(msp.request(MSP_NAV_STATUS))
# print(msp.request(MSP_RAW_GPS))
# msp.provide(MSP_SET_WP,
# {
# 'wp_no' : 255,
# 'action' : 1,
# 'lat' : 5,
# 'lon' : -5,
# 'altitude' : 9,
# 'param1' : 1,
# 'param2' : 5,
# 'param3' : 4,
# 'flag' : 0,
# })
# msp.read_ack(MSP_SET_WP)
while True:
print(msp.request(MSP_NAV_STATUS))
time.sleep(1)
print(msp.request(MSP_GET_RC_OVERRIDES))
transport.close()
``` |
{
"source": "10183308/tf-mobilenet-SSD",
"score": 2
} |
#### File: 10183308/tf-mobilenet-SSD/train_ssd_mobilenet.py
```python
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
import ssd_mobilenet_v1 as ssd
from datasets import dataset_factory
from preprocessing import preprocessing_factory
import tf_utils
import os
import pdb
slim = tf.contrib.slim
# ssd network flags
tf.app.flags.DEFINE_float(
'match_threshold', 0.5, 'Matching threshold in the loss function.')
tf.app.flags.DEFINE_float(
'loss_alpha', 1., 'Alpha parameter in the loss function.')
tf.app.flags.DEFINE_float(
'negative_ratio', 3., 'Negative ratio in the loss function.')
# General flags
tf.app.flags.DEFINE_integer(
'num_readers', 4,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_string(
'train_dir', './logs',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
    'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 600,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 600,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_float(
'gpu_memory_fraction', 0.5, 'GPU memory fraction to use.')
# learning rate flags.
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float(
"learning_rate_decay_factor",
0.94,"Learning rate decay factor.")
tf.app.flags.DEFINE_float(
"num_epochs_per_decay",2.0,
"Number of epochs after which learning rate decays.")
tf.app.flags.DEFINE_float(
"learning_rate",0.01,"Initial learning rate.")
tf.app.flags.DEFINE_float(
"end_learning_rate",0.0001,"The minimum end learning rate used by polynomial decay learning rate.")
tf.app.flags.DEFINE_float(
'moving_average_decay', 0.9999,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
# optimization flags, only support RMSprop in this version
tf.app.flags.DEFINE_float(
"weight_decay",0.00004,"The weight decay on the model weights.")
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_string(
"optimizer","rmsprop",
"The name of the optimizer, only support `rmsprop`.")
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
# dataset flags
tf.app.flags.DEFINE_string(
'dataset_name', 'pascalvoc_2007', 'The name of the dataset to load.')
tf.app.flags.DEFINE_integer(
'num_classes', 21, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_string(
'preprocessing_name', "ssd_512_vgg", 'The name of the preprocessing to use.')
tf.app.flags.DEFINE_integer(
'batch_size', 32, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', None, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', None,
'The maximum number of training steps.')
# fine-tuning flags
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
'Comma-separated list of scopes to filter the set of variables to train.'
'By default, None would train all the variables.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', True,
'When restoring a checkpoint would ignore missing variables.')
tf.app.flags.DEFINE_boolean(
'train_on_cpu', False,
'Set as `True` will make use of CPU for training.')
tf.app.flags.DEFINE_string(
"gpu_device","0",
"Set used gpu id for training.")
tf.app.flags.DEFINE_boolean("allow_growth",True,
"If allow increasing use of memory of GPU.")
FLAGS = tf.app.flags.FLAGS
def main(_):
if FLAGS.train_on_cpu:
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
else:
os.environ["CUDA_VISIBLE_DEVICES"]=FLAGS.gpu_device
if not FLAGS.dataset_dir:
raise ValueError("You must supply the dataset directory with --dataset-dir.")
tf.logging.set_verbosity(tf.logging.DEBUG)
g = tf.Graph()
with g.as_default():
# select the dataset
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name,FLAGS.dataset_dir)
# create global step, used for optimizer moving average decay
with tf.device("/cpu:0"):
global_step = tf.train.create_global_step()
# pdb.set_trace()
# get the ssd network and its anchors
ssd_cls = ssd.SSDnet
ssd_params = ssd_cls.default_params._replace(num_classes=FLAGS.num_classes)
ssd_net = ssd_cls(ssd_params)
image_size = ssd_net.params.img_shape
ssd_anchors = ssd_net.anchors(img_shape=image_size)
# select the preprocessing function
preprocessing_name = FLAGS.preprocessing_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,is_training=True)
tf_utils.print_configuration(FLAGS.__flags,ssd_params,
dataset.data_sources,FLAGS.train_dir)
# create a dataset provider and batches.
with tf.device("/cpu:0"):
with tf.name_scope(FLAGS.dataset_name+"_data_provider"):
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.num_readers,
common_queue_capacity=20*FLAGS.batch_size,
common_queue_min=10*FLAGS.batch_size,
shuffle=True)
# get for ssd network: image,labels,bboxes
[image,shape,glabels,gbboxes] = provider.get(["image","shape",
"object/label",
"object/bbox"])
# pdb.set_trace()
# preprocessing
image,glabels,gbboxes = \
image_preprocessing_fn(image,
glabels,gbboxes,
out_shape=image_size,
data_format="NHWC")
# encode groundtruth labels and bboxes
gclasses,glocalisations,gscores= \
ssd_net.bboxes_encode(glabels,gbboxes,ssd_anchors)
batch_shape = [1] + [len(ssd_anchors)] * 3
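        # batch_shape tells tf_utils.reshape_list how to regroup the flat
        # tensor list: one image tensor, then one entry per anchor layer for
        # each of gclasses, glocalisations and gscores.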
# training batches and queue
r = tf.train.batch(
tf_utils.reshape_list([image, gclasses, glocalisations, gscores]),
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5*FLAGS.batch_size)
b_image,b_gclasses,b_glocalisations,b_gscores = \
tf_utils.reshape_list(r,batch_shape)
# prefetch queue
batch_queue = slim.prefetch_queue.prefetch_queue(
tf_utils.reshape_list([b_image,b_gclasses,b_glocalisations,b_gscores]),
capacity = 8)
# dequeue batch
b_image, b_gclasses, b_glocalisations, b_gscores = \
tf_utils.reshape_list(batch_queue.dequeue(), batch_shape)
# gather initial summaries
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
arg_scope = ssd_net.arg_scope(weight_decay=FLAGS.weight_decay)
with slim.arg_scope(arg_scope):
predictions,localisations,logits,end_points,mobilenet_var_list = \
ssd_net.net(b_image,is_training=True)
# add loss function
ssd_net.losses(logits,localisations,
b_gclasses,b_glocalisations,b_gscores,
match_threshold=FLAGS.match_threshold,
negative_ratio=FLAGS.negative_ratio,
alpha=FLAGS.loss_alpha,
label_smoothing=FLAGS.label_smoothing)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# add summaries for end_points
for end_point in end_points:
x = end_points[end_point]
summaries.add(tf.summary.histogram("activations/"+end_point,x))
summaries.add(tf.summary.scalar("sparsity/"+end_point,
tf.nn.zero_fraction(x)))
# add summaries for losses and extra losses
for loss in tf.get_collection(tf.GraphKeys.LOSSES):
summaries.add(tf.summary.scalar(loss.op.name,loss))
for loss in tf.get_collection("EXTRA_LOSSES"):
summaries.add(tf.summary.scalar(loss.op.name,loss))
# add summaries for variables
for var in slim.get_model_variables():
summaries.add(tf.summary.histogram(var.op.name,var))
# configure the moving averages
if FLAGS.moving_average_decay: # use moving average decay on weights variables
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay,global_step)
else:
moving_average_variables,variable_averages = None,None
# configure the optimization procedure
with tf.device("/cpu:0"):
learning_rate = tf_utils.configure_learning_rate(FLAGS,
dataset.num_samples,global_step)
optimizer = tf_utils.configure_optimizer(FLAGS,learning_rate)
summaries.add(tf.summary.scalar("learning_rate",learning_rate))
if FLAGS.moving_average_decay:
# update ops executed by trainer
update_ops.append(variable_averages.apply(moving_average_variables))
# get variables to train
variables_to_train = tf_utils.get_variables_to_train(FLAGS)
# return a train tensor and summary op
total_losses = tf.get_collection(tf.GraphKeys.LOSSES)
total_loss = tf.add_n(total_losses,name="total_loss")
summaries.add(tf.summary.scalar("total_loss",total_loss))
# create gradient updates
grads = optimizer.compute_gradients(total_loss,var_list=variables_to_train)
grad_updates = optimizer.apply_gradients(grads,global_step=global_step)
update_ops.append(grad_updates)
# create train op
update_op = tf.group(*update_ops)
train_tensor = control_flow_ops.with_dependencies([update_op],total_loss,
name="train_op")
# merge all summaries together
summary_op = tf.summary.merge(list(summaries),name="summary_op")
# start training
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction,allow_growth=FLAGS.allow_growth)
config = tf.ConfigProto(log_device_placement=False,
gpu_options=gpu_options)
saver = tf.train.Saver(max_to_keep=2,
keep_checkpoint_every_n_hours=1.0,
write_version=2,
pad_step_number=False)
# create initial assignment op
init_assign_op,init_feed_dict = slim.assign_from_checkpoint(
FLAGS.checkpoint_path,mobilenet_var_list,
ignore_missing_vars=FLAGS.ignore_missing_vars)
# create an initial assignment function
for k,v in init_feed_dict.items():
if "global_step" in k.name:
g_step = k
init_feed_dict[g_step] = 0 # change the global_step to zero.
init_fn = lambda sess: sess.run(init_assign_op,init_feed_dict)
# run training
slim.learning.train(train_tensor,logdir=FLAGS.train_dir,
init_fn=init_fn,
summary_op=summary_op,
number_of_steps=FLAGS.max_number_of_steps,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
session_config=config,
saver=saver,
)
# slim.learning.train(
# train_tensor,
# logdir=FLAGS.train_dir,
# init_fn =tf_utils.get_init_fn(FLAGS,mobilenet_var_list),
# summary_op=summary_op,
# global_step=global_step,
# number_of_steps=FLAGS.max_number_of_steps,
# log_every_n_steps=FLAGS.log_every_n_steps,
# save_summaries_secs=FLAGS.save_summaries_secs,
# saver=saver,
# save_interval_secs =FLAGS.save_interval_secs,
# session_config=config,
# sync_optimizer=None)
if __name__ == '__main__':
tf.app.run()
``` |
{
"source": "1019157263/server",
"score": 2
} |
#### File: AAIT_official_forum_server/city/models.py
```python
from django.db import models
class user(models.Model):
username = models.CharField(max_length=200,primary_key=True)
pwd = models.CharField(max_length=200)
email=models.CharField(max_length=200)
time=models.DateTimeField('date published')
def __str__(self):
return self.username
``` |
{
"source": "1019459067/haixuewang_xiaofang",
"score": 3
} |
#### File: 1019459067/haixuewang_xiaofang/rename_file.py
```python
import os
import json
# get all file names under the given directory
def get_file_names(file_dir):
for root, dirs, files in os.walk(file_dir):
        #print(root)  # current directory path
        #print(dirs)  # all subdirectories under the current path
        #print(files) # all non-directory files under the current path
return files
# read the entire contents
def get_lesson_name_list(file_path):
txt_content_string = ''
    with open(file_path, "r") as f: # open the file
        txt_content_string = f.read() # read the file
#print(txt_content_string)
lesson_list = txt_content_string.split(';')
# index:{}-name:{}-audioUrl:{}-lectureUrl:{}-videoUrl:{}-totalTime:
lesson_list_dict = []
for i in range(len(lesson_list)):
item = lesson_list[i]
            # convert the JSON string to a dict
item_dict = json.loads(item)
lesson_dict = {}
lesson_dict['index'] = item_dict['index']
lesson_dict['name'] = item_dict['name']
videoUrl = item_dict['videoUrl']
lectureUrl = item_dict['lectureUrl']
            # split only on the last '/'
videoUrl = videoUrl.rsplit("/",1)[1]
lectureUrl = lectureUrl.rsplit("/",1)[1]
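            # e.g. 'http://host/path/a.mp4'.rsplit("/",1)[1] -> 'a.mp4' (hypothetical URL)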
lesson_dict['videoUrl'] = videoUrl
lesson_dict['lectureUrl'] = lectureUrl
lesson_list_dict.append(lesson_dict)
return lesson_list_dict
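# A hedged sketch of one record in the source txt: records are separated by ';'
# and each is a JSON object whose keys are inferred from the code above (values
# hypothetical):
#   {"index": 1, "name": "...", "lectureUrl": "http://host/a.pdf", "videoUrl": "http://host/a.mp4", ...}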
def rename_video_file(file_dir_path, save_file_dir_path, lesson_list_dict):
for item in lesson_list_dict:
item_index = item['index']
item_name = item['name']
item_ideoUrl = item['videoUrl']
        #***** video section below *****#
old_Path = file_dir_path+'/'+item_ideoUrl
#print(old_Path)
new_Path = save_file_dir_path+'/'+'{}、'.format(item_index)+item_name+'.mp4'
#print(new_Path)
        #***** video section above *****#
        # #***** PDF section below *****#
        # item_lectureUrl = item['lectureUrl']
        # old_Path = file_dir_path+'/'+item_lectureUrl
        # new_Path = save_file_dir_path+'/'+'{}-'.format(item_index)+item_name+'.pdf'
        # #***** PDF section above *****#
os.rename(old_Path, new_Path)
    print('Rename succeeded')
if __name__ == '__main__':
    input_type = input('Enter a type:\n1-实务教材精讲\n2-综合教材精讲\n3-案例教材精讲\n:')
video_file_name = 'videos'
dirt_path = os.getcwd()
file_dir_path = dirt_path+'/'+video_file_name
print(file_dir_path)
    # get all file names under the given directory
#files_list = get_file_names(file_dir_path)
#print(files_list)
txt_file_name = "1、实务教材精讲"
    # required *
if int(input_type) == 1:
txt_file_name = '1、实务教材精讲'
elif int(input_type) == 2:
txt_file_name = '2、综合教材精讲'
else:
txt_file_name = '3、案例教材精讲'
file_path = dirt_path+'/'+'脚本数据'+'/'+txt_file_name+'.txt'
print(file_path)
    # get the lesson info list (a list of dicts)
lesson_list_dict = get_lesson_name_list(file_path)
print(lesson_list_dict)
save_file_dir_path = dirt_path+'/'+txt_file_name
    # check whether the target directory exists
    if (os.path.exists(save_file_dir_path) == False):
        # create the directory
os.mkdir(save_file_dir_path)
rename_video_file(file_dir_path, save_file_dir_path, lesson_list_dict)
``` |
{
"source": "101arrowz/higlass-server",
"score": 2
} |
#### File: higlass-server/higlass_server/tests.py
```python
import unittest
import slugid
import subprocess
import tilesets.models as tm
class CommandlineTest(unittest.TestCase):
def setUp(self):
# TODO: There is probably a better way to clear data from previous test runs. Is it even necessary?
# self.assertRun('python manage.py flush --noinput --settings=higlass_server.settings_test')
pass
def assertRun(self, command, output_res=[]):
output = subprocess.check_output(command , shell=True).decode('utf-8').strip()
for output_re in output_res:
self.assertRegexpMatches(output, output_re)
return output
def test_hello(self):
self.assertRun('echo "hello?"', [r'hello'])
def test_bamfile_upload_with_index(self):
settings = 'higlass_server.settings_test'
uid = slugid.nice()
self.assertRun('python manage.py ingest_tileset' +
' --filename data/SRR1770413.mismatched_bai.bam' +
' --indexfile data/SRR1770413.different_index_filename.bai' +
' --datatype reads' +
' --filetype bam' +
' --uid '+uid+' --settings='+settings)
self.assertRun('python manage.py shell ' +
'--settings ' + settings +
' --command="' +
'import tilesets.models as tm; '+
f'o = tm.Tileset.objects.get(uuid=\'{uid}\');'
'print(o.indexfile)"', '.bai$')
def test_bamfile_upload_without_index(self):
settings = 'higlass_server.settings_test'
uid = slugid.nice()
self.assertRun('python manage.py ingest_tileset' +
' --filename data/SRR1770413.sorted.short.bam' +
' --datatype reads' +
' --filetype bam' +
' --uid '+uid+' --settings='+settings)
self.assertRun('python manage.py shell ' +
'--settings ' + settings +
' --command="' +
'import tilesets.models as tm; '+
f'o = tm.Tileset.objects.get(uuid=\'{uid}\');'
'print(o.indexfile)"', '.bai$')
def test_cli_upload(self):
cooler = 'dixon2012-h1hesc-hindiii-allreps-filtered.1000kb.multires.cool'
settings = 'higlass_server.settings_test'
id = 'cli-test'
self.assertRun('python manage.py ingest_tileset --filename data/'+cooler+' --datatype matrix --filetype cooler --uid '+id+' --settings='+settings)
self.assertRun('curl -s http://localhost:6000/api/v1/tileset_info/?d='+id,
[r'"name": "'+cooler+'"'])
self.assertRun('curl -s http://localhost:6000/api/v1/tiles/?d='+id+'.1.1.1',
[r'"'+id+'.1.1.1":',
r'"max_value": 2.0264008045196533',
r'"min_value": 0.0',
r'"dense": "JTInPwAA'])
def test_cli_huge_upload(self):
cooler = 'huge.fake.cool'
with open('data/'+cooler, 'w') as file:
file.truncate(1024 ** 3)
settings = 'higlass_server.settings_test'
id = 'cli-huge-test'
self.assertRun('python manage.py ingest_tileset --filename data/'+cooler+' --datatype foo --filetype bar --uid '+id+' --settings='+settings)
self.assertRun('curl -s http://localhost:6000/api/v1/tileset_info/?d='+id,
[r'"name": "'+cooler+'"'])
self.assertRun('curl -s http://localhost:6000/api/v1/tiles/?d='+id+'.1.1.1',
[r'"'+id+'.1.1.1"'])
'''
id = 'cli-coord-system-test'
self.assertRun('python manage.py ingest_tileset --filename data/'+cooler+' --datatype foo --filetype bar --uid '+id+' --settings='+settings, 'coordSystem')
self.assertRun('curl -s http://localhost:6000/api/v1/tileset_info/?d='+id,
[r'"coordSystem": "'+cooler+'"'])
'''
# TODO: check the coordSystem parameters for ingest_tileset.py
def test_get_from_foreign_host_file(self):
# manage.py should have been started with
# export SITE_URL=somesite.com
#self.assertRun('curl -s -H "Host: someothersite.com" http://localhost:6000/api/v1/tilesets/', [r'400'])
#self.assertRun('curl -s -H "Host: somesite.com" http://localhost:6000/api/v1/tilesets/', [r'count'])
pass
``` |
{
"source": "101camp/mk",
"score": 2
} |
#### File: 101camp/mk/tasks.py
```python
__version__ = 'pub101CAMP v.190721.2342'
__author__ = 'Zoom.Quiet'
__license__ = 'MIT@2019-04'
#import io
import os
#import re
import sys
import time
#import datetime
#import json
#import marshal as msh
#import subprocess
#import logging
#import sys
import logging
#logging.basicConfig()
logging.basicConfig(level=logging.CRITICAL)
_handler = logging.StreamHandler()
_formatter = logging.Formatter("[%(levelname)s]%(asctime)s:%(name)s(%(lineno)s): %(message)s"
#, datefmt='%Y.%m.%d %H:%M:%S'
, datefmt='%H:%M:%S'
)
_handler.setFormatter(_formatter)
LOG = logging.getLogger(__name__)
#LOG = logging.getLogger()
LOG.setLevel(logging.DEBUG)
LOG.propagate = False
LOG.addHandler(_handler)
#LOG.debug('load LOG level')
from pprint import pprint as pp
#pp = pprint.PrettyPrinter(indent=4)
from pprint import pformat
#import platform
#os_name = platform.system()
#del platform
#import subprocess
from invoke import task
#from fabric.context_managers import cd
from textwrap import dedent as dedentxt
CAMPROOT = os.environ.get("CAMPSITES_ROOT")
CSITES = {'mk':{'gl':'mk'
, 'ghp':'mk.101.camp'
, 'log': None
}
}
AIM = 'site'
_TRIP = '_trigger'
_TOBJ = 'deploy.md'
TRIGGER = 0
@task
def ver(c):
    '''echo crt. version
'''
    print('\n\t powered by {}'.format(__version__))
# support stuff func.
def cd(c, path2):
os.chdir(path2)
print('\n\t crt. PATH ===')
c.run('pwd')
#@task
def ccname(c):
c.run('cp CNAME %s/'% AIM, hide=False, warn=True)
c.run('ls %s/'% AIM, hide=False, warn=True)
c.run('pwd')
#@task
def sync4media(c):
c.run('cp -rvf img %s/'% AIM, hide=False, warn=True)
c.run('ls %s/'% AIM, hide=False, warn=True)
c.run('pwd')
@task
def pl(c, site):
    '''$ inv pl [101|py] <- pull all related repos.
'''
global CAMPROOT
global CSITES
print(CAMPROOT)
if site:
#pp(CSITES[site])
_aim = '%s/%s'%(CAMPROOT, CSITES[site]['gl'])
cd(c, _aim)
#os.chdir(_aim)
#c.run('pwd')
c.run('git pull', hide=False, warn=True)
_aim = '%s/%s'%(CAMPROOT, CSITES[site]['ghp'])
cd(c, _aim)
#os.chdir(_aim)
#c.run('pwd')
c.run('git pull', hide=False, warn=True)
else:
ver(c)
@task
def bu(c):
    '''use MkDocs to build the AIM site
'''
c.run('pwd')
c.run('mkdocs build', hide=False, warn=True)
@task
def pu(c):
'''push gl manuscript...
'''
_ts = '{}.{}'.format(time.strftime('%y%m%d %H%M %S')
, str(time.time()).split('.')[1][:3] )
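    # _ts looks like '210721 2342 05.123': date, time, seconds, then the first
    # three digits of the fractional part of the epoch time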
c.run('pwd')
c.run('git st', hide=False, warn=True)
#c.run('git add .', hide=False, warn=True)
#c.run('git ci -am '
c.run('git imp '
'"inv(loc) MkDocs upgraded by DAMA (at %s)"'% _ts
, hide=False, warn=True)
#c.run('git pu', hide=False, warn=True)
# 'rsync -avzP4 {static_path}/media/ {deploy_path}/media/ && '
#@task
def ghp(c, site):
'''$ inv gh [101|py] <- push gh-pages for site publish
'''
global CAMPROOT
global CSITES
print(CAMPROOT)
ccname(c)
sync4media(c)
_ts = '{}.{}'.format(time.strftime('%y%m%d %H%M %S')
, str(time.time()).split('.')[1][:3] )
_aim = '%s/%s'%(CAMPROOT, CSITES[site]['ghp'])
cd(c, _aim)
#os.chdir(AIM)
#with cd('site/'):
#c.run('pwd')
c.run('ls')
c.run('git st', hide=False, warn=True)
#c.run('git add .', hide=False, warn=True)
#c.run('git ci -am '
c.run('git imp '
'"pub(site) gen. by MkDocs as invoke (at %s)"'% _ts
, hide=False, warn=True)
#c.run('git pu', hide=False, warn=True)
#@task
def chktri(c):
'''check trigger obj. set TRIGGER switch
'''
global TRIGGER
global _TRIP, _TOBJ
#cd(c, '%s/%s/%s'%(_DU19, PUB, _TRI))
_path = './%s'% _TRIP
print(_path)
#print(os.listdir(_path))
#print(type(os.listdir(_path)))
if _TOBJ in os.listdir(_path):
print('TRIGGERed by %s exist'% _TOBJ)
TRIGGER = 1
else:
print('TRIGGER obj. -> %s ~> NOT exist\n\t CANCEL build...'% _TOBJ)
TRIGGER = 0
#@task
def recover(c):
'''recover trigger state, by del TRIGGER obj.
'''
global TRIGGER
global _TRIP, _TOBJ
#cd(c, '%s/%s/%s'%(_DU19, PUB, _TRI))
_path = './%s'% _TRIP
_obj = '%s/%s'%(_path, _TOBJ)
print(_obj)
c.run('rm -vf %s'% _obj)
c.run('ls -Aogh %s'% _path)
c.run('git st')
c.run('git fix "(pubDUW) recover trigger obj. wait NEXT deploy"')
TRIGGER = 0
    print('TRIGGER obj. recovered -> waiting for a human to deploy again')
#@task
def gh(c):
'''push gh-pages for site publish
'''
#global CAMPROOT
#global CSITES
#print(CAMPROOT)
ccname(c)
sync4media(c)
_ts = '{}.{}'.format(time.strftime('%y%m%d %H%M %S')
, str(time.time()).split('.')[1][:3] )
#_aim = '%s/%s'%(CAMPROOT, CSITES[site]['ghp'])
cd(c, AIM)
#os.chdir(AIM)
#with cd('site/'):
#c.run('pwd')
c.run('ls')
c.run('git st', hide=False, warn=True)
#c.run('git add .', hide=False, warn=True)
#c.run('git ci -am '
c.run('git imp '
'"pub(site) gen. by MkDocs as invoke (at %s)"'% _ts
, hide=False, warn=True)
#c.run('git pu', hide=False, warn=True)
#def pub(c, site):
@task
def pub(c):
    '''<- auto deploy a new site version based on multiple repos.
'''
#global TRIGGER
#global CAMPROOT
#global CSITES
#print(CAMPROOT)
#pl(c, site)
#_crt = '%s/%s'%(CAMPROOT, CSITES[site]['gl'])
#cd(c, _crt)
#chktri(c)
    print('auto deploy NOW:')
#return None
bu(c)
#recover(c)
pu(c)
ccname(c)
#sync4media(c)
gh(c)
ver(c)
return None
``` |
{
"source": "101Corp/Meower",
"score": 2
} |
#### File: old-meower-server/SUITE/clouddisk.py
```python
from cloudlink import CloudLink
import time
import os
import json
import sys
from disk import filesysapi
userlist = []
old_userlist = ["%CD%"]
auths = {}
fsapi = filesysapi()
def packetHandler(cmd, val, origin):
# Requisite commands for authentication / networking utilities
if cmd == "ping":
try:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "pong", "origin": "%CD%"})
except Exception as e:
print("[ ! ] Error: {0}".format(e))
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:INTERNAL_SERVER_ERR", "origin": "%CD%"})
if cmd == "returntoken":
try:
id = val["id"]
if val == "ERR":
cl.sendPacket({"cmd": "pmsg", "id":id, "val": "E:INTERNAL_SERVER_ERR", "origin": "%CD%"})
else:
cl.sendPacket({"cmd": "pmsg", "id":id, "val": json.dumps({"authed": str(val["val"])}), "origin": "%CD%"})
auths[id]["valid"] = bool(val["val"])
if bool(val["val"]):
print("[ i ] Adding {0} to auths".format(id))
except Exception as e:
print("[ ! ] Error: {0}".format(e))
cl.sendPacket({"cmd": "pmsg", "id":id, "val": "E:INTERNAL_SERVER_ERR", "origin": "%CD%"})
if cmd == "checkauth":
try:
if origin in auths:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": json.dumps({"authed": str(auths[origin]["valid"])}), "origin": "%CD%"})
else:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": json.dumps({"authed": "False"}), "origin": "%CD%"})
except Exception as e:
print("[ ! ] Error: {0}".format(e))
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:INTERNAL_SERVER_ERR", "origin": "%CD%"})
if cmd == "auth":
global userlist
try:
if "%CA%" in userlist:
if not origin in auths:
auths[origin] = {"token": val, "valid": False}
cl.sendPacket({"cmd": "verifytoken", "id":"%CA%", "val": {"origin": origin, "token": val}, "origin": "%CD%"})
else:
if not auths[origin]["valid"]:
auths[origin] = {"token": val, "valid": False}
cl.sendPacket({"cmd": "verifytoken", "id":"%CA%", "val": {"origin": origin, "token": val}, "origin": "%CD%"})
else:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": json.dumps({"authed": "True"}), "origin": "%CD%"})
else:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:AUTH_DOWN", "origin": "%CD%"})
except Exception as e:
print("[ ! ] Error: {0}".format(e))
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:INTERNAL_SERVER_ERR", "origin": "%CD%"})
if cmd == "deauth":
try:
if origin in auths:
del auths[origin]
print("[ i ] Removing {0} from auths".format(origin))
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "OK", "origin": "%CD%"})
except Exception as e:
print("[ ! ] Error: {0}".format(e))
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:INTERNAL_SERVER_ERR", "origin": "%CD%"})
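    # A hedged sketch of the client packet shape these handlers expect, based on
    # on_new_packet below (token value hypothetical):
    #   {"cmd": "pmsg", "val": "{\"cmd\": \"auth\", \"val\": \"<token>\"}", "origin": "<client id>"}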
# Custom commands for this appserver
if cmd == "getftpdir":
if (origin in auths) and (auths[origin]["valid"]):
try:
result, ddata = fsapi.lsdir(val)
read_directory(val, ddata)
if result:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": ddata, "origin": "%CD%"})
else:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:INTERNAL_SERVER_ERR", "origin": "%CD%"})
except Exception as e:
print("[ ! ] Error: {0}".format(e))
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:INTERNAL_SERVER_ERR", "origin": "%CD%"})
else:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:NOT_AUTHED", "origin": "%CD%"})
if cmd == "getftpfile":
if (origin in auths) and (auths[origin]["valid"]):
try:
result, ddata = fsapi.read(val)
ddata = str(ddata)
if result:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": ddata, "origin": "%CD%"})
else:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:INTERNAL_SERVER_ERR", "origin": "%CD%"})
except Exception as e:
print("[ ! ] Error: {0}".format(e))
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:INTERNAL_SERVER_ERR", "origin": "%CD%"})
else:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:NOT_AUTHED", "origin": "%CD%"})
if cmd == "putftp":
# Check if the user is authenticated
if (origin in auths) and (auths[origin]["valid"]):
# Check if the val dict has the correct keys
if ("dir" in val) and ("filename" in val) and ("data" in val):
try:
print("[ i ] Storing '" + str(val["filename"]) + "' in directory '" + str(val["dir"]) + "', storing " + str(len(str(val["data"]))) + " bytes")
result = fsapi.write(fdir = val["dir"], fname = val["filename"], data = val["data"])
if result:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "OK", "origin": "%CD%"})
else:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:SAVE_ERR", "origin": "%CD%"})
except Exception as e:
print("[ ! ] Error: {0}".format(e))
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:INTERNAL_SERVER_ERR", "origin": "%CD%"})
else:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:MISSING_PARAMS", "origin": "%CD%"})
else:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:NOT_AUTHED", "origin": "%CD%"})
if cmd == "ftpmkdir":
if (origin in auths) and (auths[origin]["valid"]):
try:
result = fsapi.mkdir(val)
if result:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "OK", "origin": "%CD%"})
else:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:MKDIR_ERR", "origin": "%CD%"})
except Exception as e:
print("[ ! ] Error: {0}".format(e))
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:INTERNAL_SERVER_ERR", "origin": "%CD%"})
else:
cl.sendPacket({"cmd": "pmsg", "id":origin, "val": "E:NOT_AUTHED", "origin": "%CD%"})
def read_directory(directory, items):
# If a blank directory is used assume the root
if directory == "":
directory = "/"
print("[ i ] Listing directory {0}".format(directory))
#print(directory, items)
# Hide the security.json file if present in the directory
if "SECURITY.json" in items:
items.remove("SECURITY.json")
# Read the directory and hide files
hidden = []
for item in items:
# Read output of directory and filetypes
# Check if a directory contains a security.json file
result, dtype = fsapi.chktype(directory, item)
#print(item, dtype)
if dtype == 2:
#print("A", (directory + item))
result2, ddata2 = fsapi.lsdir(directory + "/" + item)
if result2:
#print("B", ddata2)
if "SECURITY.json" in ddata2:
#print("C", (directory + "/" + item + "/SECURITY.json"))
result3, ddata3 = fsapi.read(directory + "/" + item + "/SECURITY.json")
try:
ddata3 = json.loads(ddata3)
#print("D", (result3, ddata3))
# Hide the directory from readback if the isHidden param is present and true
if "isHidden" in ddata3:
if ddata3["isHidden"]:
hidden.append(item)
print("[ i ] Hiding DIR", item)
except:
print("[ i ] Parse error while reading security.json in '{0}'. Checking if it's already JSON...".format(directory + "/" + item))
if type(ddata3) == dict:
print("[ i ] Looks like it's already JSON.")
#print("D", (result3, ddata3))
if ddata3["isHidden"]:
hidden.append(item)
print("[ i ] Hiding DIR", item)
else:
print("[ i ] It's not JSON. it's {0}".format(type(ddata3)))
#print("D", ddata3)
print("[ i ] Since the security.json file is invalid, assuming the directory is hidden.")
hidden.append(item)
#print(hidden)
for item in hidden:
if item in items:
items.remove(item)
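# Note (added): read_directory mutates `items` in place and returns nothing; a
# subdirectory whose SECURITY.json sets {"isHidden": true} is removed from the
# listing, and an unparseable SECURITY.json also hides it, failing closed.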
def on_new_packet(message):
print(message)
if message["cmd"] == "pmsg":
try:
cmd = json.loads(message["val"])["cmd"]
if "val" in json.loads(message["val"]):
val = json.loads(message["val"])["val"]
else:
val = ""
origin = message["origin"]
packetHandler(cmd, val, origin)
except Exception as e:
print("[ ! ] Error! {0}".format(e))
cmd = ""
data = ""
origin = ""
elif message["cmd"] == "direct":
if message["val"]["cmd"] == "vers":
print("[ i ] Server version: {0}".format(message["val"]["val"]))
elif message["val"]["cmd"] == "motd":
print("[ i ] Server MOTD: {0}".format(message["val"]["val"]))
elif message["cmd"] == "ulist":
global old_userlist, userlist
userlist = message["val"].split(";")
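        # the server sends "name1;name2;...;" with a trailing semicolon, so
        # split(";") leaves one empty element, dropped just below (note added)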
del userlist[-1]
for id in old_userlist:
if not id in userlist:
if id in auths:
del auths[id]
print("[ i ] Removing {0} from auths".format(id))
old_userlist = userlist
else:
cmdlist = ["clear", "setid", "gmsg", "pmsg", "gvar", "pvar", "ds", "ulist"]
if ("cmd" in message) and ("val" in message) and ("origin" in message):
if not message["cmd"] in cmdlist:
packetHandler(message["cmd"], message["val"], message["origin"])
def on_connect():
cl.sendPacket({"cmd": "setid", "val": "%CD%"})
print("[ i ] Connected to main link.")
def on_error(error):
print(error)
def init_files():
try:
os.mkdir("./DISK") # Create directory for CloudDisk
except:
pass
try:
os.mkdir("./DISK/FTP") # Create a directory for FTP data
except:
pass
try:
os.mkdir("./DISK/SLOTS") # Create a directory for save slot data
except:
pass
print("[ i ] Initialized files.")
if __name__ == "__main__":
init_files() # Initialize the directory
try:
        cl = CloudLink() # instantiate the CloudLink module
        cl.client("ws://127.0.0.1:3000", on_new_packet = on_new_packet, on_connect = on_connect, on_error = on_error) # define callbacks and connect to the server
while cl.mode == 2:
pass
del cl
except KeyboardInterrupt:
cl.stop() # Stops the client and exits
sys.exit()
```
#### File: old-meower-server/SUITE/cloudlink.py
```python
version = "0.1.6"
"""
### CloudLink Server ###
Version S3.0 - Developed by MikeDEV Software
CloudLink is a websocket extension developed for Scratch 3.0. It's
designed to make web browsers, MMOs, BBSs, chats, etc. possible within
the limitations of Scratch. For more details and documentation about
the CloudLink project, please see the official repository on Github:
https://github.com/MikeDev101/cloudlink.
0BSD License
Copyright (C) 2020-2021 MikeDEV Software, Co.
Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted.
THE SOFTWARE IS PROVIDED “AS IS” AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
# Import dependencies
import json
import sys
from time import sleep
from threading import Thread
from websocket_server import WebsocketServer
import websocket
# API class for interacting with the module.
class API:
def __init__(self):
print("API initialized")
def host(self, port, ip=None): # runs the module in server mode, uses websocket-server
self.mode = 1
try:
            if ip is None:
                self.wss = WebsocketServer(port=int(port), host="0.0.0.0") # Instantiate WebsocketServer alongside the CloudLink module
            else:
                self.wss = WebsocketServer(port=int(port), host=ip) # Instantiate WebsocketServer alongside the CloudLink module
# Define callbacks to functions
self.wss.set_fn_new_client(self._newConnection)
self.wss.set_fn_message_received(self._gotPacket)
self.wss.set_fn_client_left(self._closedConnection)
# Create a new thread and make it a daemon thread
self.serverThread = Thread(target=self.wss.serve_forever)
            self.serverThread.daemon = True  # setDaemon() is deprecated
# Run the server
self.serverThread.start()
except Exception as e:
print(e)
sys.exit()
def client(self, ip, on_new_packet=None, on_connect=None, on_error=None): # runs the module in client mode, uses websocket-client
self.mode = 2
try:
self.client = websocket.WebSocketApp(ip, on_message = self._on_message, on_error = self._on_error, on_open = self._on_open)
# Define callbacks to functions
self._set_fn_new_packet(on_new_packet)
self._set_fn_connected(on_connect)
self._set_fn_error(on_error)
self._set_fn_disconnected(self.stop)
# Create a new thread and make it a daemon thread
self.clientThread = Thread(target=self.client.run_forever)
            self.clientThread.daemon = True  # setDaemon() is deprecated
# Run the client
self.clientThread.start()
except Exception as e:
print(e)
sys.exit()
def stop(self): # Stops running the module in either host/client mode
if self.mode == 2:
self.client.close()
self.mode = 0
if self.mode == 1:
if not len(self.users) == 0:
print("Shutdown in progress, please wait...")
# Tell all users to disconnect, and wait until all are disconnected
while not len(self.users) == 0:
self.wss.send_message_to_all(json.dumps({"cmd":"ds"}))
sleep(1) # Retry every second
print("All users disconnected, now exiting...")
else:
print("Now exiting...")
self.wss.server_close()
self.mode = 0
def setMOTD(self, motd):
if self.mode == 1:
self.motd_enable = True
self.motd = motd
print('Set MOTD to "{0}"'.format(motd))
def trustedAccess(self, enable, keys):
if self.mode == 1:
self.accessdata["enable"] = enable
self.accessdata["keys"] = keys
if enable:
print("Trusted access has been enabled.")
else:
print("Trusted access has been disabled.")
def sendPacket(self, msg): # Sends packets when the module is running in client mode
if self.mode == 2:
self.client.send(json.dumps(msg))
# CloudLink class, containing the API and all of the spaghetti code that makes this weird project work.
class CloudLink(API):
def __init__(self): # Initializes the class
self.wss = None
self.users = {}
self.userlist = []
self.handlers = []
self.gdata = ""
self.mode = 0 # 1=Host, 2=Client
self.motd_enable = False
self.motd = ""
self.accessdata = {
"enable": False,
"keys": [],
"trusted": []
}
print("CloudLink v{0}".format(version))
def _newConnection(self, client, server): # Server: Handles new connections
if self.mode == 1:
print("New connection: {0}".format(client['id']))
if self.accessdata["enable"]:
self._sendPacket(server, True, {"cmd":"pmsg", "id":client, "val":"I:TRUSTED_ACCESS_SEND_ACCESS_KEY"})
if self.motd_enable:
self._sendPacket(server, True, {"cmd":"direct", "id":client, "val": {"cmd": "motd", "val": self.motd}})
else:
self._relayInitialData(client, server)
def _relayInitialData(self, client, server):
self.users[str(client)] = {"name": "", "id": str(client['id'])}
self._relayUserList(server, True, client)
self._sendPacket(server, True, {"cmd":"gmsg", "id":client, "val":str(self.gdata)})
self._sendPacket(server, True, {"cmd":"direct", "id":client, "val": {"cmd": "vers", "val": version}})
if self.motd_enable:
self._sendPacket(server, True, {"cmd":"direct", "id":client, "val": {"cmd": "motd", "val": self.motd}})
def _sendPacket(self, server, type, data, client=None): # Server: Transmits packets, False:Public, True:Private
if self.mode == 1:
if "id" in data:
id = data["id"]
del data["id"]
if type == False:
server.send_message_to_all(json.dumps(data))
elif type == True:
server.send_message(id, json.dumps(data))
def _relayUserList(self, server, type, id): # Server: Relays the username list to all connected users
if self.mode == 1:
y = ""
for x in range(len(self.userlist)):
y = str(y + self.userlist[x] + ";")
if self.accessdata["enable"]:
if id in self.accessdata["trusted"]:
self._sendPacket(server, type, {"cmd":"ulist", "id":id, "val":str(y)})
else:
self._sendPacket(server, type, {"cmd":"ulist", "id":id, "val":str(y)})
def _closedConnection(self, client, server): # Server: Handles dropped/lost/manual disconnections
if self.mode == 1:
if str(client) in self.users:
if self.users[str(client)]['name'] in self.userlist:
print("Connection closed: {0} ({1})".format(self.users[str(client)]['name'], client['id']))
else:
print("Connection closed: {0}".format(client['id']))
if self.users[str(client)]['name'] in self.userlist:
del self.userlist[self.userlist.index(self.users[str(client)]['name'])]
if client in self.handlers:
del self.handlers[self.handlers.index(client)]
del self.users[str(client)]
if self.accessdata["enable"]:
if client in self.accessdata["trusted"]:
self.accessdata["trusted"].remove(client)
print("Removing {0} from trust".format(client))
print(self.accessdata)
if not len(self.users) == 0:
self._relayUserList(server, False, client)
def _gotPacket(self, client, server, message): # Server: Handles new packet events
if self.mode == 1:
err = False
try:
packet = json.loads(message)
print("Got packet: {0} bytes".format(len(str(packet))))
print(packet)
except Exception as e:
err = True
finally:
if not err:
if "cmd" in packet: # Check if the cmd parameter is specified
cmd = packet['cmd']
if "val" in packet:
val = packet["val"]
else:
val = ""
if "id" in packet:
try:
id = self.handlers[self.userlist.index(str(packet['id']))]
except Exception as e:
id = ""
else:
id = ""
if "name" in packet:
name = str(packet['name'])
else:
name = ""
if "origin" in packet:
origin = str(packet['origin'])
else:
origin = ""
if self.accessdata["enable"]:
if client in self.accessdata["trusted"]:
self._packet_handler(cmd, server, client, id, val, name, origin, packet)
else:
if val in self.accessdata["keys"]:
if not client in self.accessdata["trusted"]:
print("Trusting new client: {0}".format(client))
self.accessdata["trusted"].append(client)
self._sendPacket(server, False, {"cmd":"pmsg", "id": client, "val":"I:TRUSTED"})
print(self.accessdata)
self._relayInitialData(client, server)
else:
self._sendPacket(server, False, {"cmd":"pmsg", "id": client, "val":"E:NOT_TRUSTED"})
else:
self._packet_handler(cmd, server, client, id, val, name, origin, packet)
def _packet_handler(self, cmd, server, client, id, val, name, origin, packet):
#print(cmd, server, client, id, val, name, origin, packet)
cmdlist = ["clear", "setid", "gmsg", "pmsg", "gvar", "pvar"]
if cmd in cmdlist:
if cmd == "clear": # Clears comms
self._sendPacket(server, False, {"cmd":"gmsg", "val":""})
self._sendPacket(server, False, {"cmd":"pmsg", "val":""})
if cmd == "setid": # Set username on server link
if "val" in packet:
if not client in self.handlers:
self.userlist.append(val)
self.handlers.append(client)
else:
if self.users[str(client)]['name'] in self.userlist:
self.userlist[self.userlist.index(self.users[str(client)]['name'])] = val
self.users[str(client)]['name'] = val
print("User {0} declared username: {1}".format(client['id'], self.users[str(client)]['name']))
self._relayUserList(server, False, client)
if cmd == "gmsg": # Set global stream data values
self.gdata = str(val)
self._sendPacket(server, False, {"cmd":"gmsg", "val":self.gdata})
if cmd == "pmsg": # Set private stream data values
if not id == "":
if not origin == "":
self._sendPacket(server, True, {"cmd":"pmsg", "id":id, "val":val, "origin":origin})
if cmd == "gvar": # Set global variable data values
self._sendPacket(server, False, {"cmd":"gvar", "name":name, "val":val})
if cmd == "pvar": # Set private variable data values
if not id == "":
if not origin == "":
self._sendPacket(server, True, {"cmd":"pvar", "name":name, "id":id, "val":val, "origin":origin})
else:
print("[ i ] Routing packet using UPL")
if not id == "":
if not origin == "":
self._sendPacket(server, True, {"cmd":cmd, "id":id, "val":val, "origin":origin})
def _set_fn_new_packet(self, fn): # Client: Defines API-friendly callback to new packet events
self.fn_msg = fn
def _set_fn_connected(self, fn): # Client: Defines API-friendly callback to connected event
self.fn_con = fn
def _set_fn_error(self, fn): # Client: Defines API-friendly callback to error event
self.fn_err = fn
    def _set_fn_disconnected(self, fn): # Client: Defines API-friendly callback to disconnected event
self.fn_ds = fn
    def _on_message(self, ws, message): # Client: Defines callback to new packet events
        packet = json.loads(message)  # parse once instead of twice
        if packet != {"cmd": "ds"}:
            self.fn_msg(packet)
        else:
            self.fn_ds()
def _on_error(self, ws, error): # Client: Defines callback to error events
self.fn_err(error)
self.fn_ds()
def _on_open(self, ws): # Client: Defines callback to connected event
self.fn_con()
``` |
{
"source": "101guptaji/Cryptography",
"score": 3
} |
#### File: 101guptaji/Cryptography/AES.py
```python
import os
from tqdm import tqdm
def random_key_generator(key_length):
return bytes.hex(os.urandom(key_length // 8))
class AES:
Nb = 4
Nk = 4
Nr = 10
Sbox = (
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16,
)
InvSbox = (
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D,
)
Rcon = (
0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40,
0x80, 0x1B, 0x36, 0x6C, 0xD8, 0xAB, 0x4D, 0x9A,
0x2F, 0x5E, 0xBC, 0x63, 0xC6, 0x97, 0x35, 0x6A,
0xD4, 0xB3, 0x7D, 0xFA, 0xEF, 0xC5, 0x91, 0x39,
)
def __init__(self, key, mode=128):
if mode == 192:
self.Nk = 6
self.Nr = 12
self.key = self.text2matrix(key, 24)
elif mode == 256:
self.Nk = 8
self.Nr = 14
self.key = self.text2matrix(key, 32)
else:
self.key = self.text2matrix(key)
self.key_expansion(self.key)
    def text2matrix(self, text, length=16):
        state = []
        for i in range(length):
            # two hex characters == 1 byte
            byte = int(text[i*2:i*2+2], 16)
            if i % 4 == 0:
                # this byte is the first of a new column
                state.append([byte])
            else:
                # append the byte to column i // 4
                state[i // 4].append(byte)
        return state
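    # Illustrative example (added): text2matrix("00112233445566778899aabbccddeeff")
    # returns [[0x00, 0x11, 0x22, 0x33], [0x44, ...], ...]; each inner list is
    # one AES column, so s[i][j] throughout this class means column i, row j.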
    def matrix2text(self, s, length=16):
        text = ""
        for i in range(length // 4):
            for j in range(4):
                text += format(s[i][j], '02x')
        return text
def sub_bytes(self, s):
for i in range(self.Nb):
for j in range(4):
s[i][j] = self.Sbox[s[i][j]]
def inv_sub_bytes(self, s):
for i in range(self.Nb):
for j in range(4):
s[i][j] = self.InvSbox[s[i][j]]
def shift_rows(self, s):
s[0][1], s[1][1], s[2][1], s[3][1] = s[1][1], s[2][1], s[3][1], s[0][1]
s[0][2], s[1][2], s[2][2], s[3][2] = s[2][2], s[3][2], s[0][2], s[1][2]
s[0][3], s[1][3], s[2][3], s[3][3] = s[3][3], s[0][3], s[1][3], s[2][3]
def inv_shift_rows(self, s):
s[0][1], s[1][1], s[2][1], s[3][1] = s[3][1], s[0][1], s[1][1], s[2][1]
s[0][2], s[1][2], s[2][2], s[3][2] = s[2][2], s[3][2], s[0][2], s[1][2]
s[0][3], s[1][3], s[2][3], s[3][3] = s[1][3], s[2][3], s[3][3], s[0][3]
def xtime(self, b):
if b & 0x80:
# check if b7 of the given polynomial is 1 or 0.
b = b << 1
b ^= 0x1B
else:
b = b << 1
return b & 0xFF # get the first 8 bits.
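    # Worked example (added): xtime(0x57) == 0xAE and xtime(0xAE) == 0x47,
    # i.e. multiplication by x in GF(2^8) modulo the AES polynomial 0x11B.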
def mix_one_column(self, c):
t = c[0] ^ c[1] ^ c[2] ^ c[3]
u = c[0]
c[0] ^= self.xtime(c[0] ^ c[1]) ^ t
c[1] ^= self.xtime(c[1] ^ c[2]) ^ t
c[2] ^= self.xtime(c[2] ^ c[3]) ^ t
c[3] ^= self.xtime(c[3] ^ u) ^ t
def mix_columns(self, s):
for i in range(self.Nb):
self.mix_one_column(s[i])
def inv_mix_columns(self, s):
for i in range(self.Nb):
u = self.xtime(self.xtime(s[i][0] ^ s[i][2]))
v = self.xtime(self.xtime(s[i][1] ^ s[i][3]))
s[i][0] ^= u
s[i][1] ^= v
s[i][2] ^= u
s[i][3] ^= v
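        # note (added): the XORs above pre-condition the state so that the
        # regular mix_columns() call below completes the inverse MixColumns
        # (a standard decomposition of the inverse MDS matrix)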
self.mix_columns(s)
def add_round_key(self, s, k):
for i in range(self.Nb):
for j in range(4):
s[i][j] ^= k[i][j]
def sub_word(self, w):
for i in range(len(w)):
w[i] = self.Sbox[w[i]]
def rotate_word(self, w):
w[0], w[1], w[2], w[3] = w[1], w[2], w[3], w[0]
def key_expansion(self, key):
self.round_keys = self.key
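        # note (added): this aliases self.key, so the key words form the first
        # Nk entries of the schedule and the appends below grow both names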
for i in range(self.Nk, self.Nb * (self.Nr + 1)):
self.round_keys.append([0, 0, 0, 0])
temp = self.round_keys[i - 1][:]
# word is multiple of Nk
if i % self.Nk == 0:
self.rotate_word(temp)
self.sub_word(temp)
temp[0] = temp[0] ^ self.Rcon[i // self.Nk]
elif self.Nk > 6 and i % self.Nk == 4:
self.sub_word(temp)
for j in range(4):
self.round_keys[i][j] = self.round_keys[i - self.Nk][j] ^ temp[j]
def cipher(self, text):
self.state = self.text2matrix(text)
self.add_round_key(self.state, self.round_keys[:4])
for i in range(1, self.Nr):
self.sub_bytes(self.state)
self.shift_rows(self.state)
self.mix_columns(self.state)
self.add_round_key(self.state, self.round_keys[self.Nb * i : self.Nb * (i + 1)])
self.sub_bytes(self.state)
self.shift_rows(self.state)
self.add_round_key(self.state, self.round_keys[len(self.round_keys) - 4:])
return self.matrix2text(self.state)
def decipher(self, text):
self.encrypted_state = self.text2matrix(text)
self.add_round_key(self.encrypted_state, self.round_keys[len(self.round_keys) - 4:])
for i in range(self.Nr - 1, 0, -1):
self.inv_shift_rows(self.encrypted_state)
self.inv_sub_bytes(self.encrypted_state)
self.add_round_key(self.encrypted_state, self.round_keys[self.Nb * i : self.Nb * (i + 1)])
self.inv_mix_columns(self.encrypted_state)
self.inv_shift_rows(self.encrypted_state)
self.inv_sub_bytes(self.encrypted_state)
self.add_round_key(self.encrypted_state, self.round_keys[:4])
return self.matrix2text(self.encrypted_state)
def pad(block, block_length):
bytes_to_pad = block_length - len(block) // 2
for _ in range(bytes_to_pad):
block += format(bytes_to_pad, '02x')
return block
def unpad(block):
bytes_to_unpad = int(block[-2:], 16)
return block[:-bytes_to_unpad*2]
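# Example (added): pad("aabbcc", 16) appends thirteen "0d" bytes so the block
# reaches 16 bytes, and unpad() reads the last byte (0x0d) to strip exactly
# that many - PKCS#7-style padding applied to hex strings.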
def xor_blocks(block_1, block_2):
return format(int(block_1, 16) ^ int(block_2, 16), '032x')
def generate_random_iv(iv_length):
return bytes.hex(os.urandom(iv_length))
def generate_random_ctr():
return generate_random_iv(8) + "0000000000000000"
def increment_ctr(ctr):
ctr_inc_int = int.from_bytes(bytes.fromhex(ctr), byteorder="big") + 1
return bytes.hex(ctr_inc_int.to_bytes(length=16, byteorder="big"))
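# Example (added): the counter block is an 8-byte random nonce followed by an
# 8-byte zero counter; increment_ctr("00" * 15 + "ff") returns "00" * 14 +
# "0100", treating all 16 bytes as one big-endian integer.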
class ECB:
def __init__(self, block_cipher_alg):
self.block_cipher_alg = block_cipher_alg
def cipher(self, filename, encrypted_file_name):
hex_array = FileTools.open_file(filename, 32)
# check if last block need to be padded
if len(hex_array[-1]) < 32:
hex_array[-1] = pad(hex_array[-1], 16)
cipher_array = []
for i in tqdm(range(len(hex_array)), desc="ECB encryption"):
cipher_array.append(self.block_cipher_alg.cipher(hex_array[i]))
FileTools.write_file(encrypted_file_name, cipher_array)
def decipher(self, filename, decrypted_file_name):
hex_array = FileTools.open_file(filename, 32)
decrypted_array = []
for i in tqdm(range(len(hex_array)), desc="ECB decryption"):
decrypted_array.append(self.block_cipher_alg.decipher(hex_array[i]))
# unpad last block
decrypted_array[-1] = unpad(decrypted_array[-1])
FileTools.write_file(decrypted_file_name, decrypted_array)
class CBC:
def __init__(self, block_cipher_alg, iv_length):
self.block_cipher_alg = block_cipher_alg
self.iv = generate_random_iv(iv_length)
def cipher(self, filename, encrypted_file_name):
hex_array = FileTools.open_file(filename, 32)
# check if last block need to be padded
if len(hex_array[-1]) < 32:
hex_array[-1] = pad(hex_array[-1], 16)
# Prefix the IV to the cipher text.
cipher_array = [self.iv]
iv = self.iv
for i in tqdm(range(len(hex_array)), desc="CBC encryption"):
block_to_cipher = xor_blocks(iv, hex_array[i])
cipher_array.append(self.block_cipher_alg.cipher(block_to_cipher))
# the ciphered block will be the "IV" for the next block
iv = cipher_array[i + 1]
FileTools.write_file(encrypted_file_name, cipher_array)
def decipher(self, filename, decrypted_file_name):
hex_array = FileTools.open_file(filename, 32)
iv = hex_array[0]
decrypted_array = []
for i in tqdm(range(1, len(hex_array)), desc="CBC decryption"):
decrypted_array.append(self.block_cipher_alg.decipher(hex_array[i]))
decrypted_array[i - 1] = xor_blocks(iv, decrypted_array[i - 1])
# the ciphered block will be the "IV" for the next block
iv = hex_array[i]
# unpad last block
decrypted_array[-1] = unpad(decrypted_array[-1])
FileTools.write_file(decrypted_file_name, decrypted_array)
class CTR:
def __init__(self, block_cipher_alg):
self.block_cipher_alg = block_cipher_alg
self.ctr = generate_random_ctr()
def cipher(self, filename, encrypted_file_name):
hex_array = FileTools.open_file(filename, 32)
# Prefix the ctr to the cipher text.
cipher_array = [self.ctr]
ctr = self.ctr
for i in tqdm(range(len(hex_array)), desc="CTR encryption"):
ctr_encrypted = self.block_cipher_alg.cipher(ctr)
cipher_array.append(xor_blocks(ctr_encrypted, hex_array[i]))
ctr = increment_ctr(ctr)
FileTools.write_file(encrypted_file_name, cipher_array)
def decipher(self, filename, decrypted_file_name):
hex_array = FileTools.open_file(filename, 32)
ctr = hex_array[0]
decrypted_array = []
for i in tqdm(range(1, len(hex_array)), desc="CTR decryption"):
ctr_encrypted = self.block_cipher_alg.cipher(ctr)
decrypted_array.append(xor_blocks(ctr_encrypted, hex_array[i]))
ctr = increment_ctr(ctr)
FileTools.write_file(decrypted_file_name, decrypted_array)
class FileTools:
@staticmethod
def open_file(filename, chunk_size):
with open(filename, "rb") as f:
hex_array = []
for offset in range(0, os.path.getsize(filename), 16):
hex_array.append(bytes.hex(f.read(16)))
f.seek(offset + 16)
f.close()
return hex_array
@staticmethod
def write_file(filename, block_array):
with open(filename, "ab") as f:
for i in range(len(block_array)):
f.write(bytes.fromhex(block_array[i]))
f.close()
def read_key():
try:
f = open("key.txt", "r")
except IOError:
return 1
key = f.read()
f.close()
return key
def write_key(key):
with open("key.txt", "w") as f:
f.write(key)
f.close()
def main(encrypt, input_file, output_file, block_cipher_mode, key_length):
    if encrypt:
        key = random_key_generator(int(key_length))
        # bind the instance to a new name: assigning to "AES" inside this
        # function would shadow the class and raise UnboundLocalError on the
        # constructor call
        if key_length == "128":
            aes = AES(key, 128)
        elif key_length == "192":
            aes = AES(key, 192)
        elif key_length == "256":
            aes = AES(key, 256)
        if block_cipher_mode == "ECB":
            bcm = ECB(aes)
        elif block_cipher_mode == "CBC":
            bcm = CBC(aes, 16)
        elif block_cipher_mode == "CTR":
            bcm = CTR(aes)
        bcm.cipher(input_file, output_file)
        print("Cipher Key:", key)
        write_key(key)
    else:
        key = read_key()
        if key == 1:
            print("File key.txt doesn't exist! Can't decrypt without key")
            exit(1)
        key_length = len(key) * 4
        if key_length == 128:
            aes = AES(key, 128)
        elif key_length == 192:
            aes = AES(key, 192)
        elif key_length == 256:
            aes = AES(key, 256)
        else:
            print("Key length not valid!")
            exit(1)
        if block_cipher_mode == "ECB":
            bcm = ECB(aes)
        elif block_cipher_mode == "CBC":
            bcm = CBC(aes, 16)
        elif block_cipher_mode == "CTR":
            bcm = CTR(aes)
        bcm.decipher(input_file, output_file)
if __name__ == "__main__":
    # main() takes five arguments; reading them from the command line is a
    # minimal sketch (the argument order here is an assumption, not from the
    # original source)
    import sys
    main(sys.argv[1].lower() in ("1", "true", "encrypt"), sys.argv[2],
         sys.argv[3], sys.argv[4], sys.argv[5])
```
#### File: 101guptaji/Cryptography/des2.py
```python
PC1 = [57, 49, 41, 33, 25, 17, 9,
1, 58, 50, 42, 34, 26, 18,
10, 2, 59, 51, 43, 35, 27,
19, 11, 3, 60, 52, 44, 36,
63, 55, 47, 39, 31, 23, 15,
7, 62, 54, 46, 38, 30, 22,
14, 6, 61, 53, 45, 37, 29,
21, 13, 5, 28, 20, 12, 4]
PC2 = [14, 17, 11, 24, 1, 5,
3, 28, 15, 6, 21, 10,
23, 19, 12, 4, 26, 8,
16, 7, 27, 20, 13, 2,
41, 52, 31, 37, 47, 55,
30, 40,51, 45, 33, 48,
44, 49, 39, 56,34, 53,
46, 42, 50, 36, 29, 32]
IP = [58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6,
64, 56, 48, 40, 32, 24, 16, 8,
57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7]
E = [32, 1, 2, 3, 4, 5,
4, 5, 6, 7, 8, 9,
8, 9, 10, 11, 12, 13,
12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21,
20, 21, 22, 23, 24, 25,
24, 25, 26, 27, 28, 29,
28, 29, 30, 31, 32, 1]
S = [
[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],
],
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],
],
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],
],
[[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],
],
[[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],
],
[[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],
],
[[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],
],
[[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],
]
]
P = [16, 7, 20, 21,
29, 12, 28, 17,
1, 15, 23, 26,
5, 18, 31, 10,
2, 8, 24, 14,
32, 27, 3, 9,
19, 13, 30, 6,
22, 11, 4, 25]
IP_1 = [40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25]
def printkey(lhs, key, sp):
res = ""
for i in range(0, len(key)):
res += key[i]
if (i + 1) % sp == 0:
res += " "
print(lhs + " = " + res)
def generate_from_table(key, table):
new_key = ""
for i in range(0, len(table)):
new_key += key[table[i] - 1]
return new_key
def generate_block_pairs(key):
shift = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]
C = key[:int(len(key)/2)]
D = key[int(len(key)/2):]
key_list = []
for i in range(0, 16):
for j in range(0, shift[i]):
C += C[0]
C = C[1:len(C)]
D += D[0]
D = D[1:len(D)]
#print("C" + str(i+1) + " = " + C)
#print("D" + str(i+1) + " = " + D + "\n")
key_list.append(C + D)
return key_list
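# Note (added): the per-round shifts above sum to 28, so after round 16 each
# 28-bit half has rotated through a full cycle and C16/D16 equal C0/D0.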
def generate_subkeys(key_list):
for i in range(0, len(key_list)):
key_list[i] = generate_from_table(key_list[i], PC2)
#print("K%.2d = %s" % (i+1, key_list[i]))
def XOR(x, y):
res = ""
for i in range(0, len(x)):
if x[i] == y[i]:
res += '0'
else:
res += '1'
return res
def f(key, block):
block_expanded = generate_from_table(block, E)
xor_res = ""
xor_res = XOR(key, block_expanded)
#printkey("XOR" , xor_res, 6)
sbox_ind = 0
row = 0
col = 0
ind = 0
b = ""
res = ""
for i in range(0, len(xor_res)):
b += xor_res[i]
        if i and i % 6 == 0:
            sbox_ind += 1
if len(b) % 6 == 0:
row = int(b[0] + b[len(b)-1], 2)
col = int(b[1:len(b)-1], 2)
sbox_res = bin(S[sbox_ind][row][col])[2:]
res += "0"*(4-len(sbox_res)) + sbox_res
b = ""
#printkey("f", res, 4)
res = generate_from_table(res, P)
return res
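# Worked example (added, using the standard DES S-boxes above): the 6-bit
# chunk "011011" selects row int("01", 2) == 1 and column int("1101", 2) == 13,
# so S1 gives S[0][1][13] == 5, which is emitted as the four bits "0101".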
def encrypt(mess, key_list):
L = mess[:int(len(mess)/2)]
R = mess[int(len(mess)/2):]
new_L = ""
new_R = ""
for i in range(0, 16):
#print("Round %d:" % (i + 1))
new_L = R
new_R = XOR(L, f(key_list[i], R))
L = new_L
R = new_R
#printkey("L", L, 4)
#printkey("R", R, 4)
return generate_from_table(R+L, IP_1)
def hex_to_bin(hex):
res = ""
for i in range(0, len(hex)):
s = bin(int(hex[i], 16))[2:]
res += "0"*(4-len(s)) + s
return res
def bin_to_hex(bin):
res = ""
i = 0
while i < len(bin):
res += hex(int(bin[i:i+4], 2))[2:]
i += 4
res = res.upper()
return res
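# Example (added): hex_to_bin("4F") == "01001111" and bin_to_hex("01001111")
# == "4F"; both helpers zero-pad every nibble to exactly four bits.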
hex_mess = input("\nEnter your message (in hex) to encrypt: ")
mess = hex_to_bin(hex_mess)
#print("\nApplying initial permutation to the 64-bit message..")
mess = generate_from_table(mess, IP)
#printkey("Initial permutation of messa", mess, 8)
hex_key = input("Enter 1st encryption key(in Hex): ")
key = hex_to_bin(hex_key)
#printkey("Binary key", key, 8)
#print("Generating PC-1 permutation for the key...")
key = generate_from_table(key, PC1)
#printkey("PC-1 permutation of the key", key, 8)
#print("Generating block pairs..")
key_list = generate_block_pairs(key)
#print("Generating subkeys..")
generate_subkeys(key_list)
#print("Encrypting messa...")
cipher1 = encrypt(mess, key_list)
#printkey("Intermediate Cipher text (Binary)", cipher1, 8)
cipher = bin_to_hex(cipher1)
print("Intermediate Cipher text(Hex) = " , cipher)
hex_key = input("Enter 2nd encryption key(in Hex): ")
key = hex_to_bin(hex_key)
#printkey("Binary key", key, 8)
#print("Generating PC-1 permutation for the key...")
key = generate_from_table(key, PC1)
#printkey("PC-1 permutation of the key", key, 8)
#print("Generating block pairs..")
key_list = generate_block_pairs(key)
#print("Generating subkeys..")
generate_subkeys(key_list)
mess = generate_from_table(cipher1, IP)
#printkey("Initial permutation of messa", mess, 8)
cipher1 = encrypt(mess, key_list)
#printkey("Cipher text (Binary)", cipher1, 8)
cipher = bin_to_hex(cipher1)
print("Cipher text(Hex) = " , cipher)
```
#### File: Cryptography/lab5/des.py
```python
binary1 = {0:'0000', 1:'0001', 2:'0010', 3:'0011', 4:'0100', 5:'0101', 6:'0110', 7:'0111', 8:'1000',
9:'1001', 10:'1010', 11:'1011', 12:'1100', 13:'1101', 14:'1110', 15:'1111'}
binary = {'0':'0000', '1':'0001', '2':'0010', '3':'0011', '4':'0100', '5':'0101', '6':'0110', '7':'0111',
'8':'1000', '9':'1001', 'A':'1010', 'B':'1011', 'C':'1100', 'D':'1101', 'E':'1110', 'F':'1111'}
shift = {1:1, 2:1, 3:2, 4:2, 5:2, 6:2, 7:2, 8:2, 9:1, 10:2, 11:2, 12:2, 13:2, 14:2, 15:2, 16:1}
q = {'00':0, '01':1, '10':2, '11':3}
q1 ={'0000':'0', '0001':'1', '0010':'2', '0011':'3', '0100':'4', '0101':'5', '0110':'6', '0111':'7',
'1000':'8', '1001':'9', '1010':'A', '1011':'B', '1100':'C', '1101':'D', '1110':'E', '1111':'F'}
q11 ={'0000':0, '0001':1, '0010':2, '0011':3, '0100':4, '0101':5, '0110':6, '0111':7, '1000':8,
'1001':9, '1010':10, '1011':11, '1100':12, '1101':13, '1110':14, '1111':15}
comp={'0':'F', '1':'E', '2':'D', '3':'C', '4':'B', '5':'A', '6':'9', '7':'8', '8':'7', '9':'6', 'A':'5', 'B':'4', 'C':'3',
'D':'2', 'E':'1', 'F':'0'}
#parity drop table
PC1=[57,49,41,33,25,17,9,1,58,50,42,34,26,18,10,2,59,51,43,35,27,19,11,3,60,52,44,36,63,55,47,39,31,23,15,7,62,54,46,38,30,22,14,6,61,53,45,37,29,21,13,5,28,20,12,4]
#Key compression table
PC2=[14,17,11,24,1,5,3,28,15,6,21,10,23,19,12,4,26,8,16,7,27,20,13,2,41,52,31,37,47,55,30,40,51,45,33,48,44,49,39,56,34,53,46,42,50,36,29,32]
IP1=[58,50,42,34,26,18,10,2,60,52,44,36,28,20,12,4,62,54,46,38,30,22,14,6,64,56,48,40,32,24,16,8,57,49,41,33,25,17,9,1,59,51,43,35,27,19,11,3,61,53,45,37,29,21,13,5,63,55,47,39,31,23,15,7]
EBIT=[32,1,2,3,4,5,4,5,6,7,8,9,8,9,10,11,12,13,12,13,14,15,16,17,16,17,18,19,20,21,20,21,22,23,24,25,24,25,26,27,28,29,28,29,30,31,32,1]
P=[16,7,20,21,29,12,28,17,1,15,23,26,5,18,31,10,2,8,24,14,32,27,3,9,19,13,30,6,22,11,4,25]
IPP=[40,8,48,16,56,24,64,32,39,7,47,15,55,23,63,31,38,6,46,14,54,22,62,30,37,5,45,13,53,21,61,29,36,4,44,12,52,20,60,28,35,3,43,11,51,19,59,27,34,2,42,10,50,18,58,26,33,1,41,9,49,17,57,25]
s1=[14,4,13,1,2,15,11,8,3,10,6,12,5,9,0,7,0,15,7,4,14,2,13,1,10,6,12,11,9,5,3,8,4,1,14,8,13,6,2,11,15,12,9,7,3,10,5,0,15,12,8,2,4,9,1,7,5,11,3,14,10,0,6,13]
s2=[15,1,8,14,6,11,3,4,9,7,2,13,12,0,5,10,3,13,4,7,15,2,8,14,12,0,1,10,6,9,11,5,0,14,7,11,10,4,13,1,5,8,12,6,9,3,2,15,13,8,10,1,3,15,4,2,11,6,7,12,0,5,14,9]
s3=[10,0,9,14,6,3,15,5,1,13,12,7,11,4,2,8,13,7,0,9,3,4,6,10,2,8,5,14,12,11,15,1,13,6,4,9,8,15,3,0,11,1,2,12,5,10,14,7,1,10,13,0,6,9,8,7,4,15,14,3,11,5,2,12]
s4=[7,13,14,3,0,6,9,10,1,2,8,5,11,12,4,15,13,8,11,5,6,15,0,3,4,7,3,12,1,10,14,9,10,6,9,0,12,11,7,13,15,1,3,14,5,2,8,4,3,15,0,6,10,1,13,8,9,4,5,11,12,7,2,14]
s5=[2,12,4,1,7,10,11,6,8,5,3,15,13,0,14,9,14,11,2,12,4,7,13,1,5,0,15,10,3,9,8,6,4,2,1,11,10,13,7,8,15,9,12,5,6,3,0,14,11,8,12,7,1,14,2,13,6,15,0,9,10,4,5,3]
s6=[12,1,10,15,9,2,6,8,0,13,3,4,14,7,5,11,10,15,4,2,7,12,9,5,6,1,13,14,0,11,3,8,9,14,15,5,2,8,12,3,7,0,4,10,1,13,11,6,4,3,2,12,9,5,15,10,11,14,1,7,6,0,8,13]
s7=[4,11,2,14,15,0,8,13,3,12,9,7,5,10,6,1,13,0,11,7,4,9,1,10,14,3,5,12,2,15,8,6,1,4,11,13,12,3,7,14,10,15,6,8,0,5,9,2,6,11,13,8,1,4,10,7,9,5,0,15,14,2,3,12]
s8=[13,2,8,4,6,15,11,1,10,9,3,14,5,0,12,7,1,15,13,8,10,3,7,4,12,5,6,11,0,14,9,2,7,11,4,1,9,12,14,2,0,6,10,13,15,3,5,8,2,1,14,7,4,10,8,13,15,12,9,0,3,5,6,11]
s = [s1] + [s2] + [s3] + [s4] + [s5] + [s6] + [s7] + [s8]
def EE(R):
R_ = ""
for i in range(0,48):
R_ = R_ + str(R[EBIT[i]-1])
return R_
def xor(a1,a2):
empty = ""
x = len(a1)
for i in range(0,x):
if(a1[i] == a2[i]):
empty = empty + '0'
else:
empty = empty + '1'
return empty
def spfunc(temp):
B = [0] * 8
S = [0] * 8
S1 =[0] * 8
SB =[0] * 8
    # split the 48-bit string into eight 6-bit chunks
    for i in range(0, 8):
        B[i] = temp[i*6:(i+1)*6]
for i in range(0,8):
S[i] = q[B[i][0] + B[i][len(B[i])-1]]
S1[i]= q11[B[i][1:5]]
for i in range(0,8):
SB[i] = binary1[s[i][int(S[i]*16) + int(S1[i])]]
return SB
def DES(x,K__,numb):
L = ""
R = ""
y = len(x)
for i in range(0,y//2):
L = L + binary[x[i]]
for i in range(y//2,y):
R = R + binary[x[i]]
K = ""
for i in range(0,len(K__)):
if(K__[i] in binary):
K = K + binary[K__[i]]
K_ = ""
IP = ""
IP_ = ""
IP_ = L + R
L0 = R0 = ""
C0 = D0 = ""
for i in range(0,len(PC1)):
K_ = K_ + K[PC1[i]-1]
    for i in range(0,len(IP1)):
        # use the initial-permutation table IP1 here; the original indexed the
        # final-permutation table IPP, which applies the inverse permutation
        IP = IP + IP_[IP1[i]-1]
print("IP=",IP)
y = len(K_)
for i in range(0,y//2):
C0 = C0 + K_[i]
for i in range(y//2,y):
D0 = D0 + K_[i]
y1 = len(IP)
for i in range(0,y1//2):
L0 = L0 + IP[i]
for i in range(y1//2,y1):
R0 = R0 + IP[i]
C = [0] * 17
D = [0]* 17
C[0] = C0
D[0] = D0
for i in range(1,17):
if shift[i] == 1:
C[i] = C[i-1][1:] + C[i-1][0]
D[i] = D[i-1][1:] + D[i-1][0]
else:
C[i] = C[i-1][2:] + C[i-1][0] + C[i-1][1]
D[i] = D[i-1][2:] + D[i-1][0] + D[i-1][1]
K = [0] * 16
K_=""
for i in range(0,16):
K[i] = C[i+1] + D[i+1]
if (numb == 1):
K = K[::-1]
for j in range(0,16):
for i in range(0,len(PC2)):
K_ = K_ + K[j][PC2[i]-1]
K[j] = K_
K_ = ""
L = [0] * 17
R = [0] * 17
L[0] = L0
R[0] = R0
print("\t\tleft right round key")
for i in range(1,17):
print("Round",i,end=" ")
L[i] = R[i-1]
temp = EE(R[i-1])
temp1 = xor(temp,K[i-1])
t1 = spfunc(temp1)
t = ""
t2 = ""
for j in range(0,8):
t2 = t2 + t1[j]
for j in range(0,len(t2)):
t = t + t2[P[j]-1]
R[i] = xor(L[i-1],t)
L1=R1=""
for j in range(0,len(L[i])//4):
L1 =L1 + q1[L[i][j*4:(j+1)*4]]
R1 =R1 + q1[R[i][j*4:(j+1)*4]]
K1=""
for j in range(0,len(K[i-1])//4):
K1 =K1 + q1[K[i-1][j*4:(j+1)*4]]
print("\t\t",L1,R1,K1)
R16L16 = R[16] + L[16]
l = ""
for i in range(0,len(R16L16)):
l = l + R16L16[IPP[i]-1]
C = ""
for i in range(0,len(l)//4):
C = C + q1[l[i*4:(i+1)*4]]
return C
x = input("Enter Original Plaintext : ")
K__ = input("Enter Key1 : ")
#Encryption -> '0' -> To indicate that keys must be in same order i.e., K1K2K3...
C = DES(x,K__,0)
print('Original Ciphertext: ',C)
#Decryption -> '1' -> To indicate that keys must be in reversed order i.e., K16K15K14...
c = DES(C,K__,1)
print('Plain Text: ',c)
``` |
{
"source": "101hof010/poker-player-azdprgnd5ulir8tzhthvttbyg8d0nad2hpwwpg6muj",
"score": 3
} |
#### File: 101hof010/poker-player-azdprgnd5ulir8tzhthvttbyg8d0nad2hpwwpg6muj/player.py
```python
import sys
import json
import random
import time
import os
class Player:
VERSION = "Vroomfondel"
def betRequest(self, game_state):
sys.stderr.write("\n\nData: " + str(game_state) + "\n\n")
try:
current_buy_in = game_state['current_buy_in']
players = game_state['players']
minimum_raise = game_state['minimum_raise']
index = 0
for i in range(0, len(players)):
if players[i]['name'] == 'azDpRGnd5ULir8TzHtHvttByG8D0nAd2hPWwpg6MUJ':
index = i
stack = game_state['players'][index]['stack']
#if stack > current_buy_in - players[index]['bet'] + minimum_raise:
# return stack/2
#else:
# return 0
#return random.randint(0, stack)
#if random.randint(0, 6) == 6:
# return stack
try:
with open("foo", "r") as f:
struct = json.loads(f.read())
except:
struct = {'score':0}
cards = []
for card in players[index]['hole_cards']:
cards.append(card)
for card in game_state['community_cards']:
cards.append(card)
if game_state['community_cards'] == []:
sys.stderr.write("In Pre-Flop.")
minimum_raise = 0
else:
sys.stderr.write("Not in Pre-Flop.")
sys.stderr.write("\n\n### Currently, we have " + str(stack) + " Coins.\n\n")
score = self.check_cards(cards)
oscore = struct['score']
sys.stderr.write("\n\n### Old Score: " + str(oscore) + " New Score: " + str(score))
if oscore < score:
try:
with open("foo", "w") as f:
f.write(json.dumps({'score':score}))
except:
pass
else:
minimum_raise = 0
max_amount = score * stack/100
sys.stderr.write("\n\n### Going to a max of " + str(max_amount) + "\n\n")
if current_buy_in - players[index]['bet'] + minimum_raise <= max_amount:
sys.stderr.write("\n\n### We want to do it and have to set: " + str(current_buy_in - players[index]['bet'] + minimum_raise) + "\n\n")
if stack >= current_buy_in - players[index]['bet'] + minimum_raise:
sys.stderr.write("\n\n### We will set " + str(current_buy_in - players[index]['bet'] + minimum_raise) + "\n\n")
return current_buy_in - players[index]['bet'] + minimum_raise
else:
sys.stderr.write("\n\n### We can't set " + str(current_buy_in - players[index]['bet'] + minimum_raise) + "\n\n")
return 0
else:
sys.stderr.write("\n\n### We don't want to do it. We had to set: " + str(current_buy_in - players[index]['bet'] + minimum_raise) + " but we want to set a max of " + str(max_amount) + "\n\n")
return 0
except Exception as e:
sys.stderr.write("\n\n### There was a Problem: " + str(e) + "\n\n")
return 0
    def showdown(self, game_state):
        # os.remove is portable, unlike shelling out to "rm"
        try:
            os.remove("foo")
        except OSError:
            pass
def check_cards(self, a_cards):
"""
a_cards: 1st and 2nd cards are own cards,
other cards are open cards
return a score how good the cards are (between 0 and 100)
"""
score = 0
nscore = 0
cards = []
# Calculate the Card-IDs for every card
for card in a_cards:
if card['suit'] == 'diamonds':
cards.append(100)
elif card['suit'] == 'hearts':
cards.append(200)
elif card['suit'] == 'spades':
cards.append(300)
else:
cards.append(400)
if card['rank'] == 'A':
cards[len(cards)-1] += 14
elif card['rank'] == 'K':
cards[len(cards)-1] += 13
elif card['rank'] == 'Q':
cards[len(cards)-1] += 12
elif card['rank'] == 'J':
cards[len(cards)-1] += 11
else:
cards[len(cards)-1] += int(card['rank'])
sys.stderr.write("\n\n### cards = " + str(cards) + "\n\n")
# Same color
if cards[0] // 100 == cards[1] // 100:
nscore = 5
if nscore > score:
score = nscore
# Rank of J, Q, K, A
if cards[0] % 100 > 10:
nscore = 5
if cards[1] % 100 > 10:
nscore = 5
if nscore > score:
score = nscore
# Same rank
amount = {}
for card in cards:
if card % 100 not in amount:
amount[card % 100] = 1
else:
amount[card % 100] += 1
number = 0
for rank in amount:
if amount[rank] > number:
number = amount[rank]
if amount[rank] == number and number > 1:
# 2 Pairs
number = 5
        if number == 2:
            sys.stderr.write("\n\n### Pair \n\n")
            nscore = 10
        elif number == 5:
            sys.stderr.write("\n\n### Two Pair \n\n")
            nscore = 20
        elif number == 3:
            sys.stderr.write("\n\n### Three of a Kind \n\n")
            nscore = 35
        elif number == 4:
            sys.stderr.write("\n\n### Four of a Kind \n\n")
            nscore = 75
if nscore > score:
score = nscore
# Check for Straight
tmp = []
for card in cards:
tmp.append(card % 100)
tmp.sort()
temp = tmp[0]
try:
for i in range(1, 5):
if tmp[i] == temp + 1:
temp += 1
if temp == 15:
temp = 2
else:
temp = -1
except:
temp = -1
if temp != -1:
sys.stderr.write("\n\n### Straight \n\n")
nscore = 45
if nscore > score:
score = nscore
# Check for Flush
nscore = 50
tmp = cards[0] // 100
try:
for i in range(1, 5):
if tmp != cards[i] // 100:
nscore = 0
except:
nscore = 0
if nscore > score:
score = nscore
# Check for Full House
amount = {}
for card in cards:
if card % 100 not in amount:
amount[card % 100] = 1
else:
amount[card % 100] += 1
num = 0
fh = True
for am in amount:
if amount[am] == 2 and num != 2:
num = 2
elif amount[am] == 3 and num != 3:
num = 3
else:
fh = False
if fh:
sys.stderr.write("\n\n### Full House \n\n")
nscore = 60
if nscore > score:
score = nscore
# Straight Flush
tmp = []
for card in cards:
tmp.append(card % 100)
tmp.sort()
temp = tmp[0]
suit = cards[0]//100
try:
for i in range(1, 5):
                if tmp[i] == temp + 1 and cards[i] // 100 == suit:
                    temp += 1
                    if temp == 15:
                        temp = 2
                else:
                    # else restored (added): mirrors the straight and royal-flush
                    # checks so a single mismatch invalidates the run
                    temp = -1
except:
temp = -1
if temp != -1:
sys.stderr.write("\n\n### Straight Flush\n\n")
nscore = 90
if nscore > score:
score = nscore
# Royal Flush
tmp = []
for card in cards:
tmp.append(card % 100)
tmp.sort()
temp = 9
suit = cards[0]//100
try:
for i in range(0, 5):
if tmp[i] == temp + 1 and cards[i] // 100 == suit:
temp += 1
if temp == 15:
temp = 2
else:
temp = -1
except:
temp = -1
if temp != -1:
sys.stderr.write("\n\n### Royal Flush\n\n")
nscore = 100
if nscore > score:
score = nscore
#if cards[0]//100 == cards[1] // 100:
# # same color
# pass
return score
``` |
{
"source": "101Loop/drf-bulk",
"score": 2
} |
#### File: 101Loop/drf-bulk/setup.py
```python
from __future__ import unicode_literals
import os
from setuptools import find_packages, setup
from rest_framework_bulk import __version__, __author__
def read(fname):
return (
open(os.path.join(os.path.dirname(__file__), fname), "rb")
.read()
.decode("utf-8")
)
authors = read("AUTHORS.rst")
history = read("HISTORY.rst").replace(".. :changelog:", "")
licence = read("LICENSE.rst")
readme = read("README.rst")
requirements = read("requirements.txt").splitlines() + [
"setuptools",
]
test_requirements = (
read("requirements.txt").splitlines()
+ read("requirements-dev.txt").splitlines()[1:]
)
setup(
name="drf-bulk",
version=__version__,
author=__author__,
author_email="<EMAIL>",
description="Django REST Framework bulk CRUD view mixins",
long_description="\n\n".join([readme, history, authors, licence]),
url="https://github.com/101loop/drf-bulk",
license="MIT",
keywords="django",
packages=find_packages(),
install_requires=requirements,
tests_require=test_requirements,
classifiers=[
"Development Status :: 3 - Alpha",
"Framework :: Django",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Utilities",
"Topic :: Internet :: WWW/HTTP",
"License :: OSI Approved :: MIT License",
],
)
``` |
{
"source": "1021149914/oj",
"score": 2
} |
#### File: migrations/versions/b1a8c4a51bc8_users_table.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b1a8c4a51bc8'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('userid', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('useremail', sa.String(length=120), nullable=True),
sa.Column('userpassword', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('userid')
)
op.create_index(op.f('ix_user_useremail'), 'user', ['useremail'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_useremail'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
```
#### File: migrations/versions/c00e3d390f1b_tot_table.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c00e3d390f1b'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('contest',
sa.Column('contestid', sa.Integer(), nullable=False),
sa.Column('contestbegin', sa.DateTime(), nullable=True),
sa.Column('contestlen', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('contestid')
)
op.create_table('problem',
sa.Column('problemid', sa.Integer(), nullable=False),
sa.Column('problemms', sa.Integer(), nullable=True),
sa.Column('problemkb', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('problemid')
)
op.create_table('commit',
sa.Column('commitid', sa.Integer(), nullable=False),
sa.Column('userid', sa.Integer(), nullable=True),
sa.Column('problemid', sa.Integer(), nullable=True),
sa.Column('committime', sa.DateTime(), nullable=True),
sa.Column('commitans', sa.String(length=64), nullable=True),
sa.Column('commitms', sa.Integer(), nullable=True),
sa.Column('commitkb', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['problemid'], ['problem.problemid'], ),
sa.ForeignKeyConstraint(['userid'], ['user.userid'], ),
sa.PrimaryKeyConstraint('commitid')
)
op.create_index(op.f('ix_commit_committime'), 'commit', ['committime'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_commit_committime'), table_name='commit')
op.drop_table('commit')
op.drop_table('problem')
op.drop_table('contest')
# ### end Alembic commands ###
``` |
{
"source": "1021256354/ELE-Clouds",
"score": 2
} |
#### File: WIFI/httpcc/netConnect.py
```python
u'''
******************************************************************************
* File    : netConnect.py
* Overview: network connection helper functions
* Version : V0.10
* Author  : <NAME>
* Date    : May 8, 2018
* History : Date          Editor    Version  Notes
            May 8, 2018   <NAME>    V0.10    Created file
******************************************************************************'''
import network
import time
# Set the current device to station ("client") mode and connect to the WIFI network
def connectWifi(_ssid, _passwd):
global wlan
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
wlan.disconnect()
wlan.connect(_ssid, _passwd)
while (wlan.ifconfig()[0] == '0.0.0.0'):
        time.sleep(3)  # try again after 3 seconds
return wlan.ifconfig()[0]
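# Usage sketch (added; the SSID and password below are placeholders):
#   ip = connectWifi('my-ssid', 'my-password')
#   print(ip)  # e.g. '192.168.1.20' once the station interface gets an address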
# Set the current device to AP mode
``` |
{
"source": "1021256354/NXP-MCUBootUtility",
"score": 2
} |
#### File: src/ui/ui_cfg_flexspinor.py
```python
import wx
import sys
import os
import math
import uidef
import uivar
import uilang
sys.path.append(os.path.abspath(".."))
from win import bootDeviceWin_FlexspiNor
from utils import sound
class secBootUiCfgFlexspiNor(bootDeviceWin_FlexspiNor.bootDeviceWin_FlexspiNor):
def __init__(self, parent):
bootDeviceWin_FlexspiNor.bootDeviceWin_FlexspiNor.__init__(self, parent)
self._setLanguage()
flexspiNorOpt0, flexspiNorOpt1, flexspiDeviceModel = uivar.getBootDeviceConfiguration(uidef.kBootDevice_XspiNor)
#1. Prepare Flash option
# 0xc0000006 is the tag for Serial NOR parameter selection
# bit [31:28] Tag fixed to 0x0C
# bit [27:24] Option size fixed to 0
# bit [23:20] Flash type option
# 0 - QuadSPI SDR NOR
# 1 - QUadSPI DDR NOR
# bit [19:16] Query pads (Pads used for query Flash Parameters)
# 0 - 1
# bit [15:12] CMD pads (Pads used for query Flash Parameters)
# 0 - 1
# bit [11: 08] Quad Mode Entry Setting
# 0 - Not Configured, apply to devices:
# - With Quad Mode enabled by default or
# - Compliant with JESD216A/B or later revision
# 1 - Set bit 6 in Status Register 1
# 2 - Set bit 1 in Status Register 2
# 3 - Set bit 7 in Status Register 2
# 4 - Set bit 1 in Status Register 2 by 0x31 command
# bit [07: 04] Misc. control field
# 3 - Data Order swapped, used for Macronix OctaFLASH devcies only (except MX25UM51345G)
# 4 - Second QSPI NOR Pinmux
# bit [03: 00] Flash Frequency, device specific
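        # Illustrative decode (added, values are an assumption rather than from
        # the original source): for a QuadSPI SDR option word 0xC0000007,
        #   (opt0 >> 20) & 0xF == 0 -> QuadSPI SDR NOR
        #   (opt0 >> 16) & 0xF == 0 -> query Flash parameters over 1 pad
        #   (opt0 >>  0) & 0xF == 7 -> 133MHz, per _getMaxFrequency() below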
self.flexspiNorOpt0 = flexspiNorOpt0
self.flexspiNorOpt1 = flexspiNorOpt1
self.flexspiDeviceModel = flexspiDeviceModel
self._recoverLastSettings()
def _setLanguage( self ):
runtimeSettings = uivar.getRuntimeSettings()
langIndex = runtimeSettings[3]
self.m_staticText_deviceModel.SetLabel(uilang.kSubLanguageContentDict['sText_deviceModel'][langIndex])
self.m_notebook_norOpt0.SetPageText(0, uilang.kSubLanguageContentDict['panel_norOpt0'][langIndex])
self.m_staticText_deviceType.SetLabel(uilang.kSubLanguageContentDict['sText_deviceType'][langIndex])
self.m_staticText_queryPads.SetLabel(uilang.kSubLanguageContentDict['sText_queryPads'][langIndex])
self.m_staticText_cmdPads.SetLabel(uilang.kSubLanguageContentDict['sText_cmdPads'][langIndex])
self.m_staticText_quadModeSetting.SetLabel(uilang.kSubLanguageContentDict['sText_quadModeSetting'][langIndex])
self.m_staticText_miscMode.SetLabel(uilang.kSubLanguageContentDict['sText_miscMode'][langIndex])
self.m_staticText_maxFrequency.SetLabel(uilang.kSubLanguageContentDict['sText_maxFrequency'][langIndex])
self.m_staticText_hasOption1.SetLabel(uilang.kSubLanguageContentDict['sText_hasOption1'][langIndex])
self.m_notebook_norOpt1.SetPageText(0, uilang.kSubLanguageContentDict['panel_norOpt1'][langIndex])
self.m_staticText_flashConnection.SetLabel(uilang.kSubLanguageContentDict['sText_flashConnection'][langIndex])
self.m_staticText_driveStrength.SetLabel(uilang.kSubLanguageContentDict['sText_driveStrength'][langIndex])
self.m_staticText_dqsPinmuxGroup.SetLabel(uilang.kSubLanguageContentDict['sText_dqsPinmuxGroup'][langIndex])
self.m_staticText_enableSecondPinmux.SetLabel(uilang.kSubLanguageContentDict['sText_enableSecondPinmux'][langIndex])
self.m_staticText_statusOverride.SetLabel(uilang.kSubLanguageContentDict['sText_statusOverride'][langIndex])
self.m_staticText_dummyCycles.SetLabel(uilang.kSubLanguageContentDict['sText_dummyCycles'][langIndex])
self.m_button_ok.SetLabel(uilang.kSubLanguageContentDict['button_flexspinor_ok'][langIndex])
self.m_button_cancel.SetLabel(uilang.kSubLanguageContentDict['button_flexspinor_cancel'][langIndex])
def _updateOpt1Field ( self, isEnabled ):
if isEnabled:
self.m_choice_flashConnection.Enable( True )
self.m_textCtrl_driveStrength.Enable( True )
self.m_textCtrl_dqsPinmuxGroup.Enable( True )
self.m_choice_enableSecondPinmux.Enable( True )
self.m_textCtrl_statusOverride.Enable( True )
self.m_textCtrl_dummyCycles.Enable( True )
else:
self.m_choice_flashConnection.Enable( False )
self.m_textCtrl_driveStrength.Enable( False )
self.m_textCtrl_dqsPinmuxGroup.Enable( False )
self.m_choice_enableSecondPinmux.Enable( False )
self.m_textCtrl_statusOverride.Enable( False )
self.m_textCtrl_dummyCycles.Enable( False )
def _recoverLastSettings ( self ):
self.m_choice_deviceMode.SetSelection(self.m_choice_deviceMode.FindString(self.flexspiDeviceModel))
deviceType = (self.flexspiNorOpt0 & 0x00F00000) >> 20
self.m_choice_deviceType.SetSelection(deviceType)
queryPads = (self.flexspiNorOpt0 & 0x000F0000) >> 16
if queryPads == 0:
self.m_choice_queryPads.SetSelection(queryPads)
else:
self.m_choice_queryPads.SetSelection(queryPads - 1)
cmdPads = (self.flexspiNorOpt0 & 0x0000F000) >> 12
        if cmdPads == 0:  # was queryPads, a copy-paste slip from the block above
self.m_choice_cmdPads.SetSelection(cmdPads)
else:
self.m_choice_cmdPads.SetSelection(cmdPads - 1)
quadModeSetting = (self.flexspiNorOpt0 & 0x00000F00) >> 8
self.m_choice_quadModeSetting.SetSelection(quadModeSetting)
miscMode = (self.flexspiNorOpt0 & 0x000000F0) >> 4
self.m_choice_miscMode.SetSelection(miscMode)
maxFrequency = (self.flexspiNorOpt0 & 0x0000000F) >> 0
self.m_choice_maxFrequency.SetSelection(maxFrequency - 1)
hasOption1 = (self.flexspiNorOpt0 & 0x0F000000) >> 24
self.m_choice_hasOption1.SetSelection(hasOption1)
if hasOption1 == 0:
self._updateOpt1Field(False)
else:
self._updateOpt1Field(True)
flashConnection = (self.flexspiNorOpt1 & 0xF0000000) >> 28
self.m_choice_flashConnection.SetSelection(flashConnection)
driveStrength = (self.flexspiNorOpt1 & 0x0F000000) >> 24
self.m_textCtrl_driveStrength.Clear()
self.m_textCtrl_driveStrength.write(str(driveStrength))
dqsPinmuxGroup = (self.flexspiNorOpt1 & 0x00F00000) >> 20
self.m_textCtrl_dqsPinmuxGroup.Clear()
self.m_textCtrl_dqsPinmuxGroup.write(str(dqsPinmuxGroup))
enableSecondPinmux = (self.flexspiNorOpt1 & 0x000F0000) >> 16
self.m_choice_enableSecondPinmux.SetSelection(enableSecondPinmux)
statusOverride = (self.flexspiNorOpt1 & 0x0000FF00) >> 8
self.m_textCtrl_statusOverride.Clear()
self.m_textCtrl_statusOverride.write(str(statusOverride))
dummyCycles = (self.flexspiNorOpt1 & 0x000000FF) >> 0
self.m_textCtrl_dummyCycles.Clear()
self.m_textCtrl_dummyCycles.write(str(dummyCycles))
def _getDeviceType( self ):
txt = self.m_choice_deviceType.GetString(self.m_choice_deviceType.GetSelection())
if txt == 'QuadSPI SDR NOR':
val = 0x0
elif txt == 'QuadSPI DDR NOR':
val = 0x1
elif txt == 'Hyper Flash 1.8V':
val = 0x2
elif txt == 'Hyper Flash 3.0V':
val = 0x3
elif txt == 'Macronix Octal DDR':
val = 0x4
elif txt == 'Macronix Octal SDR':
val = 0x5
elif txt == 'Micron Octal DDR':
val = 0x6
elif txt == 'Micron Octal SDR':
val = 0x7
elif txt == 'Adesto EcoXIP DDR':
val = 0x8
elif txt == 'Adesto EcoXIP SDR':
val = 0x9
else:
pass
self.flexspiNorOpt0 = (self.flexspiNorOpt0 & 0xFF0FFFFF) | (val << 20)
def _getQueryPads( self ):
val = int(self.m_choice_queryPads.GetString(self.m_choice_queryPads.GetSelection()))
val = int(math.log(val, 2))
self.flexspiNorOpt0 = (self.flexspiNorOpt0 & 0xFFF0FFFF) | (val << 16)
def _getCmdPads( self ):
val = int(self.m_choice_cmdPads.GetString(self.m_choice_cmdPads.GetSelection()))
val = int(math.log(val, 2))
self.flexspiNorOpt0 = (self.flexspiNorOpt0 & 0xFFFF0FFF) | (val << 12)
def _getQuadModeSetting( self ):
txt = self.m_choice_quadModeSetting.GetString(self.m_choice_quadModeSetting.GetSelection())
if txt == 'Not Configured':
val = 0x0
elif txt == 'Set StatusReg1[6]':
val = 0x1
elif txt == 'Set StatusReg2[1]':
val = 0x2
elif txt == 'Set StatusReg2[7]':
val = 0x3
elif txt == 'Set StatusReg2[1] by 0x31':
val = 0x4
else:
pass
self.flexspiNorOpt0 = (self.flexspiNorOpt0 & 0xFFFFF0FF) | (val << 8)
def _getMiscMode( self ):
txt = self.m_choice_miscMode.GetString(self.m_choice_miscMode.GetSelection())
if txt == 'Disabled':
val = 0x0
elif txt == '0_4_4 Mode':
val = 0x1
elif txt == '0_8_8 Mode':
val = 0x2
elif txt == 'Data Order Swapped':
val = 0x3
else:
pass
self.flexspiNorOpt0 = (self.flexspiNorOpt0 & 0xFFFFFF0F) | (val << 4)
def _getMaxFrequency( self ):
txt = self.m_choice_maxFrequency.GetString(self.m_choice_maxFrequency.GetSelection())
if txt == '30MHz':
val = 0x1
elif txt == '50MHz':
val = 0x2
elif txt == '60MHz':
val = 0x3
elif txt == '75MHz':
val = 0x4
elif txt == '80MHz':
val = 0x5
elif txt == '100MHz':
val = 0x6
elif txt == '133MHz':
val = 0x7
elif txt == '166MHz':
val = 0x8
else:
pass
self.flexspiNorOpt0 = (self.flexspiNorOpt0 & 0xFFFFFFF0) | (val << 0)
def _getHasOpt1( self ):
txt = self.m_choice_hasOption1.GetString(self.m_choice_hasOption1.GetSelection())
if txt == 'No':
val = 0x0
elif txt == 'Yes':
val = 0x1
else:
            return  # unknown selection: leave the option word unchanged
self.flexspiNorOpt0 = (self.flexspiNorOpt0 & 0xF0FFFFFF) | (val << 24)
def _getFlashConnection( self ):
txt = self.m_choice_flashConnection.GetString(self.m_choice_flashConnection.GetSelection())
if txt == 'Single Port A':
val = 0x0
elif txt == 'Parallel':
val = 0x1
elif txt == 'Single Port B':
val = 0x2
elif txt == 'Both Ports':
val = 0x3
else:
            return  # unknown flash connection: leave the option word unchanged
self.flexspiNorOpt1 = (self.flexspiNorOpt1 & 0x0FFFFFFF) | (val << 28)
def _getDriveStrength( self ):
val = int(self.m_textCtrl_driveStrength.GetLineText(0))
self.flexspiNorOpt1 = (self.flexspiNorOpt1 & 0xF0FFFFFF) | (val << 24)
def _getDqsPinmuxGroup( self ):
val = int(self.m_textCtrl_dqsPinmuxGroup.GetLineText(0))
self.flexspiNorOpt1 = (self.flexspiNorOpt1 & 0xFF0FFFFF) | (val << 20)
def _getEnableSecondPinmux( self ):
txt = self.m_choice_enableSecondPinmux.GetString(self.m_choice_enableSecondPinmux.GetSelection())
if txt == 'No':
val = 0x0
elif txt == 'Yes':
val = 0x1
else:
            return  # unknown selection: leave the option word unchanged
self.flexspiNorOpt1 = (self.flexspiNorOpt1 & 0xFFF0FFFF) | (val << 16)
def _getStatusOverride( self ):
val = int(self.m_textCtrl_statusOverride.GetLineText(0))
self.flexspiNorOpt1 = (self.flexspiNorOpt1 & 0xFFFF00FF) | (val << 8)
def _getDummyCycles( self ):
val = int(self.m_textCtrl_dummyCycles.GetLineText(0))
self.flexspiNorOpt1 = (self.flexspiNorOpt1 & 0xFFFFFF00) | (val << 0)
def callbackUseTypicalDeviceModel( self, event ):
txt = self.m_choice_deviceMode.GetString(self.m_choice_deviceMode.GetSelection())
self.flexspiDeviceModel = txt
if txt == uidef.kFlexspiNorDevice_ISSI_IS25LP064A:
self.flexspiNorOpt0 = uidef.kFlexspiNorOpt0_ISSI_IS25LP064A
elif txt == uidef.kFlexspiNorDevice_ISSI_IS26KS512S:
self.flexspiNorOpt0 = uidef.kFlexspiNorOpt0_ISSI_IS26KS512S
elif txt == uidef.kFlexspiNorDevice_MXIC_MX25UM51245G:
self.flexspiNorOpt0 = uidef.kFlexspiNorOpt0_MXIC_MX25UM51245G
elif txt == uidef.kFlexspiNorDevice_MXIC_MX25UM51345G:
self.flexspiNorOpt0 = uidef.kFlexspiNorOpt0_MXIC_MX25UM51345G
elif txt == uidef.kFlexspiNorDevice_Micron_MT35X:
self.flexspiNorOpt0 = uidef.kFlexspiNorOpt0_Micron_MT35X
elif txt == uidef.kFlexspiNorDevice_Adesto_AT25SF128A:
self.flexspiNorOpt0 = uidef.kFlexspiNorOpt0_Adesto_AT25SF128A
elif txt == uidef.kFlexspiNorDevice_Adesto_ATXP032:
self.flexspiNorOpt0 = uidef.kFlexspiNorOpt0_Adesto_ATXP032
elif txt == uidef.kFlexspiNorDevice_Cypress_S26KS512S:
self.flexspiNorOpt0 = uidef.kFlexspiNorOpt0_Cypress_S26KS512S
elif txt == uidef.kFlexspiNorDevice_GigaDevice_GD25LB256E:
self.flexspiNorOpt0 = uidef.kFlexspiNorOpt0_GigaDevice_GD25LB256E
elif txt == uidef.kFlexspiNorDevice_GigaDevice_GD25LT256E:
self.flexspiNorOpt0 = uidef.kFlexspiNorOpt0_GigaDevice_GD25LT256E
elif txt == uidef.kFlexspiNorDevice_GigaDevice_GD25LX256E:
self.flexspiNorOpt0 = uidef.kFlexspiNorOpt0_GigaDevice_GD25LX256E
elif txt == uidef.kFlexspiNorDevice_Winbond_W25Q128JV:
self.flexspiNorOpt0 = uidef.kFlexspiNorOpt0_Winbond_W25Q128JV
else:
pass
if txt != 'No':
self._recoverLastSettings()
def callbackHasOption1( self, event ):
txt = self.m_choice_hasOption1.GetString(self.m_choice_hasOption1.GetSelection())
if txt == 'No':
self._updateOpt1Field(False)
elif txt == 'Yes':
self._updateOpt1Field(True)
else:
pass
def callbackOk( self, event ):
self._getDeviceType()
self._getQueryPads()
self._getCmdPads()
self._getQuadModeSetting()
self._getMiscMode()
self._getMaxFrequency()
self._getHasOpt1()
hasOption1 = (self.flexspiNorOpt0 & 0x0F000000) >> 24
if hasOption1:
self._getFlashConnection()
self._getDriveStrength()
self._getDqsPinmuxGroup()
self._getEnableSecondPinmux()
self._getStatusOverride()
self._getDummyCycles()
uivar.setBootDeviceConfiguration(uidef.kBootDevice_XspiNor, self.flexspiNorOpt0, self.flexspiNorOpt1, self.flexspiDeviceModel)
uivar.setRuntimeSettings(False)
self.Show(False)
runtimeSettings = uivar.getRuntimeSettings()
sound.playSoundEffect(runtimeSettings[1], runtimeSettings[2], uidef.kSoundEffectFilename_Progress)
def callbackCancel( self, event ):
uivar.setRuntimeSettings(False)
self.Show(False)
def callbackClose( self, event ):
uivar.setRuntimeSettings(False)
self.Show(False)
``` |
{
"source": "1021ky/nlp100_2020",
"score": 4
} |
#### File: nlp100_2020/chapter1/1_08.py
```python
def cipher(param: str):
result = ''
for s in param:
if s.isalpha() and not s.isupper():
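            # 219 == ord('a') + ord('z'), so chr(219 - ord(s)) mirrors each lowercase
            # letter around the middle of the alphabet (an Atbash-style cipher)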
result += chr(219 - ord(s))
else:
result += s
return result
encrypt = cipher('Permalink')
print(encrypt)
decrypt = cipher(encrypt)
print(decrypt)
``` |
{
"source": "1021ky/radiko_recorder",
"score": 3
} |
#### File: src/radiko/recorder.py
```python
from datetime import datetime, timedelta, timezone
import requests
from requests.exceptions import Timeout
import os
import sys
import time
import logging
import ffmpeg
import m3u8
from gcloud.storage import upload_blob
from radiko.authorization import Authorization
JST = timezone(timedelta(hours=+9), 'JST')
class RadikoRecorder(object):
"""Radikoの録音クラス"""
_MASTER_PLAYLIST_BASE_URL = 'https://rpaa.smartstream.ne.jp/so/playlist.m3u8'
    _DUMMY_LSID = '11111111111111111111111111111111111111'  # the Radiko API spec requires a 38-digit string
def __init__(self, station, record_time, outfile):
self._headers = self._make_headers()
self._station = station
self._record_time = record_time
self._file = outfile
def _make_headers(self):
"""HTTPリクエストのヘッダーを作成する"""
headers = Authorization().get_auththenticated_headers()
headers['Connection']='keep-alive'
logging.debug(f'headers: {headers}')
return headers
def _make_master_playlist_url(self):
"""master playlistのURLを作成する"""
url = f'{RadikoRecorder._MASTER_PLAYLIST_BASE_URL}?station_id={self._station}&l=15&lsid={RadikoRecorder._DUMMY_LSID}&type=b'
logging.debug(f'playlist url:{url}')
return url
def _make_audio_headers(self):
"""音声取得用HTTPリクエストのヘッダーを作成する
requests用のhttpヘッダーをもとにffmpeg用に文字列のHTTPリクエストヘッダーを作る。
"""
header_list = [f'{k}: {v}'for k, v in self._headers.items()]
audio_headers = '\r\n'.join(header_list)+'\r\n'
logging.debug(f'audio headers: {audio_headers}')
return audio_headers
def _get_media_playlist_url(self):
"""media playlistのURLを取得する"""
u = self._make_master_playlist_url()
r = requests.get(url=u, headers=self._headers)
if r.status_code != 200:
logging.warning('failed to get media playlist url')
logging.warning(f'status_code:{r.status_code}')
logging.warning(f'content:{r.content}')
raise Exception('failed in radiko get media playlist')
m3u8_obj = m3u8.loads(r.content.decode('utf-8'))
media_playlist_url = m3u8_obj.playlists[0].uri
logging.debug(f'media_playlist_url: {media_playlist_url}')
return media_playlist_url
def _get_media_url(self, media_playlist_url):
"""音声ファイルのURLをmedia playlistから取得する"""
query_time = int(datetime.now(tz=JST).timestamp() * 100)
r = requests.get(url=f'{media_playlist_url}&_={query_time}',headers=self._headers)
logging.debug(f'aac url:{media_playlist_url}&_={query_time}')
if r.status_code != 200:
return None
m3u8_obj = m3u8.loads(str(r.content.decode('utf-8')))
return [(s.program_date_time, s.uri) for s in m3u8_obj.segments]
def record(self):
"""録音する"""
logging.debug('record start')
media_playlist_url = self._get_media_playlist_url()
end = datetime.now() + timedelta(minutes=self._record_time)
recorded = set()
while(datetime.now() <= end):
url_list = self._get_media_url(media_playlist_url)
            if url_list is None:
                # retrying after a short wait sometimes succeeds, so back off
time.sleep(3.0)
continue
headers = self._make_audio_headers()
            # fetch the audio segments listed in the m3u8 playlist, skipping any already recorded
for dt, url in url_list:
if dt in recorded:
continue
if not os.path.isdir('./tmp'):
os.mkdir('./tmp')
try:
ffmpeg\
.input(filename=url, f='aac', headers=headers)\
.output(filename=f'./tmp/{dt}.aac')\
.run(capture_stdout=True)
except Exception as e:
logging.warning('failed in run ffmpeg')
logging.warning(e)
recorded.add(dt)
time.sleep(5.0)
logging.debug('record end')
return recorded
def record(station, program, rtime, outfilename):
    # perform the recording
recorder = RadikoRecorder(station, rtime, outfilename)
recorded = recorder.record()
    # merge the recorded AAC segments into a single file
l = sorted(recorded)
files = [f'./tmp/{e}.aac' for e in l]
try:
streams = [ffmpeg.input(filename=f) for f in files]
ffmpeg\
.concat(*streams,a=1,v=0)\
.output(filename=outfilename, absf='aac_adtstoasc')\
.run(capture_stdout=True)
except Exception as e:
logging.warning('failed in run ffmpeg concat')
logging.warning(e)
for f in files:
os.remove(f)
``` |
{
"source": "10220/quaternion",
"score": 2
} |
#### File: 10220/quaternion/setup.py
```python
from os import environ
from sys import platform
version = None
on_windows = ('win' in platform.lower() and not 'darwin' in platform.lower())
if "package_version" in environ:
version = environ["package_version"]
print("Setup.py using environment version='{0}'".format(version))
else:
print("The variable 'package_version' was not present in the environment")
try:
# For cases where this is being installed from git. This gives the true version number.
from subprocess import check_output
if on_windows:
version = check_output("""git log -1 --format=%cd --date=format:'%Y.%m.%d.%H.%M.%S'""", shell=False)
version = version.decode('ascii').strip().replace('.0', '.').replace("'", "")
else:
try:
from subprocess import DEVNULL as devnull
version = check_output("""git log -1 --format=%cd --date=format:'%Y.%-m.%-d.%-H.%-M.%-S'""", shell=True, stderr=devnull)
except AttributeError:
from os import devnull
version = check_output("""git log -1 --format=%cd --date=format:'%Y.%-m.%-d.%-H.%-M.%-S'""", shell=True, stderr=devnull)
version = version.decode('ascii').rstrip()
print("Setup.py using git log version='{0}'".format(version))
except:
pass
if version is not None:
with open('_version.py', 'w') as f:
f.write('__version__ = "{0}"'.format(version))
else:
try:
from ast import literal_eval
with open('_version.py', 'r') as f:
first_line = f.readline()
version_string = first_line.split('=')[1].strip()
version = literal_eval(version_string)
except:
pass
long_description = """\
This package creates a quaternion type in python, and further enables numpy to create and manipulate arrays of
quaternions. The usual algebraic operations (addition and multiplication) are available, along with numerous
properties like norm and various types of distance measures between two quaternions. There are also
additional functions like "squad" and "slerp" interpolation, and conversions to and from axis-angle, matrix,
and Euler-angle representations of rotations. The core of the code is written in C for speed.
"""
if __name__ == "__main__":
# Note: Because pip may try to install this package before numpy (on which it depends, and which
# it needs to use *during* setup), we need to try to fail gracefully when numpy is not
# installed. The following mostly follows the strategy found in scipy's setup.py script, here:
# https://github.com/scipy/scipy/blob/9ccc68475fc431c4a44c120693cf6878cc4c14a7/setup.py#L180
import sys
setup_metadata = dict(
name='numpy-quaternion', # Uploaded to pypi under this name
packages=['quaternion'], # This is the actual package name, as used in python
package_dir={'quaternion': ''},
url='https://github.com/moble/quaternion',
author='<NAME>',
author_email='<EMAIL>',
description='Add built-in support for quaternions to numpy',
long_description=long_description,
)
if version is not None:
setup_metadata['version'] = version
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean')):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scipy when Numpy is not yet present in
# the system.
try:
from setuptools import setup
setup_metadata['install_requires'] = ['numpy>=1.13',]
setup_metadata['setup_requires'] = ['setuptools', 'wheel', 'numpy>=1.13',]
except ImportError:
from distutils.core import setup
else:
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
setup_metadata['install_requires'] = ['numpy>=1.13',]
setup_metadata['setup_requires'] = ['setuptools', 'wheel', 'numpy>=1.13',]
extension = Extension(
name='quaternion.numpy_quaternion', # This is the name of the object file that will be compiled
sources=['quaternion.c', 'numpy_quaternion.c'],
extra_compile_args=['/O2' if on_windows else '-O3'],
depends=['quaternion.c', 'quaternion.h', 'numpy_quaternion.c'],
)
setup_metadata['ext_modules'] = [extension]
class build_ext(_build_ext):
# This addition was suggested in https://stackoverflow.com/a/21621689/1194883
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
try:
__builtins__.__NUMPY_SETUP__ = False
except:
# For python 3
import builtins
builtins.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
if numpy.__dict__.get('quaternion') is not None:
from distutils.errors import DistutilsError
raise DistutilsError('The target NumPy already has a quaternion type')
setup_metadata['cmdclass'] = {'build_ext': build_ext}
setup(**setup_metadata)
``` |
{
"source": "10239847509238470925387z/tmp123",
"score": 3
} |
#### File: 10239847509238470925387z/tmp123/app.py
```python
import urllib
import json
import os
import constants
import accounts
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
PERSON = constants.TEST_1
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = makeWebhookResult(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
if req.get("result").get("action") != "account-balance":
return constants.ERR_DICT(req.get("result").get("action"))
result = req.get("result")
parameters = result.get("parameters")
acct = parameters.get("account-type")
acct = acct.strip()
if acct=='401k':
acct='WI'
qual = parameters.get("qualifier")
speech = str(req.get("result").get("action"))
if acct:
if acct in constants.ACCT_TYPES:
speech = "The value of your {ACCT_TYPE} accounts is {VALU} dollars.".format(VALU=accounts.get_balance(PERSON, acct), ACCT_TYPE=acct)
else:
speech = "You don't have any accounts of that type. The total value of your other accounts is {VALU} dollars.".format(
VALU=accounts.get_balance(PERSON))
elif qual:
speech = "The total value of your accounts is {VALU} dollars.".format(VALU=accounts.get_balance(PERSON))
else:
speech = "The total value of your accounts is {VALU} dollars.".format(VALU=accounts.get_balance(PERSON))
# speech = "The cost of shipping to " + zone + " is " + str(cost[zone]) + " euros."
print("Response:")
print(speech)
speech += "\nAnything else I can help you with today?"
return {
"speech": speech,
"displayText": speech,
#"data": {},
# "contextOut": [],
"source": "home"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print "Starting app on port %d" % port
app.run(debug=True, port=port, host='0.0.0.0')
``` |
{
"source": "1024642475/Entity_Relation_Extraction",
"score": 3
} |
#### File: Entity_Relation_Extraction/project/utils.py
```python
import pickle as pkl
import os
import json
from seqeval.metrics import f1_score, precision_score, recall_score
transtab = str.maketrans("","","##")
def translate_string(string):
"""
    If the string contains two consecutive '#' marks ('##'), remove them.
"""
global transtab
return string.translate(transtab)
def make_label(en_start, en_end, en_type, label_list):
"""
    Assign BIOES labels based on the start and end positions.
"""
if en_end-en_start==1:
label_list[en_start] = "S-"+en_type
else:
for index in range(en_start, en_end):
if index==en_start:
label_list[index] = "B-"+en_type
elif index==en_end-1:
label_list[index] = "E-"+en_type
else:
label_list[index] = "I-"+en_type
#
# def get_input(filepath, debug=True):
# """
# Build the inputs for the BERT model
# """
# with open(filepath, "rb") as f:
# datas = pkl.load(f)
#
# input_word_list = []
# input_label_list = []
# for data in datas:
# bert_words = data["bert_words"]
# label_list = ["O" for _ in bert_words] # start with all-"O" labels
# for entity in data["golden-entity-mentions"]:
# en_start = entity["start"]
# en_end = entity["end"]
# en_type = entity["entity-type"]
# # check that entity["text"] actually lines up with bert_words
# #if debug:
# # bert_words_text = []
# # for index in range(en_start, en_end):
# # bert_words_text.append(translate_string(bert_words[index]))
# # en_text = entity["text"].lower() # lowercase it
# # if "".join(bert_words_text)!=en_text:
# # # if they do not match
# # for index in range(len(bert_words_text)):
# # # if the mismatched token is not [UNK]
# # if bert_words_text[index]!="[UNK]" and en_text[index]!=bert_words_text[index]:
# # raise ValueError("bert_words_text: {} not equal entity['text']: {}".format(bert_words_text, en_text))
# # assign labels based on the start and end positions
# make_label(en_start, en_end, en_type, label_list)
# input_word_list.append(["[CLS]"]+bert_words+["[SEP]"])
# input_label_list.append(["O"]+label_list+["O"])
# return input_word_list, input_label_list
def get_input(debug=True):
"""
    Build the inputs for the BERT model.
"""
# with open(filepath, "rb") as f:
# datas = pkl.load(f)
files = os.listdir('/data1/shgpu/sh/new/project/gingko/data/label_data/entity')
input_word_list = []
input_label_list = []
for file in files:
with open("/data1/shgpu/sh/new/project/gingko/data/label_data/entity/"+file, 'r', encoding='UTF-8') as f:
data = json.load(f)
bert_words = list(data["sentence"])
label_list = ["O" for _ in bert_words] # 首先制作全O的标签
for entity in data["entity-mentions"]:
en_start = entity["start"]
en_end = entity["end"]
en_type = entity["entity-type"]
# 根据开始与结束位置打标签
make_label(en_start, en_end, en_type, label_list)
input_word_list.append(["[CLS]"]+bert_words+["[SEP]"])
input_label_list.append(["O"]+label_list+["O"])
# print(input_word_list)
# # print(input_label_list)
# # print()
return input_word_list, input_label_list
def produce_length(sequences, max_len, padding, ret_attention_mask):
"""
    Normalize lengths: pad short sequences, truncate long ones; padding is the fill symbol.
"""
if ret_attention_mask:
        attention_mask = [[1 for _ in range(len(sequence))] for sequence in sequences]  # 1 for every real token
for index, sequence in enumerate(sequences):
while len(sequence)<max_len:
sequence.append(padding)
if ret_attention_mask:
                attention_mask[index].append(0)  # 0 for the padded positions
sequences[index] = sequence[:max_len]
if ret_attention_mask:
attention_mask[index] = attention_mask[index][:max_len]
if ret_attention_mask:
return sequences, attention_mask
return sequences
def caculate_report(y_true, y_pred, transform_func):
"""
    Compute F1, precision, and recall for the predictions.
"""
for i in range(len(y_true)):
y_true[i] = transform_func(y_true[i])
for i in range(len(y_pred)):
y_pred[i] = transform_func(y_pred[i])
return f1_score(y_true, y_pred), precision_score(y_true, y_pred), recall_score(y_true, y_pred)
if __name__=="__main__":
filepath = "/data0/dlw/sunrui_joint_ee/datasets/xujin_law_v2/bert_data/all_data_length_300.pkl"
    input_word_list, input_label_list = get_input()  # get_input() no longer takes a file path
``` |
{
"source": "1024inc/clickhouse-driver",
"score": 3
} |
#### File: clickhouse_driver/compression/base.py
```python
from io import BytesIO
from ..reader import read_binary_uint32
from ..writer import write_binary_uint8, write_binary_uint32
from .. import errors
try:
from clickhouse_cityhash.cityhash import CityHash128
except ImportError:
raise RuntimeError(
'Package clickhouse-cityhash is required to use compression'
)
class BaseCompressor(object):
"""
Partial file-like object with write method.
"""
method = None
method_byte = None
def __init__(self):
self.data = BytesIO()
super(BaseCompressor, self).__init__()
def get_value(self):
value = self.data.getvalue()
self.data.seek(0)
self.data.truncate()
return value
def write(self, p_str):
self.data.write(p_str)
def compress_data(self, data):
raise NotImplementedError
def get_compressed_data(self, extra_header_size):
rv = BytesIO()
data = self.get_value()
compressed = self.compress_data(data)
header_size = extra_header_size + 4 + 4 # sizes
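        # on-wire layout written below: [size_with_header: uint32][uncompressed_size: uint32][compressed payload]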
write_binary_uint32(header_size + len(compressed), rv)
write_binary_uint32(len(data), rv)
rv.write(compressed)
return rv.getvalue()
class BaseDecompressor(object):
method = None
method_byte = None
def __init__(self, real_stream):
self.stream = real_stream
super(BaseDecompressor, self).__init__()
def decompress_data(self, data, uncompressed_size):
raise NotImplementedError
def check_hash(self, compressed_data, compressed_hash):
if CityHash128(compressed_data) != compressed_hash:
raise errors.ChecksumDoesntMatchError()
def get_decompressed_data(self, method_byte, compressed_hash,
extra_header_size):
size_with_header = read_binary_uint32(self.stream)
compressed_size = size_with_header - extra_header_size - 4
compressed = BytesIO(self.stream.read(compressed_size))
block_check = BytesIO()
write_binary_uint8(method_byte, block_check)
write_binary_uint32(size_with_header, block_check)
block_check.write(compressed.getvalue())
self.check_hash(block_check.getvalue(), compressed_hash)
uncompressed_size = read_binary_uint32(compressed)
compressed = compressed.read(compressed_size - 4)
return self.decompress_data(compressed, uncompressed_size)
```
#### File: clickhouse_driver/compression/zstd.py
```python
import zstd
from .base import BaseCompressor, BaseDecompressor
from ..protocol import CompressionMethod, CompressionMethodByte
class Compressor(BaseCompressor):
method = CompressionMethod.ZSTD
method_byte = CompressionMethodByte.ZSTD
def compress_data(self, data):
return zstd.compress(data)
class Decompressor(BaseDecompressor):
method = CompressionMethod.ZSTD
method_byte = CompressionMethodByte.ZSTD
def decompress_data(self, data, uncompressed_size):
return zstd.decompress(data)
``` |
{
"source": "1024inc/django-redis-ratelimit",
"score": 2
} |
#### File: django-redis-ratelimit/tests/tests.py
```python
from django.conf.urls import url
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.views import View
from unittest.mock import MagicMock, patch
from redis_ratelimit import ratelimit
from redis_ratelimit.exceptions import RateLimited
from redis_ratelimit.utils import parse_rate
from redis_ratelimit.decorators import (
ignore_redis_errors,
is_rate_limited,
redis_connection,
)
from redis.exceptions import TimeoutError
factory = RequestFactory()
def make_request(view):
class DynamicUrlPattern:
urlpatterns = [url(r'', view)]
with override_settings(ROOT_URLCONF=DynamicUrlPattern):
req = factory.get('/')
view(req)
class RateParsingTests(TestCase):
def test_rate_parsing(self):
tests = (
('100/s', (100, 1)),
('100/10s', (100, 10)),
('100/m', (100, 60)),
('400/10m', (400, 10 * 60)),
('600/h', (600, 60 * 60)),
('800/d', (800, 24 * 60 * 60)),
)
for input, output in tests:
assert output == parse_rate(input)
class DecoratorTests(TestCase):
def test_no_rate(self):
@ratelimit()
def view(request):
return True
req = factory.get('/')
assert view(req)
class RedisTests(TestCase):
def setUp(self):
self.redis = redis_connection()
def test_existing_key_gets_expiry(self):
key = 'REDIS_RATELIMIT/127.0.0.1/tests.tests.view/500/60'
self.redis.delete(key)
self.redis.set(key, 20)
@ratelimit(rate='500/m')
def view(request):
return True
make_request(view)
self.assertEqual(self.redis.ttl(key), 60)
def test_new_key_gets_expiry(self):
key = 'REDIS_RATELIMIT/127.0.0.1/tests.tests.view/500/60'
self.redis.delete(key)
@ratelimit(rate='500/m')
def view(request):
return True
make_request(view)
self.assertEqual(self.redis.ttl(key), 60)
class RateLimitTests(TestCase):
def test_method_decorator(self):
@ratelimit(rate='5/s')
def view(request):
return True
for _ in range(5):
make_request(view)
with self.assertRaises(RateLimited):
make_request(view)
def test_cbv_decorator(self):
class Cbv(View):
@ratelimit(rate='5/s')
def get(self, request):
return True
class DynamicUrlPattern:
urlpatterns = [url(r'', Cbv.as_view())]
with override_settings(ROOT_URLCONF=DynamicUrlPattern):
for _ in range(5):
req = factory.get('/')
Cbv.as_view()(req)
with self.assertRaises(RateLimited):
req = factory.get('/')
Cbv.as_view()(req)
class IgnoreRedisErrorsTest(TestCase):
def test_invokes_function(self):
@ignore_redis_errors
def fake_rate_limited():
return True
assert fake_rate_limited()
def test_error(self):
@ignore_redis_errors
def fake_rate_limited():
raise TimeoutError
assert fake_rate_limited() == False
``` |
{
"source": "1024inc/django-rq",
"score": 2
} |
#### File: django_rq/templatetags/django_rq.py
```python
from django import template
from django.utils import timezone
register = template.Library()
@register.filter
def to_localtime(time):
'''
A function to convert naive datetime to
localtime base on settings
'''
utc_time = time.replace(tzinfo=timezone.utc)
to_zone = timezone.get_default_timezone()
return utc_time.astimezone(to_zone)
```
#### File: django-rq/django_rq/utils.py
```python
from rq.registry import (DeferredJobRegistry, FinishedJobRegistry,
StartedJobRegistry)
from rq.exceptions import NoSuchJobError
from .queues import get_connection, get_queue_by_index
from .settings import QUEUES_LIST
from .templatetags.django_rq import to_localtime
from .workers import collect_workers_by_connection, get_all_workers_by_configuration
def get_statistics():
queues = []
workers = []
workers_collections = collect_workers_by_connection(QUEUES_LIST)
for index, config in enumerate(QUEUES_LIST):
queue = get_queue_by_index(index)
connection = queue.connection
connection_kwargs = connection.connection_pool.connection_kwargs
# Raw access to the first item from left of the redis list.
# This might not be accurate since new job can be added from the left
# with `at_front` parameters.
# Ideally rq should supports Queue.oldest_job
last_job_id = connection.lindex(queue.key, 0)
last_job = queue.fetch_job(last_job_id.decode('utf-8')) if last_job_id else None
if last_job:
oldest_job_timestamp = to_localtime(last_job.enqueued_at)\
.strftime('%Y-%m-%d, %H:%M:%S')
else:
oldest_job_timestamp = "-"
        # parser_class is not needed and not JSON serializable
try:
del(connection_kwargs['parser_class'])
except KeyError:
pass
queue_data = {
'name': queue.name,
'jobs': queue.count,
'oldest_job_timestamp': oldest_job_timestamp,
'index': index,
'connection_kwargs': connection_kwargs
}
if queue.name == 'failed':
queue_data['workers'] = '-'
queue_data['finished_jobs'] = '-'
queue_data['started_jobs'] = '-'
queue_data['deferred_jobs'] = '-'
else:
connection = get_connection(queue.name)
all_workers = get_all_workers_by_configuration(
config['connection_config'],
workers_collections
)
seen_workers = [w.name for w in workers]
workers += [w for w in all_workers if w.name not in seen_workers]
queue_workers = [worker for worker in all_workers if queue in worker.queues]
queue_data['workers'] = len(queue_workers)
finished_job_registry = FinishedJobRegistry(queue.name, connection)
started_job_registry = StartedJobRegistry(queue.name, connection)
deferred_job_registry = DeferredJobRegistry(queue.name, connection)
queue_data['finished_jobs'] = len(finished_job_registry)
queue_data['started_jobs'] = len(started_job_registry)
queue_data['deferred_jobs'] = len(deferred_job_registry)
queues.append(queue_data)
# TODO: Right now the scheduler can run on multiple queues, but multiple
# queues can use the same connection. Either need to dedupe connections or
# split scheduled into its own queue, like failed.
#
# TODO: the real solution here is ditch allowing queues to have separate
# connections - make a single global connection and multiple queues are
# only separated by name. This will solve the multiple failed queue issue
# too. But was there a reason to allow multiple connections? Also, this
# will require some massive doc updates.
scheduled_jobs = []
scheduler_running = False
scheduler_installed = False
try:
from rq_scheduler import Scheduler
scheduler_installed = True
except ImportError:
pass
else:
connection = get_connection('default')
scheduler = Scheduler(connection=connection)
scheduled_jobs = scheduler.get_jobs(with_times=True)
# TODO: should expose this from rq-scheduler.
# TODO: this is really per-queue.
scheduler_running = (connection.exists(scheduler.scheduler_key) and
not connection.hexists(scheduler.scheduler_key, 'death'))
def get_job_graceful(worker):
if not worker:
return None
try:
return worker.get_current_job()
except NoSuchJobError:
return None
def job_serializer(job):
if not job:
return None
return {
'id': job.id,
'description': job.description,
'created_at': job.created_at,
'enqueued_at': job.enqueued_at,
'status': job.get_status(),
'func_name': job.func_name,
'args': job.args,
'kwargs': job.kwargs,
}
def scheduled_job_serializer(job):
# job is actually tuple of (job, datetime)
if not job:
return None
# TODO: job.origin is the scheduler queue originally used to schedule
# the job. Need to check if this is how the scheduler actually picks
# which queue to put the job into.
return {
'job': job_serializer(job[0]),
'runtime': job[1],
'queue': job[0].origin,
}
return {
'queues': queues,
'workers': [{
'name': worker.name,
'state': worker.get_state(),
'birth': worker.birth_date,
'queue_names': worker.queue_names(),
'job': job_serializer(get_job_graceful(worker)),
} for worker in list(set(workers))],
'scheduler_installed': scheduler_installed,
'scheduler_running': 'running' if scheduler_running else 'stopped',
'scheduled_jobs': [
scheduled_job_serializer(job) for job in scheduled_jobs
]
}
``` |
{
"source": "1024jp/atamaTracker",
"score": 3
} |
#### File: 1024jp/atamaTracker/track.py
```python
import os.path
import sys
from atamatracker.config import manager as config_manager
from atamatracker import gui, moviefile
from atamatracker.data import History, Track
from atamatracker.detector import PatternDetector
def setup(config):
"""initialize PatternDetector class.
"""
PatternDetector.pattern_size = (config.pattern_size, config.pattern_size)
PatternDetector.dx_range = (-config.find_buffer, config.find_buffer)
PatternDetector.dy_range = (-config.find_buffer, config.find_buffer)
def main(file_path):
# setup with config file
config_manager.load_config(file_path)
config = config_manager.config
setup(config)
# init variables
time = 0.0
last_time = None
last_index = -1
history = History()
# load movie file
movie = moviefile.Movie(file_path)
# open a window
file_name = os.path.basename(file_path)
window = gui.Window(file_name)
eventListener = gui.EventListener(window)
# process each frame
while True:
image = movie.load_image(time)
if image is None:
break
window.image = image
# auto-track points
if last_time is not None:
prev_image = movie.load_image(last_time)
if prev_image is None:
break
detector = PatternDetector(prev_image, image)
for last_track in history.tracks(time=last_time):
point = detector.detect(last_track.point)
if point:
history.append(Track(point, last_track.label, time))
window.draw_marker(point, config.pattern_size)
window.display()
# wait for mouse event
try:
clicked_points = eventListener.get_xy()
except gui.UserCancelException: # cancel with esc key
break
# append new tracks
for point in clicked_points:
last_index += 1
history.append(Track(point, last_index, time, is_manual=True))
last_time = time
time += config.time_step
window.close()
return history
if __name__ == "__main__":
result = main(sys.argv[1])
if len(sys.argv) > 2:
result.dump(sys.argv[2])
``` |
{
"source": "1024jp/LensCalibrator",
"score": 3
} |
#### File: LensCalibrator/modules/argsparser.py
```python
import argparse
import io
import logging
import os
import sys
try:
from . import __version__ as version
except ImportError:
version = 'n/a'
class Parser(argparse.ArgumentParser):
description = 'Translate coordinates in a picture to the real world.'
datafile_name = 'source'
def __init__(self):
argparse.ArgumentParser.__init__(self, description=self.description)
self.init_arguments()
def init_arguments(self):
"""Setup arguments of command.
"""
# argument
self.add_argument('file',
type=argparse.FileType('rb'),
metavar='FILE',
nargs='?',
help="path to source file"
)
# define options
self.add_argument('--version',
action='version',
version=version
)
self.add_argument('-t', '--test',
action='store_true',
default=False,
help="test the program"
)
self.add_argument('-v', '--verbose',
action='store_true',
default=False,
help="display debug info to standard output"
" (default: %(default)s)"
)
output = self.add_argument_group('output options')
output.add_argument('--out',
type=argparse.FileType('w'),
default=sys.stdout,
metavar='FILE',
help="path to output file"
" (default: display to standard output)"
)
input_ = self.add_argument_group('input options')
input_.add_argument('--location',
type=str,
default=None,
metavar='FILE',
help="path to location file"
" (default: Localiton.csv in the same"
" directory of source file)"
)
input_.add_argument('--camera',
type=argparse.FileType('rb'),
default=None,
metavar='FILE',
help="path to camera model file for undistortion"
" (default: points in source file are used)"
)
# format values
fileformat = self.add_argument_group('format options')
fileformat.add_argument('--size',
type=int,
nargs=2,
default=(3840, 2160),
metavar=('WIDTH', 'HEIGHT'),
help=("dimension of the image"
" (default: %(default)s)")
)
fileformat.add_argument('--in_cols',
type=int,
nargs=2,
default=[2, 3],
metavar='INDEX',
help=("column positions of x, y in file "
" (default: %(default)s)")
)
fileformat.add_argument('--z_col',
type=int,
default=None,
metavar='INDEX',
help=("column position of z in file "
" (default: %(default)s)")
)
fileformat.add_argument('--out_cols',
type=int,
nargs=2,
default=None,
metavar='INDEX',
help=("column positions of x, y in file for"
" calibrated data"
" (default: same as in_cols)")
)
@property
def datafile(self):
"""Path to data file.
Return:
datafile (file) -- File to process, given as a command argument.
"""
args = super(Parser, self).parse_args()
return args.file
def parse_args(self, **kwargs):
"""Parse command arguments and return.
Return:
args (namespace) -- a simple object holding command arguments.
"""
args = super(Parser, self).parse_args(**kwargs)
if len(kwargs) > 0:
return args
if not args.test and not args.file:
self.error('This script requires a path to a {} file.\n'.format(
self.datafile_name))
# size to tuple instead of list
args.size = tuple(args.size)
# set logging level
if args.verbose:
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] %(module)s -'
'%(message)s (%(relativeCreated)4dms)')
display(args)
return args
def display(args):
"""Display input arguments for test use.
"""
print('[arguments]')
for key, arg in vars(args).items():
if isinstance(arg, io.IOBase):
arg = arg.name
print(' {:10s} {}'.format(key + ':', arg))
if __name__ == "__main__":
parser = Parser()
display(parser.parse_args())
``` |
{
"source": "1024sparrow/traliva_platforms",
"score": 2
} |
#### File: django_project/root_app/views.py
```python
from django.shortcuts import render
#from django.http import HttpResponse
# Create your views here.
def index_html(p_request):
#return HttpResponse('I am Root.')
return render(p_request, 'root_app/index.html', {}, content_type='text/html')
def robots_txt(p_request):
return render(p_request, 'root_app/robots.txt', {}, content_type="text/plain")
``` |
{
"source": "1024sparrow/traliva",
"score": 3
} |
#### File: build_scripts/utils/js__map.py
```python
import sys, re
def get_map(pin_js_paths, pin_css_paths, pout_js, pout_css, pout_js_css):
for i_src in [(pin_js_paths, pout_js), (pin_css_paths, pout_css)]:
for i in i_src[0]:
with open(i) as f:
cand = {
'filepath': i,
'text': _get_text_as_array(f.readlines(), True, True)
}
i_src[1].append(cand)
pout_js_css.append(cand)
print('get_map()')
#print('pout_js_css: ', pout_js_css)##
def apply_map(p_js, p_css, p_js_css):
print('apply_map()')
for i in p_js_css:
#print('#%s:' % i['filepath'])
if i['filepath'] is None:
continue
cand = ''
for i_text in i['text']:
cand += i_text['text']
#print(cand)
f = open(i['filepath'], 'w')
f.write(cand)
f.close()
#def process_code_fragment(p_code):
# retval = '>>>>' + p_code + '<<<<'
# #retval = 'XXXX'
# return retval
# p_text is an array of individual lines
# Must return an array of fragments tagged with their type (0 - comment, 1 - code, 2 - string contents)
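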
# [
# {
# type: 1,
# text: 'do_some();\nconsole.log(\''
# },
# {
# type: 2,
# text: 'hello world'
# },
# {
# type: 1,
# text: '\');'
# },
# {
# type: 0,
# 'text': '// some comment'
# },
# ]
re_one_line_comment = re.compile(r'//.*', re.DOTALL)
def _get_text_as_array(p_text, pp_comment, pp_newlines):
global __type
global __buffer
    __type = None
__buffer = ''
retval = []
if not pp_newlines:
pp_comment = False
use_strict_used = False
a = ''
usestrict_pos = None
for line in p_text:
stripline = line.strip()
if not use_strict_used:
if stripline.startswith("'use strict'") or stripline.startswith('"use strict"'):
usestrict_pos = len(a)
                a += '#'  # any character; it will not end up in the output
use_strict_used = True
continue
if pp_comment:
a += line
else:
if not pp_newlines:
line_cand = line.strip()
a += re.sub(re_one_line_comment, '', line_cand)
#b = ''
in_comment_1 = False # // ...
in_comment_2 = False # /* ... */
in_comment = False
in_string_1 = False # '
in_string_2 = False # "
in_string_3 = False # ``
string_type = 0 # for in_string_3
#string_content = [] # for in_string_3
#string_state = 0 # for in_string_3
#string_indent = 0 # for in_string_3
"""
    `` - verbatim escaping. Newlines and all characters between the backticks are preserved.
`
asd
` --> '\n\t\tasd\n\t'
    1`` - like ``, but additionally the first and last lines are trimmed off.
1`
asd
` --> '\t\tasd'
    2`` - like 1``, but indentation is stripped as well: the largest common indent is computed and removed. Only spaces count as indentation - a tab character is not treated as an indent.
var a = 2`
var a =
5;
`; --> var a ='var a =\n\t5;';
    3`` - outer whitespace and all line breaks are removed. If the last character of one line is not '>' and the first character of the next line is not '<', a space is inserted into the result. The first and last lines are not trimmed (just in case..).
var a = 3`
<table>
<tr>
</tr>
<tr>
</tr>
</table>
` --> var a = '<table><tr></tr><tr></tr></table>'
"""
in_string = False
prev_char = 's' # nor '\\' or '/' or '*'
code_cand = ''
counter = 0
for i in a:
if not (counter is None):
if counter == usestrict_pos:
t = __buffer + code_cand
if __buffer:
retval.append({
'type': __type,
'text': __buffer
})
__buffer = ''
if code_cand:
retval.append({
'type': 1,
'text': code_cand
})
code_cand = ''
retval.append({
'type': 1,
'text': "\n'use strict';\n"
})
__type = 1
counter += 1
continue
counter += 1
skip_current = False
if (not in_comment) and (not in_string) and prev_char == '/' and i == '/':
if len(code_cand) > 0:
code_cand = code_cand[:-1]
#b += process_code_fragment(code_cand) + '/'
_accumulate_array_by_symbols(1, code_cand, retval)
_accumulate_array_by_symbols(0, '/', retval)
code_cand = ''
in_comment_1 = True
in_comment = True
elif in_comment_1 and i == '\n':
if not in_comment_2:
in_comment_1 = False
in_comment = False
elif prev_char == '/' and i == '*':
if not in_comment_1:
if len(code_cand) > 0:
code_cand = code_cand[:-1]
#b += process_code_fragment(code_cand) + '/'
_accumulate_array_by_symbols(1, code_cand, retval)
code_cand = ''
in_comment_2 = True
in_comment = True
if pp_comment:
_accumulate_array_by_symbols(0, '/', retval)
#if not pp_comment:
# b = b[:-1] # удаляем предыдущий символ ('/')
elif prev_char == '*' and i == '/':
if not in_comment_1:
in_comment_2 = False
in_comment = False
skip_current = True
elif prev_char == '\\' and i == '\\':
prev_char = 's'
#b += i
_accumulate_array_by_symbols(__type, i, retval)
continue
elif prev_char != '\\' and i == '"':
if not in_comment and not in_string_1 and not in_string_3:
if in_string:
if in_string_2:
in_string_2 = False
else:
in_string_1 = False
in_string_3 = False
in_string = False
else:
#b += process_code_fragment(code_cand + '"')
skip_current = True
_accumulate_array_by_symbols(1, code_cand + '"', retval)
skip_current = True
code_cand = ''
in_string_2 = True
in_string = True
elif prev_char != '\\' and i == "'":
if not in_comment and not in_string_2 and not in_string_3:
if in_string:
if in_string_1:
in_string_1 = False
else:
in_string_2 = False
in_string_3 = False
in_string = False
else:
#b += process_code_fragment(code_cand + "'")
skip_current = True
_accumulate_array_by_symbols(1, code_cand + "'", retval)
skip_current = True
code_cand = ''
in_string_1 = True
in_string = True
elif prev_char != '\\' and i == "`":
if not in_comment and not in_string_1 and not in_string_2:
if in_string:
#skip_current = True
if in_string_3:
#in_string_3 = False
if string_type == 0 or string_type == 3:
tmp = string_content
else:
                            tmp = string_content[1:-1]  # trim off the first and last lines
if string_type == 2:
indent = 10000
for ca in tmp:
cand = 0
for ca_i in ca:
if ca_i == ' ':
cand += 1
else:
break
if cand < indent:
indent = cand
if string_type == 3:
prev = 'q' # any letter symbol
tmp_between_parath = False
for ca in [tmp2.strip() for tmp2 in tmp]:
if len(ca) and len(prev) and prev[-1] != '>' and ca[0] != '<':
_accumulate_array_by_symbols(2, ' ', retval)
tmp_between_parath = False
else:
tmp_between_parath = True
cand = ca
if tmp_between_parath:
while len(cand) and cand[0] == ' ':
cand = cand[1:]
_accumulate_array_by_symbols(2, ca, retval)
prev = ca
else:
for ca in tmp:
if string_type == 2:
cand = ca[indent:]
else:
cand = ca
_accumulate_array_by_symbols(2, cand, retval)
else:
in_string_1 = False
in_string_2 = False
_accumulate_array_by_symbols(1, code_cand + "'", retval)
in_string = False
else:
skip_current = True
#print('::',prev_char,'::::::::::', code_cand)
in_string_3 = True
in_string = True
string_type = 0
string_content = ['']
string_state = 0
string_indent = 0
if prev_char == '1':
string_type = 1
code_cand = code_cand[:-1]
elif prev_char == '2':
string_type = 2
code_cand = code_cand[:-1]
elif prev_char == '3':
string_type = 3
code_cand = code_cand[:-1]
_accumulate_array_by_symbols(1, code_cand + "'", retval)
code_cand = ''
if (not in_comment) and (not skip_current):
if in_string:
if in_string_3:
if i == '\n':
string_content.append('')
else:
ca = i
if i == "'":
ca = '\\\''
string_content[-1] += ca
else:
#b += i
_accumulate_array_by_symbols(2, i, retval)
else:
if in_string_3:
#_accumulate_array_by_symbols(1, "'", retval)
#code_cand += "'"
in_string_3 = False
else:
code_cand += i
else: # комментарии /* ... */
if not in_string:
if pp_comment:
#b += i
_accumulate_array_by_symbols(0, i, retval)
prev_char = i
prev_instring = in_string
#b += process_code_fragment(code_cand)
_accumulate_array_by_symbols(1, code_cand, retval)
_stop_accumulating_array_by_symbols(retval)
return retval
__buffer = ''
__type = None
def _accumulate_array_by_symbols(pin_type, pin_fragment, pout_target):
global __buffer
global __type
if len(pin_fragment) > 0:
if pin_type == __type:
__buffer += pin_fragment
else:
if __buffer:
pout_target.append({
'type': __type,
'text': __buffer
})
__type = pin_type
__buffer = pin_fragment
def _stop_accumulating_array_by_symbols(pout_target):
global __buffer
global __type
if __buffer:
pout_target.append({
'type': __type,
'text': __buffer
})
__buffer = ''
__type = None
``` |
{
"source": "1026295417/edge-ml",
"score": 3
} |
#### File: 1026295417/edge-ml/CIFAR10_Inference_EdgeTPU.py
```python
import argparse
import io
import time
from datetime import datetime
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from edgetpu.classification.engine import ClassificationEngine
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model', help='File path of Tflite model.', required=True)
parser.add_argument(
'--image', help='File path of file.', required=True)
args = parser.parse_args()
label_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
img = Image.open(args.image)
#np_image = np.array(img)
# Load Engine
engine = ClassificationEngine(args.model)
lap_time = time.time()
# Run inference.
for result in engine.ClassifyWithImage(img, top_k=3):
print ('---------------------------')
print (label_names[result[0]])
print ('Score : ', result[1])
previous_time = lap_time
lap_time = time.time()
print("Elapsed time for the last inference: ", lap_time - previous_time)
if __name__ == '__main__':
main()
``` |
{
"source": "1028sqnatc/aircraftmath",
"score": 3
} |
#### File: aircraftmath/src/example.py
```python
import math
# https://docs.python.org/2/library/math.html
def myFunc(x,a,b,c):
d = (a * math.pow(x,2) ) + (b * x) + (c)
return d
def addAll(x,a,b,c):
return x + a + b + c
``` |
{
"source": "1029127253/Product-Title-Classification",
"score": 3
} |
#### File: Product-Title-Classification/classifier/facebook-fasttext.py
```python
import fastText
from fastText import train_supervised
from fastText import load_model
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def print_results(N, p, r):
print("N\t" + str(N))
print("P@{}\t{:.3f}".format(1, p))
print("R@{}\t{:.3f}".format(1, r))
valid_data='/content/drive/drive/爬虫/data/train-test.txt'
classifier=train_supervised('/content/drive/drive/爬虫/data/train-train.txt',loss='hs',minCount=5,thread=8,dim=300,wordNgrams=2,epoch=5)
classifier.save_model('/content/drive/drive/爬虫/ckpt-facebook/fasttext.model')
classifier=load_model('/content/drive/drive/爬虫/ckpt-facebook/fasttext.model')
print_results(*classifier.test(valid_data))
```
#### File: Product-Title-Classification/preprocess/convert-content.py
```python
from __future__ import print_function, unicode_literals
import pandas as pd
from collections import Counter
import re
def process(our_data):
our_data=our_data.lower()
return list(our_data)
def is_right(uchar):
if uchar >= u'\u4e00' and uchar <= u'\u9fa5':
return True
elif uchar >= u'\u0061' and uchar <= u'\u007a':
return True
else:
return False
def is_eng(word):
flag=True
for uchar in word:
if uchar < u'\u0061' or uchar > u'\u007a':
flag=False
return flag
def format_str(content):
content_str = ''
for i in content:
if is_right(i):
content_str = content_str +i
return content_str
import jieba
import jieba.posseg as pseg
#jieba.load_userdict("vocab-correct.txt")
'''
fin=open('plus-vocabs.txt')
for line in fin.readlines():
word=line.strip().split()[0]
jieba.add_word(word,100)
fin.close()
'''
#jieba.set_dictionary('vocab-correct.txt')
#dict_path='dict.txt'
#jieba.load_userdict(dict_path)
def fenci(datas):
#cut_words=nlp.tag(datas)
#return cut_words[0]['word']
cut_words=jieba.cut(datas,cut_all=False)
return cut_words
#os.chdir('data')
fcontent=open('test-content.txt','w')
fin=open('test-ubuntu.tsv')
readlines=fin.readlines()[1:]
for i in range(len(readlines)):
line=readlines[i]
if i%10000==0:
print (i)
content=line.strip()
result=[]
for part in re.split(r'[:-]',content):
for word in part.split():
result.extend(fenci(format_str(process(word))))
if len(result)==0:
print (i)
fcontent.write(' '.join(result)+'\n')
fcontent.close()
``` |
{
"source": "1029153958/TrumpTwitterAnalysis-1",
"score": 4
} |
#### File: 1029153958/TrumpTwitterAnalysis-1/MostUsedWords.py
```python
def getWord():
def normal(sentence,val=" "):
sentence=sentence.lower()
replace=["\n",":",".","/","\"","@","'","-","“","”","!"]
for word in replace:
sentence=sentence.replace(word,val)
return sentence
frequency = {}
NotInterestWord=["","the","to","realdonaldtrump","a","is","i","of","in","and",
"you","for","on","com","be","http","s","trump","will","that",
"are","at","with","it","have","&","my","your","t","this","he",
"twitter","by","not","can","so","from","what","as","if","do",
"about","would","very","www","who","an","u","we","our","was",
"all","me","just","they","all","ly","get","should","https",
"am","over","re","their","go","being","want","or","she","day",
'his','out','donald','thank','thanks','people','new','like',
'has','no','now','run','one','more','make','up','when','today',
'us','how','why','only','m','need','going','pic','never','again',
'see','true','back','than','great','bit','time','big','but',
'don','best','via','love','think','vote','show','there','mr',
'last','been','him','much','really','must','watch','good',
'had','did','please','amazing','many','know','right','them',
'were','way','win','doing','\xa0…','ever','her','always',
'better','man','because','said','keep','years','hope','year',
'needs','deal','country','first','tonight','news','night',
'status','president','p','looking','even','business','apprenticenbc',
'obamacare','bad','poll','nice','donaldtrump','happy',
'national',]
with open("./single/content.csv","r",encoding="utf-8") as f:
line=f.readline()
while(line!=""):
words=normal(line).split(" ")
for word in words:
if(word not in NotInterestWord):
if(word in frequency):
frequency[word]+=1
else:
frequency[word]=1
line=f.readline()
return frequency
frequency=getWord()
sorted_counts = sorted(frequency.items(), key=lambda d: d[1], reverse=True)  # renamed to avoid shadowing the dict builtin
for i in range(20):
    print(sorted_counts[i])
```
#### File: 1029153958/TrumpTwitterAnalysis-1/SplitData.py
```python
LENGTH=34737
def getFavouriteNum():
with open("./data/data.csv","r",encoding="utf-8") as O:
with open("./single/favourite.csv","w",encoding="utf-8") as I:
for i in range(LENGTH):
line=O.readline()
if(line!=[]):
single=line.split(",")
I.write(single[-1])
def getRetweetsNum():
with open("./data/data.csv","r",encoding="utf-8") as O:
with open("./single/retweets.csv","w",encoding="utf-8") as I:
for i in range(LENGTH):
line=O.readline()
if(line!=[]):
try:
single=line.split(",")
I.write(single[-2]+"\n")
except IndexError:
print(i)
def getTime():
with open("./data/data.csv","r",encoding="utf-8") as O:
with open("./single/time.csv","w",encoding="utf-8") as I:
for i in range(LENGTH):
line=O.readline()
if(line!=[]):
try:
single=line.split(",")
I.write(single[-3]+"\n")
except IndexError:
print(i)
def getContent():
with open("./data/data.csv","r",encoding="utf-8") as O:
with open("./single/content.csv","w",encoding="utf-8") as I:
for i in range(LENGTH):
line=O.readline()
if(line!=[]):
try:
single=line.split(",")
I.write(single[1]+"\n")
except IndexError:
print(i)
getFavouriteNum()
getRetweetsNum()
getTime()
getContent()
``` |
{
"source": "102/sign",
"score": 3
} |
#### File: 102/sign/rsa.py
```python
import random
import util
from functools import reduce
from functools import partial
from collections import deque
from math import log, ceil
to_bin = partial(int, base=2)
to_hex = partial(int, base=16)
class Key(object):
n = 0
def chunk_size(self):
return len('{:0b}'.format(self.n)) - 1
class PublicKey(Key):
def __init__(self, e, n):
self.e = e
self.n = n
def __repr__(self):
return '{:0x}:{:1x}'.format(self.e, self.n)
@classmethod
def fromstring(cls, string):
e, n = string.split(':')
return cls(to_hex(e), to_hex(n))
def __encrypt_chunk(self, message):
return util.power(message, self.e, self.n)
def encrypt(self, message):
chunk_size = self.chunk_size()
chunk_map = reduce(lambda x, y: 2 ** y + x, range(0, chunk_size), 0)
message = reduce(lambda acc, byte: (acc << 8) + byte, message, 0)
result = 0
for i in range(0, ceil(log(message, 2)), chunk_size):
m = self.__encrypt_chunk(message & chunk_map)
message >>= chunk_size
result = (result << chunk_size + 1) + m
d = deque()
while result:
d.appendleft(result & 0xff)
result >>= 8
return bytearray(d)
class PrivateKey(Key):
def __init__(self, d, n):
self.d = d
self.n = n
def __repr__(self):
return '{:0x}:{:1x}'.format(self.d, self.n)
@classmethod
def fromstring(cls, string):
d, n = string.split(':')
return cls(to_hex(d), to_hex(n))
def __decrypt_chunk(self, message):
return util.power(message, self.d, self.n)
def decrypt(self, message):
chunk_size = self.chunk_size() + 1
chunk_map = reduce(lambda x, y: 2 ** y + x, range(0, chunk_size), 0)
message = reduce(lambda acc, byte: (acc << 8) + byte, message, 0)
result = 0
for i in range(0, ceil(log(message, 2)), chunk_size):
m = self.__decrypt_chunk(message & chunk_map)
message >>= chunk_size
result = (result << chunk_size - 1) + m
d = deque()
while result:
d.appendleft(result & 0xff)
result >>= 8
return bytearray(d)
def get_key_pair(length):
length /= 2
def get_e(phi):
def gcd(a, b):
while b:
a, b = b, a % b
return a
while True:
x = random.randint(3, phi)
if gcd(x, phi) == 1:
return x
p, q = util.get_primes(length)
n = p * q
phi = (p - 1) * (q - 1)
e = get_e(phi)
d = util.modular_inverse(e, phi)
return PublicKey(e, n), PrivateKey(d, n)
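# Rough usage sketch (illustrative, untested; uses only the names defined above):
#   pub, priv = get_key_pair(64)
#   assert priv.decrypt(pub.encrypt(bytearray(b'hi'))) == bytearray(b'hi')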
``` |
{
"source": "1031975582/stm32f746-tflite-micro-mnist",
"score": 3
} |
#### File: DSP/Testing/processTests.py
```python
import argparse
import TestScripts.NewParser as parse
import TestScripts.CodeGen
from collections import deque
# When deprecation is forced on some nodes
# we ensure that a parent of a valid node is also valid
def correctDeprecation(node):
current = node.data["deprecated"]
for c in node.children:
if not correctDeprecation(c):
current = False
node.data["deprecated"] = current
return(current)
def deprecateRec(root,others,deprecated):
if others:
newOthers=others.copy()
newOthers.popleft()
if root.kind == TestScripts.Parser.TreeElem.TEST:
if others[0].isdigit() and int(root.id) == int(others[0]):
root.data["deprecated"]=False
for c in root.children:
deprecateRec(c,newOthers,False)
else:
root.data["deprecated"]=True
for c in root.children:
deprecateRec(c,others,deprecated)
else:
if root.data["class"] == others[0]:
root.data["deprecated"]=False
for c in root.children:
deprecateRec(c,newOthers,False)
else:
root.data["deprecated"]=deprecated
for c in root.children:
deprecateRec(c,others,deprecated)
def deprecate(root,others):
if others:
deprecateRec(root,deque(others),True)
correctDeprecation(root)
parser = argparse.ArgumentParser(description='Parse test description')
parser.add_argument('-f', nargs='?',type = str, default="test.txt", help="File path")
parser.add_argument('-p', nargs='?',type = str, default="Patterns", help="Pattern dir path")
parser.add_argument('-d', nargs='?',type = str, default="Parameters", help="Parameter dir path")
# -e true when no semihosting
# Input is include files
# Output is only one stdout
# So the .h for include files need to be generated.
parser.add_argument('-e', action='store_true', help="Embedded test")
parser.add_argument('others', nargs=argparse.REMAINDER)
args = parser.parse_args()
if args.f is not None:
# Create a treeelemt object
p = parse.Parser()
# Create a codegen object
c = TestScripts.CodeGen.CodeGen(args.p,args.d, args.e)
# Parse the test description.
root = p.parse(args.f)
deprecate(root,args.others)
print(root)
# Generate code with the tree of tests
c.genCodeForTree(root)
else:
parser.print_help()
``` |
{
"source": "1032998/LM2",
"score": 2
} |
#### File: layers/transformers/relative_transformer.py
```python
import math
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from hanlp.common.structure import ConfigTracker
class RelativeSinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
Args:
embedding_dim: embedding size of each position
padding_idx:
Returns:
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
assert init_size % 2 == 0
weights = self.get_embedding(
init_size + 1,
embedding_dim,
padding_idx,
)
self.register_buffer('weights', weights)
self.register_buffer('_float_tensor', torch.as_tensor(1))
def get_embedding(self, num_embeddings, embedding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
Args:
num_embeddings:
embedding_dim:
padding_idx: (Default value = None)
Returns:
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(-num_embeddings // 2, num_embeddings // 2, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
self.origin_shift = num_embeddings // 2 + 1
return emb
def forward(self, inputs: Tensor):
"""Input is expected to be of size [bsz x seqlen].
Args:
inputs: Tensor:
Returns:
"""
bsz, seq_len = inputs.size()
max_pos = self.padding_idx + seq_len
if max_pos > self.origin_shift:
# recompute/expand embeddings if needed
weights = self.get_embedding(
max_pos * 2,
self.embedding_dim,
self.padding_idx,
)
weights = weights.to(self._float_tensor)
del self.weights
self.origin_shift = weights.size(0) // 2
self.register_buffer('weights', weights)
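        # relative offsets -seq_len .. seq_len-1, shifted into the embedding table's index range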
positions = torch.arange(-seq_len, seq_len).to(inputs.device).long() + self.origin_shift # 2*seq_len
embed = self.weights.index_select(0, positions.long()).detach()
return embed
class RelativeMultiHeadAttn(nn.Module):
def __init__(self, in_features, num_heads, dropout, r_w_bias=None, r_r_bias=None, init_seq_length=1024,
k_as_x=True):
"""
Args:
in_features:
num_heads:
dropout:
r_w_bias: n_head x head_dim or None
r_r_bias: n_head x head_dim or None
init_seq_length:
k_as_x:
"""
super().__init__()
self.k_as_x = k_as_x
if k_as_x:
self.qv_linear = nn.Linear(in_features, in_features * 2, bias=False)
else:
self.qkv_linear = nn.Linear(in_features, in_features * 3, bias=False)
self.n_head = num_heads
self.head_dim = in_features // num_heads
self.dropout_layer = nn.Dropout(dropout)
self.pos_embed = RelativeSinusoidalPositionalEmbedding(self.head_dim, 0, init_seq_length)
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(num_heads, in_features // num_heads)))
self.r_w_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(num_heads, in_features // num_heads)))
else:
            self.r_r_bias = r_r_bias  # r_r_bias is the v vector from Transformer-XL
            self.r_w_bias = r_w_bias  # r_w_bias is the u vector from Transformer-XL
def forward(self, x, mask):
"""
Args:
x: batch_size x max_len x d_model
mask: batch_size x max_len
Returns:
"""
batch_size, max_len, d_model = x.size()
        pos_embed = self.pos_embed(mask)  # 2*max_len x head_dim
if self.k_as_x:
qv = self.qv_linear(x) # batch_size x max_len x d_model2
q, v = torch.chunk(qv, chunks=2, dim=-1)
k = x.view(batch_size, max_len, self.n_head, -1).transpose(1, 2)
else:
qkv = self.qkv_linear(x) # batch_size x max_len x d_model3
q, k, v = torch.chunk(qkv, chunks=3, dim=-1)
k = k.view(batch_size, max_len, self.n_head, -1).transpose(1, 2)
q = q.view(batch_size, max_len, self.n_head, -1).transpose(1, 2)
v = v.view(batch_size, max_len, self.n_head, -1).transpose(1, 2) # b x n x l x d
rw_head_q = q + self.r_r_bias[:, None]
        AC = torch.einsum('bnqd,bnkd->bnqk', [rw_head_q, k])  # b x n_head x max_len x max_len
        D_ = torch.einsum('nd,ld->nl', self.r_w_bias, pos_embed)[None, :, None]  # head x 2max_len, each head's bias towards each position
        B_ = torch.einsum('bnqd,ld->bnql', q, pos_embed)  # bsz x head x max_len x 2max_len, each query's bias towards each shift
        E_ = torch.einsum('bnqd,ld->bnql', k, pos_embed)  # bsz x head x max_len x 2max_len, each key's bias towards each relative position
        BD = B_ + D_  # bsz x head x max_len x 2max_len, to be converted into bsz x head x max_len x max_len
if self.k_as_x:
BD = self._shift(BD)
attn = AC + BD
else:
BDE = self._shift(BD) + self._transpose_shift(E_)
attn = AC + BDE
attn = attn.masked_fill(mask[:, None, None, :].eq(0), float('-inf'))
attn = F.softmax(attn, dim=-1)
attn = self.dropout_layer(attn)
        v = torch.matmul(attn, v).transpose(1, 2).reshape(batch_size, max_len, d_model)  # batch_size x max_len x d_model
return v
def _shift(self, BD):
"""类似
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
转换为
0 1 2
-1 0 1
-2 -1 0
Args:
BD: batch_size x n_head x max_len x 2max_len
Returns:
batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = BD.size()
zero_pad = BD.new_zeros(bsz, n_head, max_len, 1)
BD = torch.cat([BD, zero_pad], dim=-1).view(bsz, n_head, -1, max_len) # bsz x n_head x (2max_len+1) x max_len
BD = BD.narrow(dim=2, start=0, length=2 * max_len) \
.view(bsz, n_head, max_len, -1) # bsz x n_head x 2max_len x max_len
BD = BD.narrow(dim=-1, start=max_len, length=max_len)
return BD
def _transpose_shift(self, E):
"""类似
-3 -2 -1 0 1 2
-30 -20 -10 00 10 20
-300 -200 -100 000 100 200
转换为
0 -10 -200
1 00 -100
2 10 000
Args:
E: batch_size x n_head x max_len x 2max_len
Returns:
batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = E.size()
zero_pad = E.new_zeros(bsz, n_head, max_len, 1)
# bsz x n_head x -1 x (max_len+1)
E = torch.cat([E, zero_pad], dim=-1).view(bsz, n_head, -1, max_len)
indice = (torch.arange(max_len) * 2 + 1).to(E.device)
E = E.index_select(index=indice, dim=-2).transpose(-1, -2) # bsz x n_head x max_len x max_len
return E
class RelativeTransformerLayer(nn.Module):
def __init__(self,
in_features,
num_heads=4,
feedforward_dim=256,
dropout=0.2,
dropout_attn=None,
after_norm=True,
k_as_x=True,
init_seq_length=1024):
super().__init__()
if dropout_attn is None:
dropout_attn = dropout
self.after_norm = after_norm
self.norm1 = nn.LayerNorm(in_features)
self.norm2 = nn.LayerNorm(in_features)
self.self_attn = RelativeMultiHeadAttn(in_features,
num_heads,
dropout=dropout_attn,
init_seq_length=init_seq_length,
k_as_x=k_as_x)
self.ffn = nn.Sequential(nn.Linear(in_features, feedforward_dim),
nn.LeakyReLU(),
nn.Dropout(dropout, inplace=True),
nn.Linear(feedforward_dim, in_features),
nn.Dropout(dropout, inplace=True))
def forward(self, x, mask):
"""
Args:
x: batch_size x max_len x hidden_size
            mask: batch_size x max_len, positions equal to 0 are padding
Returns:
batch_size x max_len x hidden_size
"""
residual = x
if not self.after_norm:
x = self.norm1(x)
x = self.self_attn(x, mask)
x = x + residual
if self.after_norm:
x = self.norm1(x)
residual = x
if not self.after_norm:
x = self.norm2(x)
x = self.ffn(x)
x = residual + x
if self.after_norm:
x = self.norm2(x)
return x
class RelativeTransformer(nn.Module):
def __init__(self,
in_features,
num_layers,
feedforward_dim,
num_heads,
dropout,
dropout_attn=None,
after_norm=True,
init_seq_length=1024,
k_as_x=True):
super().__init__()
self.layers = nn.ModuleList([
            RelativeTransformerLayer(in_features, num_heads=num_heads, feedforward_dim=feedforward_dim,
                                     dropout=dropout, dropout_attn=dropout_attn, after_norm=after_norm,
                                     init_seq_length=init_seq_length, k_as_x=k_as_x)
for _ in range(num_layers)
])
def forward(self, x: Tensor, mask: Tensor):
"""
Args:
x: batch_size x max_len
            mask: batch_size x max_len. Positions holding a value are 1.
x: Tensor:
mask: Tensor:
Returns:
"""
for layer in self.layers:
x = layer(x, mask)
return x
class RelativeTransformerEncoder(RelativeTransformer, ConfigTracker):
def __init__(self,
in_features,
num_layers=2,
num_heads=4,
feedforward_dim=256,
dropout=0.1,
dropout_attn=0.1,
after_norm=True,
k_as_x=True,
):
        super().__init__(in_features, num_layers, feedforward_dim=feedforward_dim, num_heads=num_heads,
                         dropout=dropout, dropout_attn=dropout_attn, after_norm=after_norm, k_as_x=k_as_x)
ConfigTracker.__init__(self, locals())
def get_output_dim(self):
return self.config['in_features']
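# A minimal usage sketch, assuming the hanlp dependency imports above resolve;
# the hidden size and sequence length below are arbitrary.
if __name__ == '__main__':
    encoder = RelativeTransformerEncoder(in_features=128)
    x = torch.randn(2, 10, 128)                 # batch_size x max_len x hidden
    mask = torch.ones(2, 10, dtype=torch.long)  # 1 marks real tokens, 0 marks padding
    out = encoder(x, mask)
    assert out.shape == (2, 10, 128)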
``` |
{
"source": "1033020837/CRF4Torch",
"score": 3
} |
#### File: 1033020837/CRF4Torch/train.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
import os
import numpy as np
from torch.utils import data
from models import *
from utils import *
from config import *
from sklearn.metrics import *
# Logging setup
import logging
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Training function
def train(model, iterator, optimizer, epoch):
model.train()
    losses = []  # accumulated losses
for i, batch in enumerate(iterator):
x, y, seqlens, masks = batch
x = x.to(device)
y = y.to(device)
masks = masks.to(device)
optimizer.zero_grad()
loss = model(x, masks, y, training=True)
loss.backward()
optimizer.step()
losses.append(loss.item())
if i%output_loss_freq==0:
logger.info(f"iteration:{epoch} of {n_epochs}, step: {i}/{len(iterator)}, NER loss: {np.mean(losses):.6f}")
losses = []
# Evaluation function for the dev and test sets
def eval(model, iterator):
model.eval()
y_true, y_pred = [], []
phrases_count = 0
with torch.no_grad():
for i, batch in enumerate(iterator):
x, y, seqlens, masks = batch
x = x.to(device)
masks = masks.to(device)
_, y_hat = model(x, masks, training=False)
for i,seqlen in enumerate(seqlens):
phrases_count += 1
y_true.extend(y[i,1:seqlen-1].tolist())
y_pred.extend(y_hat[i,1:seqlen-1].tolist())
y_true, y_pred = np.array(y_true), np.array(y_pred)
logger.info(f'processed {len(y_true)} tokens with {phrases_count} phrases;')
    acc,p,r,f1 = accuracy_score(y_true,y_pred),precision_score(y_true,y_pred,average='macro',zero_division=0), \
        recall_score(y_true,y_pred,average='macro',zero_division=0),f1_score(y_true,y_pred,average='macro')
logger.info(f'accuracy: {acc:.4f}, precision: {p:.4f}, recall: {r:.4f}, f1: {f1:.4f}')
for idx,tag in idx2tag.items():
if tag in [START_SYMBOL,END_SYMBOL]:
continue
tp = np.sum(y_pred[y_true == idx] == idx)
fp = np.sum(y_true[y_pred == idx] != idx)
fn = np.sum(y_pred[y_true == idx] != idx)
        _p = tp / (tp + fp) if (tp + fp) > 0 else 0
        _r = tp / (tp + fn) if (tp + fn) > 0 else 0
if _p == 0 and _r ==0:
_f1 = 0
else:
_f1 = 2*_p*_r/(_p+_r)
logger.info(f'{tag}: precision: {_p:.4f}, recall: {_r:.4f}, f1: {_f1:.4f}')
return p,r,f1
if __name__=="__main__":
    # cuda was requested but is not available
if use_cuda and not torch.cuda.is_available():
raise Exception('You choose use cuda but cuda is not available.')
    os.makedirs(output_dir, exist_ok=True)  # create the output directory
model = BertLstmCRF().to(device)
model_save_path = os.path.join(output_dir, 'model.pth')
logger.info('Initial model Done')
train_dataset = NerDataset(train_file)
eval_dataset = NerDataset(dev_file)
test_dataset = NerDataset(test_file)
logger.info('Load Data Done')
train_iter = data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=4,
collate_fn=pad)
eval_iter = data.DataLoader(dataset=eval_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=4,
collate_fn=pad)
test_iter = data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=4,
collate_fn=pad)
    param_optimizer = model.named_parameters()  # model parameters
    # use different learning rates for the BERT and non-BERT parts
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if 'bert' in n], 'lr': bert_lr},
{'params': [p for n, p in param_optimizer if 'bert' not in n], 'lr': lr}
]
optimizer = optim.Adam(optimizer_grouped_parameters)
logger.info('Start Train...')
best_dev_f1 = 0
    no_improve_epoch = 0  # number of epochs without improvement of the dev F1
    for epoch in range(1, n_epochs+1):  # evaluate on the dev set after every epoch
train(model, train_iter, optimizer, epoch)
logger.info(f"evaluate at epoch={epoch}")
precision, recall, f1 = eval(model, eval_iter)
if f1 > best_dev_f1:
best_dev_f1 = f1
logger.info(f'new best dev f1: {f1:.4f}')
no_improve_epoch = 0
torch.save(model.state_dict(), model_save_path)
logger.info('model saved')
else:
no_improve_epoch += 1
if no_improve_epoch >= early_stop:
                logger.info('Early stopping...')
break
logger.info('Train done, testing...')
precision, recall, f1 = eval(model, test_iter)
```
#### File: 1033020837/CRF4Torch/utils.py
```python
import os
import numpy as np
import torch
from torch.utils.data import Dataset
from transformers import BertTokenizer
from config import *
tokenizer = BertTokenizer.from_pretrained(bert_model)
# Padding/start/end symbols: both the input tokens and the output tags use
# these two symbols to mark the start and end of a sequence
START_SYMBOL, END_SYMBOL = '[CLS]', '[SEP]'
# All tags
TAGS = (START_SYMBOL, END_SYMBOL, 'O', 'B-LOC', 'I-LOC', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', )
tag2idx = {tag: idx for idx, tag in enumerate(TAGS)}  # tag -> index
idx2tag = {idx: tag for idx, tag in enumerate(TAGS)}  # index -> tag
start_label_id = tag2idx[START_SYMBOL]  # index of the start tag
end_label_id = tag2idx[END_SYMBOL]  # index of the end tag
tagset_size = len(tag2idx)  # number of tags
MAX_LEN = max_len - 2  # [CLS] and [SEP] are added at both ends, hence the -2
# Dataset class
class NerDataset(Dataset):
def __init__(self, f_path):
        # read the data, storing sentences and their tag sequences in two lists
with open(f_path, 'r', encoding='utf-8') as fr:
entries = fr.read().strip().split('\n\n')
sents, tags_li = [], []
for entry in entries:
words = [line.split()[0] for line in entry.splitlines()]
tags = ([line.split()[-1] for line in entry.splitlines()])
sents.append(["[CLS]"] + words[:MAX_LEN] + ["[SEP]"])
tags_li.append(['[CLS]'] + tags[:MAX_LEN] + ['[SEP]'])
self.sents, self.tags_li = sents, tags_li
    # fetch one sample
def __getitem__(self, idx):
words, tags = self.sents[idx], self.tags_li[idx]
x = tokenizer.convert_tokens_to_ids(words)
y = [tag2idx[tag] for tag in tags]
assert len(x) == len(y)
seqlen = len(tags)
return x, y, seqlen
def __len__(self):
return len(self.sents)
def pad(batch):
'''
    Pad every sample to the maximum length within the batch
'''
f = lambda x: [sample[x] for sample in batch]
seqlens = f(-1)
maxlen = np.array(seqlens).max()
f = lambda x, seqlen: [sample[x] + [0] * (seqlen - len(sample[x])) for sample in batch] # 0: <pad>
x = f(0, maxlen)
y = f(1, maxlen)
f = lambda x, seqlen: [[1] * len(sample[x]) + [0] * (seqlen - len(sample[x])) for sample in batch]
    masks = f(1, maxlen)  # attention masks, padded to the batch maximum length
f = torch.LongTensor
return f(x), f(y), seqlens, torch.FloatTensor(masks)
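# Minimal sanity check for pad(), assuming config.py and the BERT vocab resolve;
# the token/tag ids below are arbitrary.
if __name__ == '__main__':
    toy_batch = [([101, 5, 102], [0, 2, 1], 3), ([101, 7, 8, 102], [0, 3, 4, 1], 4)]
    x, y, seqlens, masks = pad(toy_batch)
    assert x.shape == y.shape == masks.shape == (2, 4)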
``` |
{
"source": "103360729/assignmenta",
"score": 4
} |
#### File: 103360729/assignmenta/Q4.py
```python
class Node(object):
def __init__(self, initdata):
self.data = initdata
self.next = None
def getData(self):
return self.data
def setData(self, newdata):
self.data = newdata
def getNext(self):
return self.next
def setNext(self, newnext):
self.next = newnext
class LinkedList(object):
def __init__(self):
self.head = None
def isEmpty(self):
return self.head == None
def addItem(self, items):
temp = Node(items)
temp.setNext(self.head)
self.head = temp
def count(self):
count = 0
current = self.head
while current != None:
count += 1
current = current.getNext()
return count
def search(self, item):
current = self.head
found = False
while not found and current!= None:
if current.getData() == item:
found = True
else:
current = current.getNext()
return found
def remove(self, item):
current = self.head
previous = None
found = False
        while not found and current is not None:
            if current.getData() == item:
                found = True
            else:
                previous = current
                current = current.getNext()
        if not found:
            return
if previous == None:
self.head = current.getNext()
else:
previous.setNext(current.getNext())
def append(self, item):
current = self.head
previous = None
while current != None:
previous = current
current = current.getNext()
temp = Node(item)
if previous == None:
self.head = temp
else:
previous.setNext(temp)
def insert(self, item, index):
count = 0
current = self.head
previous = None
while count != index:
count = count + 1
previous = current
current = current.getNext()
temp = Node(item)
if previous == None:
temp.setNext(current)
self.head = temp
else:
temp.setNext(current)
previous.setNext(temp)
def index(self, item):
index = 0
found = False
current = self.head
while not found and current != None:
if current.getData() == item:
found = True
else:
index += 1
current = current.getNext()
if found:
return index
else:
return -1
def pop(self, index):
currentindex = 0
current = self.head
previous = None
while currentindex != index:
            previous = current
current = current.getNext()
currentindex += 1
if previous == None:
self.head = current.getNext()
return current.getData()
else:
previous.setNext(current.getNext())
return current.getData()
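# A quick demo of the list operations (values are arbitrary):
if __name__ == '__main__':
    lst = LinkedList()
    for n in (3, 2, 1):
        lst.addItem(n)        # addItem pushes to the front: 1 -> 2 -> 3
    lst.append(4)             # 1 -> 2 -> 3 -> 4
    assert lst.count() == 4
    assert lst.search(3) and lst.index(2) == 1
    assert lst.pop(0) == 1    # 2 -> 3 -> 4
    lst.remove(3)             # 2 -> 4
    assert lst.count() == 2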
``` |
{
"source": "1034300/Classic-WoW-AH-Archiver",
"score": 3
} |
#### File: 1034300/Classic-WoW-AH-Archiver/subscriber.py
```python
from requests import Session
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from time import sleep
import sqlite3
import json
# Base URL for the nexushub API
API = 'https://api.nexushub.co/wow-classic/v1'
# The server/faction market to pull data for
AH = 'earthshaker-horde'
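# The 'overview' table is assumed to exist in ah.db already; a schema that
# matches the INSERT in Subscriber.overview (column names are guesses) would be:
#   CREATE TABLE IF NOT EXISTS overview (
#       scanned_at TEXT, scan_id INTEGER, item_id INTEGER,
#       market_value INTEGER, historical_value INTEGER, min_buyout INTEGER,
#       num_auctions INTEGER, quantity INTEGER, slug TEXT
#   );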
class Subscriber:
def __init__(self, API: str, auction_house: str, database='ah.db'):
'''
API: string,
base url of API to pull data from
auction_house: string,
            auction house to pull data for, example: 'faerlina-alliance'
scan_id: integer,
unique id of the current market scan available on the API
scanned_at: string,
time of current market scan available on the API
'''
self.API = API
self.auction_house = auction_house
self.database = database
self.scan_id = 0
self.scanned_at = ""
retry_strategy = Retry(
# Configure an HTTPAdapter to retry/backoff
# when a network connection is unavailable (IP lease renewal etc)
# https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/#retry_on_failure
total=10,
backoff_factor=1,
status_forcelist=[429, 500, 502, 503, 504],
method_whitelist=["GET"]
)
adapter = HTTPAdapter(max_retries=retry_strategy)
self.http = Session()
self.http.mount("https://", adapter)
self.http.mount("http://", adapter)
def latest(self):
        ''' [GET] Gets info on the latest scan available
on the API for this auction house '''
# Make GET request
r = self.http.get(self.API + "/scans/latest/" + self.auction_house)
# Validate json response
try:
r_json = json.loads(r.text)
except json.JSONDecodeError as e:
print("JSONDecodeError: {}".format(e))
return
try:
if r_json['scanId'] != self.scan_id:
# New scan_id indicates new data available
print("[latest]:", str(r_json['scanId']), r_json['scannedAt'])
# Update state to reflect this
self.scan_id = r_json['scanId']
s = r_json['scannedAt']
# Update scanned_at state
self.scanned_at = s
        except KeyError:
print(r_json['error'], r_json['reason'])
return
def overview(self):
''' [GET] Retrieves a full overview (aggregated info
about all active auctions) of the auction house
and saves it to disk in ./data/overview/ '''
# Make GET request
r = self.http.get(self.API + "/items/" + self.auction_house)
# Load string JSON response into a dict
resp_dict = json.loads(r.text)
# Slug index contains WoW server name/faction pair
# ("earthshaker-horde")
slug = resp_dict['slug']
print("[overview/INSERT]: retrieved new overview from scan_id",
str(self.scan_id))
# Get a connection to the DB
conn = sqlite3.connect(self.database)
# Get a cursor
c = conn.cursor()
# For every anonymous dict in the parent dict
for item in resp_dict["data"]:
# Build a tuple of the data we want in left-to-right order
# (or as the columns are ordered in the table)
t = (
self.scanned_at,
self.scan_id,
int(item['itemId']),
item['marketValue'],
item['historicalValue'],
item['minBuyout'],
item['numAuctions'],
item['quantity'],
slug
)
# Construct an INSERT query into the overview table
sql = "INSERT INTO overview VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"
# Execute the query, supplying the tuple containing the data
try:
c.execute(sql, t)
except Exception as e:
print("{}: {}".format(type(e), e))
# After all item's data have been inserted, commit our changes
conn.commit()
# Finally, close the connection.
conn.close()
def main():
# Create a new subscriber instance
subscriber = Subscriber(API, AH)
while True:
last_scan_id = subscriber.scan_id
last_scanned_at = subscriber.scanned_at
# Retrieve information about the latest/current data available on the API
subscriber.latest()
# If there's new data available (scan id and time changed), retrieve it
# Else do nothing (wait for new data)
if last_scan_id != subscriber.scan_id\
and last_scanned_at != subscriber.scanned_at:
subscriber.overview()
# Wait one minute
sleep(60)
if __name__ == '__main__':
main()
``` |
{
"source": "10382/can-pdu-datareader",
"score": 2
} |
#### File: 10382/can-pdu-datareader/monitorfun.py
```python
import serial
import time
import struct
import pandas as pd
# Reset the active-energy (kWh) counter
def clear_consum(ser):
h_consum_clr = b'\x01\x06\x00\x04\xaa\xbb\xf6\xd8'
l_consum_clr = b'\x01\x06\x00\x05\xaa\xbb\xa7\x18'
ser.write(h_consum_clr)
flag = 0
if ser.readline() == h_consum_clr:
flag = 1
ser.write(l_consum_clr)
if flag == 1 and ser.readline() == l_consum_clr:
print("用电量清零成功!")
else:
print("用电量清空失败!!!")
def get_data(ser, filename):
data_rec = b'\x00\x00'
while len(data_rec) != 17 or data_rec[0:2] != b'\x01\x03':
time_send = int(time.time())
# print(time.mktime(time.localtime()))
ser.write(b"\x01\x03\x00\x00\x00\x06\xc5\xc8")
# data_rec = ser.read(17)
data_rec = ser.readline()
# time_rec = time.time()
print(time_send)
while len(data_rec) < 17:
data_break = ser.readline()
if data_break == b'':
print("break!")
break
elif data_rec[-1:] == b'\n':
data_rec = data_rec + data_break
# print(data_rec)
voltage = struct.unpack('!I', b'\x00\x00' + data_rec[3:5])[0] / 10.0
current = struct.unpack('!I', b'\x00\x00' + data_rec[5:7])[0] / 100.0
power = struct.unpack('!I', b'\x00\x00' + data_rec[7:9])[0]
power_factor = struct.unpack('!I', b'\x00\x00' + data_rec[9:11])[0] / 1000.0
power_consum = struct.unpack('!L', data_rec[11:15])[0]
df = pd.DataFrame({ 'epoch' : time_send,
'voltage' : voltage,
'current' : current,
'power' : power,
'power_factor' : power_factor,
'power_consumption' : power_consum }, index=[0])
df.to_csv(filename, mode='a', header=False, index=False)
# print("电压:", voltage, "V")
# print("电流:", current, "A")
# print("功率:", power, "kW")
# print("功率因子:", power_factor)
# print("有功电能:", power_consum, "kWh")
``` |
{
"source": "103yiran/open-vot",
"score": 3
} |
#### File: lib/utils/viz.py
```python
from __future__ import absolute_import, division
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
def show_frame(image, bndbox=None, fig_n=1, pause=0.001,
thickness=5, cmap=None, color=(255, 0, 0)):
global fig_dict
    if 'fig_dict' not in globals():
fig_dict = {}
if isinstance(image, np.ndarray):
image = Image.fromarray(image)
if bndbox is not None:
image = image.copy()
draw = ImageDraw.Draw(image)
color = color if image.mode == 'RGB' else 255
for t in range(-thickness // 2, thickness // 2 + 1):
draw.rectangle((
int(bndbox[0] + t),
int(bndbox[1] + t),
int(bndbox[0] + bndbox[2] + t),
int(bndbox[1] + bndbox[3] + t)),
outline=color)
    if fig_n not in fig_dict:
fig = plt.figure(fig_n)
plt.axis('off')
fig.tight_layout()
fig_dict[fig_n] = plt.imshow(image, cmap=cmap)
else:
fig_dict[fig_n].set_data(image)
plt.pause(pause)
plt.draw()
return image
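# Example call, assuming an interactive matplotlib backend:
#   img = np.zeros((240, 320, 3), dtype=np.uint8)
#   show_frame(img, bndbox=[40, 30, 120, 90])  # bndbox is [x, y, width, height]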
``` |
{
"source": "10419/exame",
"score": 4
} |
#### File: 10419/exame/examen1.py
```python
class examen1:
def estCondicional01():
print("Hospital")
e=int(input("ingrese su edad:"))
print("sexo")
print("1=varon")
print("2=mujeer")
s=int(input("ingrese su sexo:"))
if e>=70:
print("la persona recibe la vacuna C")
elif e>=16 and e<70 and s==2:
print("la persona recibe la vacuna B")
elif e>=16 and e<70 and s==1:
print("la persona recibe la vacuna A")
else :
print("la persona recibe la vacuna A")
estCondicional01()
```
#### File: 10419/exame/exam.py
```python
class exam:
def estCondicional01():
print("UPeU")
print("Bonificacion para el docente")
sa=850
Su=int(input("ingrese su sueldo:"))
P=int(input("Ingrese la cantidada de puntos:"))
P==0
s=0
if P>=50 and P<101:
s=(sa*0.10)+Su
elif P>=101 and P<151:
s=(sa*0.40)+Su
elif P>=151 :
s=(sa*0.70)+Su
else:
print("verifique sus puntos")
print("Su sueldo sueldo es de",s,", gracias por trabajar con nosotros")
estCondicional01()
```
#### File: 10419/exame/exa.py
```python
class exa:
def estCondicional01():
print("UPeU")
print("Fundamentos de Programaion")
n=int(input("ingrese su nota, primera unidad:"))
n1=int(input("ingrese su nota, segunda unidad:"))
n2=int(input("ingrese su nota, tercera unidad:"))
n3=int(input("ingrese su nota, trabajo final:"))
n01=n*0.20
n02=n1*0.15
n03=n2*0.15
n04=n3*0.50
pro=(n01+n02+n03+n04)
print("La nota del alumno es de",pro)
if pro>=20:
print("Sigue asi llegaras muy lejos")
else:
print("esfuerzate")
estCondicional01()
``` |
{
"source": "10419/Tarea-10-ejercicios",
"score": 4
} |
#### File: 10419/Tarea-10-ejercicios/ejercicio 10.py
```python
def estCondicional01():
print("beca estudiaPeru")
e=int(input("ingrese su edad:"))
n=float(input("ingrese su nota promedio:"))
if e>18 and n>=9:
print("la beca sera de 2000.00 dolares")
elif e>18 and n>=7.5:
print("la beca sera de 1000.00 dolares")
elif e>18 and n>=6 and n<7.5:
print("la beca sera de 500.00 dolares")
elif e<18 and n>=9:
print("la beca sera de 3000.00 dolares")
elif e<18 and n>=8 and n<9:
print("la beca sera de 2000.00 dolares")
elif e<18 and n>=6 and n<8:
print("la beca sera de 100.00 dolares")
else:
print("sigue estudiando campeon")
estCondicional01()
```
#### File: 10419/Tarea-10-ejercicios/ejercicio 6.py
```python
def estCondicional101():
print("Bienvnido tienda el garoto")
d=0
p=int(input("ingrese el precio del articulos:"))
if p>0 and p<100:
d=p*0.10
elif p>100 and p<200:
d=p*0.12
else:
d=p*0.15
print("El precio del articulo es",d)
estCondicional101()
```
#### File: 10419/Tarea-10-ejercicios/ejercicio 7.py
```python
def estCondicional101():
print("Bienvendio querido empleado(a)")
pm=2000
ps=0
print("te daremos un pequeño regalo por tu antiguedad")
A=int(input("ingrese los años que viene trabajand con nosotros:"))
A==0
if A>=1 and A<2:
ps=pm+100
elif A>=2 and A<3:
ps=pm+200
elif A>=3 and A<4:
ps=pm+300
elif A>=4 and A<5:
ps=pm+400
elif A>=5 and A<6:
ps=pm+500
else:
ps=pm+1000
print("Su pago por este mes sera",ps,"gracias por trabajar con nosotros.")
estCondicional101()
``` |
{
"source": "10434542/tensors",
"score": 3
} |
#### File: 10434542/tensors/tensorcommon.py
```python
import numpy as np
import collections.abc
import itertools as itt
import functools as fct
import warnings
class TensorCommon:
""" A base class for Tensor and AbelianTensor, that implements some
higher level functions that are common to the two. Useful also for
type checking as in isinstance(T, TensorCommon).
"""
@classmethod
def empty(cls, *args, **kwargs):
return cls.initialize_with(np.empty, *args, **kwargs)
@classmethod
def zeros(cls, *args, **kwargs):
return cls.initialize_with(np.zeros, *args, **kwargs)
@classmethod
def ones(cls, *args, **kwargs):
return cls.initialize_with(np.ones, *args, **kwargs)
@classmethod
def random(cls, *args, **kwargs):
return cls.initialize_with(np.random.random_sample, *args, **kwargs)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Miscellaneous
def form_str(self):
s = "shape: %s\nqhape: %s\ndirs: %s"%(
str(self.shape), str(self.qhape), str(self.dirs))
return s
@staticmethod
def flatten_shape(shape):
try:
return tuple(map(TensorCommon.flatten_dim, shape))
except TypeError:
return shape
@staticmethod
def flatten_dim(dim):
try:
return sum(dim)
except TypeError:
return dim
def norm_sq(self):
conj = self.conj()
all_inds = tuple(range(len(self.shape)))
norm_sq = self.dot(conj, (all_inds, all_inds))
return np.abs(norm_sq.value())
def norm(self):
return np.sqrt(self.norm_sq())
@classmethod
def default_trunc_err_func(cls, S, chi, norm_sq=None):
if norm_sq is None:
norm_sq = sum(S**2)
sum_disc = sum(S[chi:]**2)
err = np.sqrt(sum_disc/norm_sq)
return err
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The meat: actual tensor operations
def to_matrix(self, left_inds, right_inds, dirs=None,
return_transposed_shape_data=False):
""" Transposes left_inds to one side of self and right_inds to
the other, and joins these indices so that the result is a
matrix. On both sides, before reshaping, the indices are also
transposed to the order given in left/right_inds. If one or both
of left/right_inds is not provided the result is a vector or a
scalar.
dirs are the directions of the new indices. By default it is
[1,-1] for matrices and [1] (respectively [-1]) if only
left_inds (respectively right_inds) is provided.
If return_transposed_shape_data is True then the shape, qhape
and dirs of the tensor after all the transposing but before
reshaping is returned as well.
"""
if dirs is None:
if len(left_inds) > 0 and len(right_inds) > 0:
dirs = [1,-1]
elif len(right_inds) > 0:
dirs = [-1]
elif len(left_inds) > 0:
dirs = [1]
else:
dirs = []
result = self.join_indices(left_inds, right_inds, dirs=dirs,
return_transposed_shape_data=\
return_transposed_shape_data)
if return_transposed_shape_data:
result, transposed_shape, transposed_qhape, transposed_dirs =\
result
# join_indices does not return a matrix with left_inds as the
# first index and right_inds as the second, so we may have to
# transpose.
if left_inds and right_inds and left_inds[0] > right_inds[0]:
result = result.swapaxes(1,0)
if return_transposed_shape_data:
ts_left = transposed_shape[:len(right_inds)]
ts_right = transposed_shape[len(right_inds):]
transposed_shape = ts_right + ts_left
if transposed_qhape is not None:
qs_left = transposed_qhape[:len(right_inds)]
qs_right = transposed_qhape[len(right_inds):]
transposed_qhape = qs_right + qs_left
if transposed_dirs is not None:
qs_left = transposed_dirs[:len(right_inds)]
qs_right = transposed_dirs[len(right_inds):]
transposed_dirs = qs_right + qs_left
if return_transposed_shape_data:
return result, transposed_shape, transposed_qhape, transposed_dirs
else:
return result
def from_matrix(self, left_dims, right_dims,
left_qims=None, right_qims=None,
left_dirs=None, right_dirs=None):
""" The counter part of to_matrix, from_matrix takes in a matrix
and the dims, qims and dirs lists of the left and right indices
that the resulting tensor should have. Mainly meant to be used
so that one first calls to_matrix, takes note of the
transposed_shape_data and uses that to reshape the matrix back
to a tensor once one is done operating on the matrix.
"""
indices = tuple(range(len(self.shape)))
final_dims = ()
final_qims = ()
final_dirs = ()
if indices:
if left_dims:
final_dims += (left_dims,)
final_qims += (left_qims,)
final_dirs += (left_dirs,)
if right_dims:
final_dims += (right_dims,)
final_qims += (right_qims,)
final_dirs += (right_dirs,)
if left_qims is right_qims is None:
final_qims = None
if left_dirs is right_dirs is None:
final_dirs = None
return self.split_indices(indices, final_dims, qims=final_qims,
dirs=final_dirs)
def dot(self, other, indices):
""" Dot product of tensors. See numpy.tensordot on how to use
this, the interface is exactly the same, except that this one is
a method, not a function. The original tensors are not modified.
"""
# We want to deal with lists, not tuples or bare integers
a,b = indices
        if isinstance(a, collections.abc.Iterable):
a = list(a)
else:
a = [a]
        if isinstance(b, collections.abc.Iterable):
b = list(b)
else:
b = [b]
# Check that 1) the number of contracted indices for self and
# other match and 2) that the indices are compatible, i.e. okay
# to contract with each other. In addition raise a warning if
# the dirs don't match.
assert(len(a) == len(b))
assert(all(itt.starmap(
fct.partial(self.compatible_indices, other),
zip(a, b))))
if (self.dirs is not None and other.dirs is not None and
not all(self.dirs[i] + other.dirs[j] == 0
for i,j in zip(a,b))):
warnings.warn("dirs in dot do not match.")
s_sum = a
s_open = [i for i in range(len(self.shape)) if i not in a]
o_sum = b
o_open = [i for i in range(len(other.shape)) if i not in b]
self, s_transposed_shape, s_transposed_qhape, s_transposed_dirs =\
self.to_matrix(s_open, s_sum,
return_transposed_shape_data=True)
other, o_transposed_shape, o_transposed_qhape, o_transposed_dirs =\
other.to_matrix(o_sum, o_open,
return_transposed_shape_data=True)
self = self.matrix_dot(other)
del(other)
l_dims = s_transposed_shape[:len(s_open)]
r_dims = o_transposed_shape[len(o_sum):]
try:
l_qims = s_transposed_qhape[:len(s_open)]
r_qims = o_transposed_qhape[len(o_sum):]
except TypeError:
l_qims = None
r_qims = None
try:
l_dirs = s_transposed_dirs[:len(s_open)]
r_dirs = o_transposed_dirs[len(o_sum):]
except TypeError:
l_dirs = None
r_dirs = None
self = self.from_matrix(l_dims, r_dims,
left_qims=l_qims, right_qims=r_qims,
left_dirs=l_dirs, right_dirs=r_dirs)
return self
def eig(self, a, b, *args, return_rel_err=False, **kwargs):
""" Transpose indices a to be on one side of self, b on the
other, and reshape self to a matrix. Then find the eigenvalues
and eigenvectors of this matrix, and reshape the eigenvectors to
have on the left side the indices that self had on its right
side after transposing but before reshaping.
If hermitian is True then the matrix that is formed after the
reshape is assumed to be hermitian.
Truncation works like with SVD.
Output is S, U, [rel_err], where S is a vector of eigenvalues
and U is a tensor such that the last index enumerates the
eigenvectors of self in the sense that if u_i = U[...,i] then
self.dot(u_i, (b, all_indices_of_u_i)) == S[i] * u_i. rel_err is
relative error in truncation, only returned if return_rel_err is
True.
The above syntax is precisely correct only for Tensors. For
AbelianTensors the idea is the same, but eigenvalues and vectors
come with quantum numbers so the syntax is slightly different.
See AbelianTensor.matrix_eig for more details about what
precisely happens.
The original tensor is not modified by this method.
"""
        if not isinstance(a, collections.abc.Iterable):
a = (a,)
        if not isinstance(b, collections.abc.Iterable):
b = (b,)
self, transposed_shape, transposed_qhape, transposed_dirs\
= self.to_matrix(a, b, return_transposed_shape_data=True)
S, U, rel_err = self.matrix_eig(*args, **kwargs)
del(self)
U_dims = (transposed_shape[:len(a)], S.shape)
if transposed_qhape is not None:
U_qims = (transposed_qhape[:len(a)], S.qhape)
else:
U_qims = (None, None)
if transposed_dirs is not None:
U_dirs = (transposed_dirs[:len(a)], U.dirs[1:])
else:
U_dirs = (None, None)
U = U.from_matrix(*U_dims,
left_qims=U_qims[0], right_qims=U_qims[1],
left_dirs=U_dirs[0], right_dirs=U_dirs[1])
ret_val = (S, U)
if return_rel_err:
ret_val += (rel_err,)
return ret_val
def svd(self, a, b, *args, return_rel_err=False, **kwargs):
""" Transpose indices a to be on one side of self, b on the
other, and reshape self to a matrix. Then singular value
decompose this matrix into U, S, V. Finally reshape the unitary
matrices to tensors that have a new index coming from the SVD,
for U as the last index and for V as the first, and U to have
indices a as its first indices and V to have indices b as its
last indices.
If eps>0 then the SVD may be truncated if the relative Frobenius
norm error can be kept below eps. For this purpose different
dimensions to truncate to can be tried, and these dimensions
should be listed in chis. If chis is None then the full range of
possible dimensions is tried.
If print_errors > 0 then the truncation error is printed.
If return_rel_err is True then the relative truncation error is
also returned.
Output is U, S, V, and possibly rel_err. Here S is a vector of
singular values and U and V are isometric tensors (unitary if
the matrix that is SVDed is square and there is no truncation).
U . diag(S) . V = self, up to truncation errors.
The original tensor is not modified by this method.
"""
        if not isinstance(a, collections.abc.Iterable):
a = (a,)
        if not isinstance(b, collections.abc.Iterable):
b = (b,)
self, transposed_shape, transposed_qhape, transposed_dirs =\
self.to_matrix(a, b, return_transposed_shape_data=True)
U, S, V, rel_err = self.matrix_svd(*args, **kwargs)
del(self)
U_dims = (transposed_shape[:len(a)], S.shape)
V_dims = (S.shape, transposed_shape[len(a):])
if transposed_qhape is not None:
U_qims = (transposed_qhape[:len(a)], S.qhape)
V_qims = (S.qhape, transposed_qhape[len(a):])
else:
U_qims = (None, None)
V_qims = (None, None)
if transposed_dirs is not None:
U_dirs = (transposed_dirs[:len(a)], U.dirs[1:])
V_dirs = (V.dirs[:1], transposed_dirs[len(a):])
else:
U_dirs = (None, None)
V_dirs = (None, None)
U = U.from_matrix(*U_dims,
left_qims=U_qims[0], right_qims=U_qims[1],
left_dirs=U_dirs[0], right_dirs=U_dirs[1])
V = V.from_matrix(*V_dims,
left_qims=V_qims[0], right_qims=V_qims[1],
left_dirs=V_dirs[0], right_dirs=V_dirs[1])
ret_val = (U, S, V)
if return_rel_err:
ret_val += (rel_err,)
return ret_val
def matrix_decomp_format_chis(self, chis, eps):
""" A common function for formatting the truncation parameters
of SVD and eig. This is meant to be called by the matrix_svd and
matrix_eig functions of subclasses.
"""
if chis is None:
min_dim = min(type(self).flatten_dim(self.shape[i])
for i in range(len(self.shape))) + 1
if eps > 0:
chis = tuple(range(min_dim))
else:
chis = [min_dim]
else:
try:
chis = tuple(chis)
except TypeError:
chis = [chis]
if eps == 0:
chis = [max(chis)]
else:
chis = sorted(chis)
return chis
def split(self, a, b, *args, return_rel_err=False,
return_sings=False, weight="both", **kwargs):
""" Split with SVD. Like SVD, but takes the square root of the
singular values and multiplies both unitaries with it, so that
the tensor is split into two parts. Values are returned as
(US, {S}, SV, {rel_err}),
where the ones in curly brackets are only returned if the
corresponding arguments are True.
The distribution of sqrt(S) onto the two sides can be changed
with the keyword argument weight. If weight="left"
(correspondingly "right") then S is multiplied into U
(correspondingly V). By default weight="both".
"""
svd_result = self.svd(a, b, *args, return_rel_err=return_rel_err,
**kwargs)
U, S, V = svd_result[0:3]
weight = weight.strip().lower()
if weight in ("both", "split", "center", "centre", "c", "middle", "m"):
S_sqrt = S.sqrt()
U = U.multiply_diag(S_sqrt, -1, direction="right")
V = V.multiply_diag(S_sqrt, 0, direction="left")
elif weight in ("left", "l", "a", "u"):
U = U.multiply_diag(S, -1, direction="right")
elif weight in ("right", "r", "b", "v"):
V = V.multiply_diag(S, 0, direction="left")
else:
raise ValueError("Unknown value for weight: {}".format(weight))
if return_sings:
ret_val = U, S, V
else:
ret_val = U, V
if return_rel_err:
ret_val += (svd_result[3],)
return ret_val
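# Usage sketch. TensorCommon is a mixin: a concrete subclass (e.g. Tensor from
# this package) must provide initialize_with, conj, join_indices, split_indices
# and the matrix_* primitives. Assuming such a subclass, typical calls are:
#   A = Tensor.random((3, 4, 5))
#   U, S, V = A.svd((0, 1), (2,), eps=1e-8)      # truncated SVD across a split
#   n = A.dot(A.conj(), ((0, 1, 2), (0, 1, 2)))  # full contraction, cf. norm_sq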
``` |
{
"source": "1044197988/Centernet-Tensorflow2.0",
"score": 2
} |
#### File: Centernet-Tensorflow2.0/TF2-CenterNet/hpdet_coco.py
```python
import argparse
import cv2
from glob import glob
import numpy as np
from tqdm import tqdm
import os
import json
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from hourglass import HourglassNetwork, normalize_image
from decode import HpDetDecode
from letterbox import LetterboxTransformer
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output', default='output', type=str)
parser.add_argument('--data', default='val2017', type=str)
parser.add_argument('--annotations', default='annotations', type=str)
parser.add_argument('--inres', default='512,512', type=str)
parser.add_argument('--no-full-resolution', action='store_true')
args, _ = parser.parse_known_args()
args.inres = tuple(int(x) for x in args.inres.split(','))
if not args.no_full_resolution:
args.inres = (None, None)
os.makedirs(args.output, exist_ok=True)
kwargs = {
'num_stacks': 2,
'cnv_dim': 256,
'weights': 'hpdet_coco',
'inres': args.inres,
}
heads = {
'hm': 1, # 6
'hm_hp': 17, # 7
'hp_offset': 2, # 8
'hps': 34, # 9
'reg': 2, # 10
'wh': 2, # 11
}
out_fn_keypoints = os.path.join(args.output, args.data + '_keypoints_results_%s_%s.json' % (
args.inres[0], args.inres[1]))
model = HourglassNetwork(heads=heads, **kwargs)
model = HpDetDecode(model)
if args.no_full_resolution:
letterbox_transformer = LetterboxTransformer(args.inres[0], args.inres[1])
else:
letterbox_transformer = LetterboxTransformer(mode='testing', max_stride=128)
fns = sorted(glob(os.path.join(args.data, '*.jpg')))
results = []
for fn in tqdm(fns):
img = cv2.imread(fn)
image_id = int(os.path.splitext(os.path.basename(fn))[0])
pimg = letterbox_transformer(img)
pimg = normalize_image(pimg)
pimg = np.expand_dims(pimg, 0)
detections = model.predict(pimg)[0]
for d in detections:
score = d[4]
x1, y1, x2, y2 = d[:4]
x1, y1, x2, y2 = letterbox_transformer.correct_box(x1, y1, x2, y2)
x1, y1, x2, y2 = float(x1), float(y1), float(x2), float(y2)
kps = d[5:-1]
kps_x = kps[:17]
kps_y = kps[17:]
kps = letterbox_transformer.correct_coords(np.vstack([kps_x, kps_y])).T
# add z = 1
kps = np.concatenate([kps, np.ones((17, 1), dtype='float32')], -1)
kps = list(map(float, kps.flatten()))
image_result = {
'image_id': image_id,
'category_id': 1,
'score': float(score),
'bbox': [x1, y1, (x2 - x1), (y2 - y1)],
'keypoints': kps,
}
results.append(image_result)
if not len(results):
print("No predictions were generated.")
return
# write output
with open(out_fn_keypoints, 'w') as f:
json.dump(results, f, indent=2)
print("Predictions saved to: %s" % out_fn_keypoints)
# load results in COCO evaluation tool
gt_fn = os.path.join(args.annotations, 'person_keypoints_%s.json' % args.data)
print("Loading GT: %s" % gt_fn)
coco_true = COCO(gt_fn)
coco_pred = coco_true.loadRes(out_fn_keypoints)
coco_eval = COCOeval(coco_true, coco_pred, 'keypoints')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval.stats
if __name__ == '__main__':
main()
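# Typical invocations (paths are assumptions matching the defaults above):
#   python hpdet_coco.py --data val2017 --annotations annotations
#   python hpdet_coco.py --no-full-resolution --inres 512,512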
``` |
{
"source": "1044197988/-",
"score": 3
} |
#### File: -/FCN8s/FCN8S预测.py
```python
import cv2
import random
import numpy as np
import os
import argparse
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from sklearn.preprocessing import LabelEncoder
import tensorflow as tf
from FCN8S import dice_coef
import time
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
basePath = "C:\\Users\\Administrator\\Desktop\\Project\\"
TEST_SET = ['1.png']
image_size = 32
classes=[0.0,1.0,2.0,3.0,4.0,15.0]
labelencoder = LabelEncoder()
labelencoder.fit(classes)
def args_parse():
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=False,default="FCN.h5",
help="path to trained model model")
ap.add_argument("-s", "--stride", required=False,
help="crop slide stride", type=int, default=image_size)
args = vars(ap.parse_args())
return args
def predict(args):
# load the trained convolutional neural network
print("载入网络权重中……")
model = load_model(args["model"],custom_objects={'dice_coef': dice_coef})
stride = args['stride']
print("进行预测分割拼图中……")
for n in range(len(TEST_SET)):
path = TEST_SET[n]
#load the image
image = cv2.imread(basePath+'train\\' + path)
h,w,_ = image.shape
padding_h = (h//stride + 1) * stride
padding_w = (w//stride + 1) * stride
padding_img = np.zeros((padding_h,padding_w,3),dtype=np.uint8)
padding_img[0:h,0:w,:] = image[:,:,:]
padding_img = padding_img.astype("float") / 255.0
padding_img = img_to_array(padding_img)
mask_whole = np.zeros((padding_h,padding_w),dtype=np.uint8)
for i in range(padding_h//stride):
for j in range(padding_w//stride):
crop = padding_img[i*stride:i*stride+image_size,j*stride:j*stride+image_size,:3]
ch,cw,_ = crop.shape
#print(ch,cw,_)
if ch != 32 or cw != 32:
                    print('Unexpected crop size, please check!')
continue
crop = np.expand_dims(crop, axis=0)
pred = model.predict(crop,verbose=2)
pred=np.argmax(pred,axis=3)
pred=pred.flatten()
pred = labelencoder.inverse_transform(pred)
pred = pred.reshape((32,32)).astype(np.uint8)
mask_whole[i*stride:i*stride+image_size,j*stride:j*stride+image_size] = pred[:,:]
cv2.imwrite(basePath+'predict/'+path,mask_whole[0:h,0:w])
if __name__ == '__main__':
A=time.time()
args = args_parse()
predict(args)
B=time.time()
print("运行时长:%.1f" % float(B-A)+"s")
```
#### File: -/Segnet/训练.py
```python
import matplotlib
matplotlib.use("Agg")
import tensorflow as tf
import argparse
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D,MaxPooling2D,UpSampling2D,BatchNormalization,Reshape,Permute,Activation
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.preprocessing import LabelEncoder
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import random
import os
from tqdm import tqdm
seed = 7
np.random.seed(seed)
# Image size
img_w = 32
img_h = 32
# Number of classes
n_label=6
classes=[0.0,17.0,34.0,51.0,68.0,255.0]
labelencoder = LabelEncoder()
labelencoder.fit(classes)
# Number of epochs and batch size
EPOCHS = 5
BS = 32
# Maximum pixel value
divisor=255.0
# Root path of the dataset
filepath = 'C:\\Users\\Administrator\\Desktop\\Project\\src\\'
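# Expected layout (an assumption inferred from the paths used below):
#   <filepath>/train/<tile>.png  input image tiles
#   <filepath>/label/<tile>.png  grayscale label tiles whose values are in `classes`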
# Read an image
def load_img(path, grayscale=False):
    if grayscale:
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        # keep the raw label values so they match `classes` for the LabelEncoder
        img = np.array(img, dtype="float")
    else:
        img = cv2.imread(path)
        img = np.array(img, dtype="float") / divisor
    return img
# Collect the train and validation file lists
def get_train_val(val_rate = 0.25):
train_url = []
train_set = []
val_set = []
for pic in os.listdir(filepath + 'train'):
train_url.append(pic)
random.shuffle(train_url)
total_num = len(train_url)
val_num = int(val_rate * total_num)
for i in range(len(train_url)):
if i < val_num:
val_set.append(train_url[i])
else:
train_set.append(train_url[i])
return train_set,val_set
# Training data generator
def generateData(batch_size,data=[]):
while True:
train_data = []
train_label = []
batch = 0
for i in (range(len(data))):
url = data[i]
batch += 1
img = load_img(filepath + 'train/' + url)
img = img_to_array(img)
train_data.append(img)
label = load_img(filepath + 'label/' + url, grayscale=True)
label = img_to_array(label).reshape((img_w * img_h,))
train_label.append(label)
if batch % batch_size==0:
train_data = np.array(train_data)
                train_label = np.array(train_label).flatten()  # flatten
train_label = labelencoder.transform(train_label)
                train_label = to_categorical(train_label, num_classes=n_label)  # one-hot encode the labels
train_label = train_label.reshape((batch_size,img_w,img_h,n_label))
yield (train_data,train_label)
train_data = []
train_label = []
batch = 0
# Validation data generator
def generateValidData(batch_size,data=[]):
while True:
valid_data = []
valid_label = []
batch = 0
for i in (range(len(data))):
url = data[i]
batch += 1
img = load_img(filepath + 'train/' + url)
img = img_to_array(img)
valid_data.append(img)
label = load_img(filepath + 'label/' + url, grayscale=True)
label = img_to_array(label).reshape((img_w * img_h,))
valid_label.append(label)
if batch % batch_size==0:
valid_data = np.array(valid_data)
valid_label = np.array(valid_label).flatten()
valid_label = labelencoder.transform(valid_label)
valid_label = to_categorical(valid_label, num_classes=n_label)
valid_label = valid_label.reshape((batch_size,img_w,img_h,n_label))
yield (valid_data,valid_label)
valid_data = []
valid_label = []
batch = 0
# Define the SegNet model
def SegNet():
model = Sequential()
#encoder
model.add(Conv2D(64,(3,3),strides=(1,1),input_shape=(img_w,img_h,3),padding='same',activation='relu',data_format='channels_last'))
model.add(BatchNormalization())
model.add(Conv2D(64,(3,3),strides=(1,1),padding='same',activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
#(128,128)
model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
#(64,64)
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#(32,32)
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#(16,16)
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#(8,8)
#decoder
model.add(UpSampling2D(size=(2,2)))
#(16,16)
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(UpSampling2D(size=(2, 2)))
#(32,32)
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(UpSampling2D(size=(2, 2)))
#(64,64)
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(UpSampling2D(size=(2, 2)))
#(128,128)
model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(UpSampling2D(size=(2, 2)))
#(256,256)
model.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=(img_w, img_h,3), padding='same', activation='relu',data_format='channels_last'))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(n_label, (1, 1), strides=(1, 1), padding='same'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',optimizer='sgd',metrics=['accuracy'])
model.summary()
return model
# Training
def train(args):
model = SegNet()
    modelcheck = ModelCheckpoint(args['model'], monitor='val_accuracy', save_best_only=True, mode='max')
    callbacks_list = [modelcheck, tf.keras.callbacks.TensorBoard(log_dir='.')]
train_set,val_set = get_train_val()
train_numb = len(train_set)
valid_numb = len(val_set)
print ("the number of train data is",train_numb)
print ("the number of val data is",valid_numb)
H = model.fit(x=generateData(BS,train_set),steps_per_epoch=(train_numb//BS),epochs=EPOCHS,verbose=2,
                  validation_data=generateValidData(BS,val_set),validation_steps=(valid_numb//BS),callbacks=callbacks_list)
# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
N = EPOCHS
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy on SegNet Satellite Seg")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])
# Parse arguments
def args_parse():
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-a", "--augment", help="using data augment or not",
action="store_true", default=False)
ap.add_argument("-m", "--model", required=False,default="segnet.h5",
help="path to output model")
ap.add_argument("-p", "--plot", type=str, default="plot.png",
help="path to output accuracy/loss plot")
args = vars(ap.parse_args())
return args
# Entry point
if __name__=='__main__':
args = args_parse()
train(args)
print("完成")
#predict()
``` |
{
"source": "1044197988/TF.Keras-Commonly-used-models",
"score": 3
} |
#### File: TF.Keras-Commonly-used-models/Others/tcn.py
```python
from typing import List
from tensorflow.keras import Model, Input
from tensorflow.keras import layers
from datetime import datetime
from pathlib import Path
import tensorflow as tf
from tensorflow.keras import losses, metrics
from tensorflow.keras.callbacks import TensorBoard
class ResidualBlock(layers.Layer):
"""
A TCN Residual block stacking the dilated causal convolution
:param filters: number of output filters in the convolution
:param kernel_size: length of the 1D convolution window
:param dilation_rate: dilation rate to use for dilated convolution
:param dropout_rate: dropout rate
:param activation: non linearity
"""
def __init__(self,
filters: int,
kernel_size: int,
dilation_rate: int,
dropout_rate: float,
activation: str,
**kwargs):
super(ResidualBlock, self).__init__(**kwargs)
self.filters = filters
self.causal_conv_1 = layers.Conv1D(filters=self.filters,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
padding='causal')
self.weight_norm_1 = layers.LayerNormalization()
self.dropout_1 = layers.SpatialDropout1D(rate=dropout_rate)
self.activation_1 = layers.Activation(activation)
self.causal_conv_2 = layers.Conv1D(filters=self.filters,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
padding='causal')
self.weight_norm_2 = layers.LayerNormalization()
self.dropout_2 = layers.SpatialDropout1D(rate=dropout_rate)
self.activation_2 = layers.Activation(activation)
self.activation_3 = layers.Activation(activation)
def build(self, input_shape):
in_channels = input_shape[-1]
if in_channels == self.filters:
self.skip_conv = None
else:
self.skip_conv = layers.Conv1D(filters=self.filters,
kernel_size=1)
super(ResidualBlock, self).build(input_shape)
def call(self, inputs, training=None, **kwargs):
if self.skip_conv is None:
skip = inputs
else:
skip = self.skip_conv(inputs)
x = self.causal_conv_1(inputs)
x = self.weight_norm_1(x)
x = self.activation_1(x)
x = self.dropout_1(x, training=training)
x = self.causal_conv_2(x)
x = self.weight_norm_2(x)
x = self.activation_2(x)
x = self.dropout_2(x, training=training)
x = self.activation_3(x + skip)
return x
class TCN(layers.Layer):
"""
The TCN-layer consisting of TCN-residual-blocks.
The dilation-rate grows exponentially with each residual block.
:param filters: number of conv filters per residual block
:param kernel_size: size of the conv kernels
:param return_sequence: flag if the last sequence should be returned or only last element
:param dropout_rate: dropout rate, default: 0.0
:param activation: non linearity, default: relu
"""
def __init__(self,
filters: List[int],
kernel_size: int,
return_sequence:bool = False,
dropout_rate:float = 0.0,
activation:str = "relu",
**kwargs):
super(TCN, self).__init__(**kwargs)
self.blocks = []
self.depth = len(filters)
self.kernel_size = kernel_size
self.return_sequence = return_sequence
for i in range(self.depth):
dilation_size = 2 ** i
self.blocks.append(
ResidualBlock(filters=filters[i],
kernel_size=kernel_size,
dilation_rate=dilation_size,
dropout_rate=dropout_rate,
activation=activation,
name=f"residual_block_{i}")
)
if not self.return_sequence:
self.slice_layer = layers.Lambda(lambda tt: tt[:, -1, :])
def call(self, inputs, training=None, **kwargs):
x = inputs
for block in self.blocks:
x = block(x)
if not self.return_sequence:
x = self.slice_layer(x)
return x
@property
def receptive_field_size(self):
return receptive_field_size(self.kernel_size, self.depth)
def receptive_field_size(kernel_size, depth):
return 1 + 2 * (kernel_size - 1) * (2 ** depth - 1)
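# For example, kernel_size=8 and depth=6 give 1 + 2*7*63 = 883 timesteps of
# context, enough to cover the 28*28 = 784-step MNIST sequences used below.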
def build_model(sequence_length: int,
channels: int,
filters: List[int],
num_classes:int,
kernel_size: int,
return_sequence:bool = False):
"""
Builds a simple TCN model for a classification task
    :param sequence_length: length of the input sequence
:param channels: number of channels of the input sequence
:param filters: number of conv filters per residual block
:param num_classes: number of output classes
:param kernel_size: size of the conv kernels
:param return_sequence: flag if the last sequence should be returned or only last element
:return: a tf keras model
"""
inputs = Input(shape=(sequence_length, channels), name="inputs")
tcn_block = TCN(filters, kernel_size, return_sequence)
x = tcn_block(inputs)
outputs = layers.Dense(num_classes,
activation="softmax",
name="output")(x)
model = Model(inputs, outputs, name="tcn")
print(f"Input sequence lenght: {sequence_length}, model receptive field: {tcn_block.receptive_field_size}")
return model
def load_dataset():
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train_reshaped = 1/255 * x_train.reshape(-1, 28 * 28, 1)
x_test_reshaped = 1/255 * x_test.reshape(-1, 28 * 28, 1)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train_reshaped, y_train)).shuffle(1000)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test_reshaped, y_test)).shuffle(1000)
return train_dataset, test_dataset
def train():
depth = 6
filters = 25
block_filters = [filters] * depth
print(block_filters)
model = build_model(sequence_length=28 * 28,
channels=1,
num_classes=10,
filters=block_filters,
kernel_size=8)
model.compile(optimizer="Adam",
metrics=[metrics.SparseCategoricalAccuracy()],
loss=losses.SparseCategoricalCrossentropy())
print(model.summary())
#train_dataset, test_dataset = load_dataset()
"""
model.fit(train_dataset.batch(32),
validation_data=test_dataset.batch(32),
callbacks=[TensorBoard(str(Path("logs") / datetime.now().strftime("%Y-%m-%dT%H-%M_%S")))],
epochs=10)
"""
if __name__ == '__main__':
train()
```
#### File: TF.Keras-Commonly-used-models/常用分割损失函数和指标/C_Focal_loss.py
```python
def focal_loss(classes_num, gamma=2., alpha=.25, e=0.1):
# classes_num contains sample number of each classes
def focal_loss_fixed(target_tensor, prediction_tensor):
'''
prediction_tensor is the output tensor with shape [None, 100], where 100 is the number of classes
        target_tensor is the label tensor, same shape as prediction_tensor
'''
import tensorflow as tf
from tensorflow.python.ops import array_ops
        from tensorflow.keras import backend as K
#1# get focal loss with no balanced weight which presented in paper function (4)
zeros = array_ops.zeros_like(prediction_tensor, dtype=prediction_tensor.dtype)
one_minus_p = array_ops.where(tf.greater(target_tensor,zeros), target_tensor - prediction_tensor, zeros)
        FT = -1 * (one_minus_p ** gamma) * tf.math.log(tf.clip_by_value(prediction_tensor, 1e-8, 1.0))
#2# get balanced weight alpha
classes_weight = array_ops.zeros_like(prediction_tensor, dtype=prediction_tensor.dtype)
total_num = float(sum(classes_num))
classes_w_t1 = [ total_num / ff for ff in classes_num ]
sum_ = sum(classes_w_t1)
classes_w_t2 = [ ff/sum_ for ff in classes_w_t1 ] #scale
classes_w_tensor = tf.convert_to_tensor(classes_w_t2, dtype=prediction_tensor.dtype)
classes_weight += classes_w_tensor
alpha = array_ops.where(tf.greater(target_tensor, zeros), classes_weight, zeros)
#3# get balanced focal loss
balanced_fl = alpha * FT
balanced_fl = tf.reduce_mean(balanced_fl)
        #4# add a uniform-label cross-entropy term (label smoothing) to reduce overfitting
# reference : https://spaces.ac.cn/archives/4493
nb_classes = len(classes_num)
        final_loss = (1-e) * balanced_fl + e * K.categorical_crossentropy(K.ones_like(prediction_tensor)/nb_classes, prediction_tensor)
        return final_loss
return focal_loss_fixed
```
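A hedged usage sketch for the closure above: `focal_loss` returns a Keras-compatible loss function, so it can be passed straight to `compile`. The class counts below are illustrative, the model is a placeholder, and the original code targets TF 1.x APIs (`tf.log`, `array_ops.where`):
```python
# Minimal usage sketch (assumes focal_loss from this file is in scope;
# the classes_num values and the model are hypothetical placeholders).
classes_num = [1000, 500, 250]  # number of samples per class
loss_fn = focal_loss(classes_num, gamma=2., alpha=.25, e=0.1)
# model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])
```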
#### File: TF.Keras-Commonly-used-models/常用分割损失函数和指标/Focal_Tversky_loss.py
```python
import tensorflow as tf
from tensorflow.keras.layers import Flatten
from tensorflow.keras.losses import binary_crossentropy
def dsc(y_true, y_pred):
smooth = 1.
y_true_f = Flatten()(y_true)
y_pred_f = Flatten()(y_pred)
intersection = tf.reduce_sum(y_true_f * y_pred_f)
score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
return score
def dice_loss(y_true, y_pred):
loss = 1 - dsc(y_true, y_pred)
return loss
def bce_dice_loss(y_true, y_pred):
loss = binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
return loss
# Focal Tversky loss, brought to you by: https://github.com/nabsabraham/focal-tversky-unet
def tversky(y_true, y_pred, smooth=1e-6):
y_true_pos = Flatten()(y_true)
y_pred_pos = Flatten()(y_pred)
true_pos = tf.reduce_sum(y_true_pos * y_pred_pos)
false_neg = tf.reduce_sum(y_true_pos * (1-y_pred_pos))
false_pos = tf.reduce_sum((1-y_true_pos)*y_pred_pos)
alpha = 0.7
return (true_pos + smooth)/(true_pos + alpha*false_neg + (1-alpha)*false_pos + smooth)
def tversky_loss(y_true, y_pred):
return 1 - tversky(y_true,y_pred)
def focal_tversky_loss(y_true,y_pred):
pt_1 = tversky(y_true, y_pred)
gamma = 0.75
return tf.keras.backend.pow((1-pt_1), gamma)
# Example usage: the final conv layer outputs 4 classes with sigmoid activation
# ('model' and 'adam' are assumed to be defined elsewhere).
model.compile(optimizer=adam, loss=focal_tversky_loss, metrics=[dsc, tversky])
```
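As a quick sanity check on the functions above: a perfect prediction drives the Tversky index to 1 and the focal Tversky loss to 0 (up to the smoothing term). A minimal sketch, assuming the functions in this file are in scope and TF 2.x eager execution:
```python
import tensorflow as tf

# Perfect prediction: tversky -> ~1.0, focal_tversky_loss -> ~0.0.
y_true = tf.constant([[1., 0., 1., 0.]])
y_pred = tf.constant([[1., 0., 1., 0.]])
print(float(tversky(y_true, y_pred)))             # ~1.0
print(float(focal_tversky_loss(y_true, y_pred)))  # ~0.0
```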
#### File: TF.Keras-Commonly-used-models/常用分割模型/deeplabv2.py
```python
from tensorflow import image
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Add, ZeroPadding2D, Dropout, Layer, Activation
class BilinearUpsampling(Layer):
    '''
    A simple bilinear upsampling layer.
    # Arguments
        upsampling: integer > 0. Upsampling rate for height and width.
        name: name of the layer.
    '''
def __init__(self, upsampling, **kwargs):
self.upsampling = upsampling
super(BilinearUpsampling, self).__init__(**kwargs)
def build(self, input_shape):
super(BilinearUpsampling, self).build(input_shape)
def call(self, x, mask=None):
new_size = [x.shape[1] * self.upsampling, x.shape[2] * self.upsampling]
output = image.resize_images(x, new_size)
return output
def DeeplabV2(input_shape,upsampling=8,apply_softmax=True,classes=6):
img_input = Input(shape=input_shape)
# Block 1
h = ZeroPadding2D(padding=(1, 1))(img_input)
h = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', name='conv1_1')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', name='conv1_2')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(h)
# Block 2
h = ZeroPadding2D(padding=(1, 1))(h)
h = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', name='conv2_1')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', name='conv2_2')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(h)
# Block 3
h = ZeroPadding2D(padding=(1, 1))(h)
h = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', name='conv3_1')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', name='conv3_2')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', name='conv3_3')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(h)
# Block 4
h = ZeroPadding2D(padding=(1, 1))(h)
h = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', name='conv4_1')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', name='conv4_2')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', name='conv4_3')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = MaxPooling2D(pool_size=(3, 3), strides=(1, 1))(h)
# Block 5 -TODO - Might be incorrect
h = ZeroPadding2D(padding=(2, 2))(h)
h = Conv2D(filters=512, kernel_size=(3, 3), dilation_rate=(2, 2), activation='relu', name='conv5_1')(h)
h = ZeroPadding2D(padding=(2, 2))(h)
h = Conv2D(filters=512, kernel_size=(3, 3), dilation_rate=(2, 2), activation='relu', name='conv5_2')(h)
h = ZeroPadding2D(padding=(2, 2))(h)
h = Conv2D(filters=512, kernel_size=(3, 3), dilation_rate=(2, 2), activation='relu', name='conv5_3')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
p5 = MaxPooling2D(pool_size=(3, 3), strides=(1, 1))(h)
# branching for Atrous Spatial Pyramid Pooling - Until here -14 layers
# hole = 6
b1 = ZeroPadding2D(padding=(6, 6))(p5)
b1 = Conv2D(filters=1024, kernel_size=(3, 3), dilation_rate=(6, 6), activation='relu', name='fc6_1')(b1)
b1 = Dropout(0.5)(b1)
b1 = Conv2D(filters=1024, kernel_size=(1, 1), activation='relu', name='fc7_1')(b1)
b1 = Dropout(0.5)(b1)
    b1 = Conv2D(filters=classes, kernel_size=(1, 1), activation='relu', name='fc8_1')(b1)
# hole = 12
b2 = ZeroPadding2D(padding=(12, 12))(p5)
b2 = Conv2D(filters=1024, kernel_size=(3, 3), dilation_rate=(12, 12), activation='relu', name='fc6_2')(b2)
b2 = Dropout(0.5)(b2)
b2 = Conv2D(filters=1024, kernel_size=(1, 1), activation='relu', name='fc7_2')(b2)
b2 = Dropout(0.5)(b2)
    b2 = Conv2D(filters=classes, kernel_size=(1, 1), activation='relu', name='fc8_2')(b2)
# hole = 18
b3 = ZeroPadding2D(padding=(18, 18))(p5)
b3 = Conv2D(filters=1024, kernel_size=(3, 3), dilation_rate=(18, 18), activation='relu', name='fc6_3')(b3)
b3 = Dropout(0.5)(b3)
b3 = Conv2D(filters=1024, kernel_size=(1, 1), activation='relu', name='fc7_3')(b3)
b3 = Dropout(0.5)(b3)
    b3 = Conv2D(filters=classes, kernel_size=(1, 1), activation='relu', name='fc8_3')(b3)
# hole = 24
b4 = ZeroPadding2D(padding=(24, 24))(p5)
b4 = Conv2D(filters=1024, kernel_size=(3, 3), dilation_rate=(24, 24), activation='relu', name='fc6_4')(b4)
b4 = Dropout(0.5)(b4)
b4 = Conv2D(filters=1024, kernel_size=(1, 1), activation='relu', name='fc7_4')(b4)
b4 = Dropout(0.5)(b4)
    b4 = Conv2D(filters=classes, kernel_size=(1, 1), activation='relu', name='fc8_4')(b4)
s = Add()([b1, b2, b3, b4])
logits = BilinearUpsampling(upsampling=upsampling)(s)
if apply_softmax:
out = Activation('softmax')(logits)
else:
out = logits
model = Model(img_input, out, name='deeplabV2')
return model
if __name__=="__main__":
model=DeeplabV2((64,64,3),upsampling=8,apply_softmax=True,classes=6)
model.summary()
```
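A quick check of the spatial arithmetic in `DeeplabV2`: the three stride-2 poolings reduce a 64x64 input to 8x8, the two stride-1 poolings preserve that size, and `upsampling=8` restores 64x64 at the output. A minimal sketch of that bookkeeping, assuming the pooling strides above:
```python
# Sketch: spatial-size bookkeeping for DeeplabV2((64, 64, 3), upsampling=8).
size = 64
for stride in (2, 2, 2, 1, 1):  # the five MaxPooling2D stages above
    size //= stride             # padded 3x3 pooling keeps 'same'-style sizes
print(size)                     # 8 -> BilinearUpsampling(upsampling=8) restores 64
```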
#### File: TF.Keras-Commonly-used-models/常用分割模型/DeeplabV3+.py
```python
from tensorflow.keras.layers import Activation,Conv2D,MaxPooling2D,BatchNormalization,Input,DepthwiseConv2D,add,Dropout,AveragePooling2D,Concatenate,Layer,InputSpec
from tensorflow.keras.models import Model
from tensorflow.python.keras.utils import conv_utils
import tensorflow as tf
class BilinearUpsampling(Layer):
def __init__(self, upsampling=(2, 2), data_format=None, **kwargs):
super(BilinearUpsampling, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.upsampling = conv_utils.normalize_tuple(upsampling, 2, 'size')
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
height = self.upsampling[0] * \
input_shape[1] if input_shape[1] is not None else None
width = self.upsampling[1] * \
input_shape[2] if input_shape[2] is not None else None
return (input_shape[0],height,width,input_shape[3])
def call(self, inputs):
return tf.image.resize_bilinear(inputs, (int(inputs.shape[1]*self.upsampling[0]),
int(inputs.shape[2]*self.upsampling[1])))
def get_config(self):
config = {'size': self.upsampling,'data_format': self.data_format}
base_config = super(BilinearUpsampling, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def xception_downsample_block(x,channels,top_relu=False):
##separable conv1
if top_relu:
x=Activation("relu")(x)
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(channels,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
##separable conv2
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(channels,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
##separable conv3
x=DepthwiseConv2D((3,3),strides=(2,2),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(channels,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
return x
def res_xception_downsample_block(x,channels):
res=Conv2D(channels,(1,1),strides=(2,2),padding="same",use_bias=False)(x)
res=BatchNormalization()(res)
x=xception_downsample_block(x,channels)
x=add([x,res])
return x
def xception_block(x,channels):
##separable conv1
x=Activation("relu")(x)
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(channels,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
##separable conv2
x=Activation("relu")(x)
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(channels,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
##separable conv3
x=Activation("relu")(x)
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(channels,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
return x
def res_xception_block(x,channels):
res=x
x=xception_block(x,channels)
x=add([x,res])
return x
def aspp(x,input_shape,out_stride):
b0=Conv2D(256,(1,1),padding="same",use_bias=False)(x)
b0=BatchNormalization()(b0)
b0=Activation("relu")(b0)
b1=DepthwiseConv2D((3,3),dilation_rate=(6,6),padding="same",use_bias=False)(x)
b1=BatchNormalization()(b1)
b1=Activation("relu")(b1)
b1=Conv2D(256,(1,1),padding="same",use_bias=False)(b1)
b1=BatchNormalization()(b1)
b1=Activation("relu")(b1)
b2=DepthwiseConv2D((3,3),dilation_rate=(12,12),padding="same",use_bias=False)(x)
b2=BatchNormalization()(b2)
b2=Activation("relu")(b2)
b2=Conv2D(256,(1,1),padding="same",use_bias=False)(b2)
b2=BatchNormalization()(b2)
b2=Activation("relu")(b2)
    b3=DepthwiseConv2D((3,3),dilation_rate=(18,18),padding="same",use_bias=False)(x)  # standard ASPP rates are 6/12/18; the duplicated 12 looked like a copy-paste slip
b3=BatchNormalization()(b3)
b3=Activation("relu")(b3)
b3=Conv2D(256,(1,1),padding="same",use_bias=False)(b3)
b3=BatchNormalization()(b3)
b3=Activation("relu")(b3)
out_shape=int(input_shape[0]/out_stride)
b4=AveragePooling2D(pool_size=(out_shape,out_shape))(x)
b4=Conv2D(256,(1,1),padding="same",use_bias=False)(b4)
b4=BatchNormalization()(b4)
b4=Activation("relu")(b4)
b4=BilinearUpsampling((out_shape,out_shape))(b4)
x=Concatenate()([b4,b0,b1,b2,b3])
return x
def deeplabv3_plus(input_shape=(512,512,3),out_stride=16,num_classes=21):
img_input=Input(shape=input_shape)
x=Conv2D(32,(3,3),strides=(2,2),padding="same",use_bias=False)(img_input)
x=BatchNormalization()(x)
x=Activation("relu")(x)
x=Conv2D(64,(3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
x=res_xception_downsample_block(x,128)
res=Conv2D(256,(1,1),strides=(2,2),padding="same",use_bias=False)(x)
res=BatchNormalization()(res)
x=Activation("relu")(x)
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(256,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(256,(1,1),padding="same",use_bias=False)(x)
skip=BatchNormalization()(x)
x=Activation("relu")(skip)
x=DepthwiseConv2D((3,3),strides=(2,2),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(256,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=add([x,res])
x=xception_downsample_block(x,728,top_relu=True)
for i in range(16):
x=res_xception_block(x,728)
res=Conv2D(1024,(1,1),padding="same",use_bias=False)(x)
res=BatchNormalization()(res)
x=Activation("relu")(x)
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(728,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(1024,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(1024,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=add([x,res])
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(1536,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(1536,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Conv2D(2048,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
#aspp
x=aspp(x,input_shape,out_stride)
x=Conv2D(256,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
x=Dropout(0.9)(x)
##decoder
x=BilinearUpsampling((4,4))(x)
dec_skip=Conv2D(48,(1,1),padding="same",use_bias=False)(skip)
dec_skip=BatchNormalization()(dec_skip)
dec_skip=Activation("relu")(dec_skip)
x=Concatenate()([x,dec_skip])
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
x=Conv2D(256,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
x=DepthwiseConv2D((3,3),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
x=Conv2D(256,(1,1),padding="same",use_bias=False)(x)
x=BatchNormalization()(x)
x=Activation("relu")(x)
x=Conv2D(num_classes,(1,1),padding="same")(x)
x=BilinearUpsampling((4,4))(x)
model=Model(img_input,x)
return model
if __name__=="__main__":
model=deeplabv3_plus(input_shape=(128,128,3),out_stride=16,num_classes=6)
model.summary()
```
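The decoder arithmetic in `deeplabv3_plus` mirrors the paper: with `out_stride=16` a 128x128 input reaches an 8x8 encoder map, ASPP's image-level branch average-pools over the full 8x8 grid, and the two 4x bilinear upsamplings (with the stride-4 skip connection in between) restore 128x128. A minimal sketch of that bookkeeping, assuming the strides above:
```python
# Sketch: spatial bookkeeping for deeplabv3_plus(input_shape=(128, 128, 3), out_stride=16).
input_size, out_stride = 128, 16
encoder_size = input_size // out_stride       # 8: stride-2 stem + three downsample blocks
pool_size = encoder_size                      # ASPP image-level AveragePooling2D window
decoder_size = encoder_size * 4 * 4           # two BilinearUpsampling((4, 4)) stages
print(encoder_size, pool_size, decoder_size)  # 8 8 128
```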
#### File: TF.Keras-Commonly-used-models/常用分类模型/DenseNet.py
```python
from tensorflow.keras.layers import Input, Dense, Dropout, Activation, Concatenate, BatchNormalization
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, GlobalAveragePooling2D, AveragePooling2D
from tensorflow.keras.regularizers import l2
def DenseNet(input_shape=None, dense_blocks=3, dense_layers=-1, growth_rate=12, nb_classes=None, dropout_rate=None,
bottleneck=False, compression=1.0, weight_decay=1e-4, depth=40):
"""
Creating a DenseNet
Arguments:
input_shape : shape of the input images. E.g. (28,28,1) for MNIST
dense_blocks : amount of dense blocks that will be created (default: 3)
dense_layers : number of layers in each dense block. You can also use a list for numbers of layers [2,4,3]
or define only 2 to add 2 layers at all dense blocks. -1 means that dense_layers will be calculated
by the given depth (default: -1)
growth_rate : number of filters to add per dense block (default: 12)
nb_classes : number of classes
dropout_rate : defines the dropout rate that is accomplished after each conv layer (except the first one).
In the paper the authors recommend a dropout of 0.2 (default: None)
bottleneck : (True / False) if true it will be added in convolution block (default: False)
compression : reduce the number of feature-maps at transition layer. In the paper the authors recomment a compression
of 0.5 (default: 1.0 - will have no compression effect)
weight_decay : weight decay of L2 regularization on weights (default: 1e-4)
depth : number or layers (default: 40)
Returns:
Model : A Keras model instance
"""
    if nb_classes is None:
        raise Exception('Please define the number of classes (e.g. nb_classes=10). This is required for the final softmax.')
    if compression <= 0.0 or compression > 1.0:
        raise Exception('Compression has to be a value between 0.0 and 1.0. Setting compression to 1.0 turns it off.')
if type(dense_layers) is list:
if len(dense_layers) != dense_blocks:
            raise AssertionError('Number of dense blocks has to match the length of the dense_layers list')
elif dense_layers == -1:
if bottleneck:
dense_layers = (depth - (dense_blocks + 1))/dense_blocks // 2
else:
dense_layers = (depth - (dense_blocks + 1))//dense_blocks
dense_layers = [int(dense_layers) for _ in range(dense_blocks)]
else:
dense_layers = [int(dense_layers) for _ in range(dense_blocks)]
img_input = Input(shape=input_shape)
nb_channels = growth_rate * 2
print('Creating DenseNet')
print('#############################################')
print('Dense blocks: %s' % dense_blocks)
print('Layers per dense block: %s' % dense_layers)
print('#############################################')
# Initial convolution layer
x = Conv2D(nb_channels, (3,3), padding='same',strides=(1,1),
use_bias=False, kernel_regularizer=l2(weight_decay))(img_input)
# Building dense blocks
for block in range(dense_blocks):
# Add dense block
x, nb_channels = dense_block(x, dense_layers[block], nb_channels, growth_rate, dropout_rate, bottleneck, weight_decay)
if block < dense_blocks - 1: # if it's not the last dense block
# Add transition_block
x = transition_layer(x, nb_channels, dropout_rate, compression, weight_decay)
nb_channels = int(nb_channels * compression)
x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)
x = Dense(nb_classes, activation='softmax', kernel_regularizer=l2(weight_decay), bias_regularizer=l2(weight_decay))(x)
model_name = None
if growth_rate >= 36:
model_name = 'widedense'
else:
model_name = 'dense'
if bottleneck:
model_name = model_name + 'b'
if compression < 1.0:
model_name = model_name + 'c'
return Model(img_input, x, name=model_name), model_name
def dense_block(x, nb_layers, nb_channels, growth_rate, dropout_rate=None, bottleneck=False, weight_decay=1e-4):
"""
Creates a dense block and concatenates inputs
"""
x_list = [x]
for i in range(nb_layers):
cb = convolution_block(x, growth_rate, dropout_rate, bottleneck, weight_decay)
x_list.append(cb)
x = Concatenate(axis=-1)(x_list)
nb_channels += growth_rate
return x, nb_channels
def convolution_block(x, nb_channels, dropout_rate=None, bottleneck=False, weight_decay=1e-4):
"""
Creates a convolution block consisting of BN-ReLU-Conv.
Optional: bottleneck, dropout
"""
# Bottleneck
if bottleneck:
bottleneckWidth = 4
x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
x = Activation('relu')(x)
x = Conv2D(nb_channels * bottleneckWidth, (1, 1), use_bias=False, kernel_regularizer=l2(weight_decay))(x)
# Dropout
if dropout_rate:
x = Dropout(dropout_rate)(x)
# Standard (BN-ReLU-Conv)
x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
x = Activation('relu')(x)
x = Conv2D(nb_channels, (3, 3), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay))(x)
# Dropout
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
def transition_layer(x, nb_channels, dropout_rate=None, compression=1.0, weight_decay=1e-4):
"""
    Creates a transition layer between dense blocks, performing convolution and pooling.
    Works as downsampling.
"""
x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
x = Activation('relu')(x)
x = Conv2D(int(nb_channels*compression), (1, 1), padding='same',
use_bias=False, kernel_regularizer=l2(weight_decay))(x)
# Adding dropout
if dropout_rate:
x = Dropout(dropout_rate)(x)
x = AveragePooling2D((2, 2), strides=(2, 2))(x)
return x
model,model_name = DenseNet(input_shape=(512,512,3), dense_blocks=3, dense_layers=-1, growth_rate=12, nb_classes=25, dropout_rate=0.1,
bottleneck=False, compression=1.0, weight_decay=1e-4, depth=40)
model.summary()
```
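For the call above (`depth=40`, `dense_blocks=3`, `bottleneck=False`), the derived per-block layer count is (40 - 4) // 3 = 12. A minimal sketch of the branch in `DenseNet` that computes it:
```python
# Sketch: layers per dense block for depth=40, dense_blocks=3, no bottleneck.
depth, dense_blocks = 40, 3
dense_layers = (depth - (dense_blocks + 1)) // dense_blocks
print([dense_layers] * dense_blocks)  # [12, 12, 12], as printed by DenseNet at build time
```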
#### File: TF.Keras-Commonly-used-models/常用分类模型/GoogleNet.py
```python
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input,Dense,Dropout,BatchNormalization,Conv2D,MaxPooling2D,AveragePooling2D,concatenate
import numpy as np
seed = 7
np.random.seed(seed)
def Conv2d_BN(x, nb_filter,kernel_size, padding='same',strides=(1,1),name=None):
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
x = Conv2D(nb_filter,kernel_size,padding=padding,strides=strides,activation='relu',name=conv_name)(x)
x = BatchNormalization(axis=3,name=bn_name)(x)
return x
def Inception(x,nb_filter):
branch1x1 = Conv2d_BN(x,nb_filter,(1,1), padding='same',strides=(1,1),name=None)
branch3x3 = Conv2d_BN(x,nb_filter,(1,1), padding='same',strides=(1,1),name=None)
branch3x3 = Conv2d_BN(branch3x3,nb_filter,(3,3), padding='same',strides=(1,1),name=None)
branch5x5 = Conv2d_BN(x,nb_filter,(1,1), padding='same',strides=(1,1),name=None)
    branch5x5 = Conv2d_BN(branch5x5,nb_filter,(5,5), padding='same',strides=(1,1),name=None)  # 5x5 conv after the 1x1 reduction (was mistakenly (1,1))
branchpool = MaxPooling2D(pool_size=(3,3),strides=(1,1),padding='same')(x)
branchpool = Conv2d_BN(branchpool,nb_filter,(1,1),padding='same',strides=(1,1),name=None)
x = concatenate([branch1x1,branch3x3,branch5x5,branchpool],axis=3)
return x
inpt = Input(shape=(224,224,3))
#padding='same' pads by (stride-1)/2; ZeroPadding2D((3,3)) could also be used
x = Conv2d_BN(inpt,64,(7,7),strides=(2,2),padding='same')
x = MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='same')(x)
x = Conv2d_BN(x,192,(3,3),strides=(1,1),padding='same')
x = MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='same')(x)
x = Inception(x,64)#256
x = Inception(x,120)#480
x = MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='same')(x)
x = Inception(x,128)#512
x = Inception(x,128)
x = Inception(x,128)
x = Inception(x,132)#528
x = Inception(x,208)#832
x = MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='same')(x)
x = Inception(x,208)
x = Inception(x,256)#1024
x = AveragePooling2D(pool_size=(7,7),strides=(7,7),padding='same')(x)
x = Dropout(0.4)(x)
x = Dense(1000,activation='relu')(x)
x = Dense(1000,activation='softmax')(x)
model = Model(inpt,x,name='inception')
model.compile(loss='categorical_crossentropy',optimizer='sgd',metrics=['accuracy'])
model.summary()
``` |
{
"source": "1047984022/lynkcoHelper",
"score": 2
} |
#### File: lynkcoHelper/LynkCoHelper/lynco_wrok.py
```python
import threading
import time
import base64
from lynkco_app_request import lynkco_app_request
from com.uestcit.api.gateway.sdk.auth.aes import aes as AES
class lynco_wrok(threading.Thread):
"""新开线程处理任务"""
def __init__(self, config, account):
# 初始化线程
threading.Thread.__init__(self)
# 缓存配置信息
self.config = config
# 缓存账户信息
self.account = account
# 缓存APPKEY(因为存储的是base64后的值,所以需要base64解码一次)
self.app_key = base64.b64decode(self.config['api_geteway']['app_key']).decode('utf-8')
# 缓存APPSECRET(因为存储的是base64后的值,所以需要base64解码一次)
self.app_secret = base64.b64decode(self.config['api_geteway']['app_secret']).decode('utf-8')
# 缓存AESKEY(因为存储的是两次base64后的值,所以需要base64解码两次)
self.aes_key = base64.b64decode(base64.b64decode(self.config['aes_key']).decode('utf-8')).decode('utf-8')
self.AES = AES(self.aes_key)
self.lynkco_app_request = lynkco_app_request(self.app_key, self.app_secret)
def run(self):
"""线程开始的方法"""
print ("开始执行用户:" + self.account['username'] + "的任务 " + time.strftime('%Y-%m-%d %H:%M:%S'))
self.app_action()
print ("用户任务执行完成:" + self.account['username'] + "的任务 " + time.strftime('%Y-%m-%d %H:%M:%S'))
def app_action(self):
"""App端操作流程"""
# 先进行登录(不需要缓存RefreshToken进行刷新操作,每次执行都是用登录接口皆可,后续可以根据实际情况进行缓存优化)
username = self.account['username']
password = self.AES.encrypt(self.account['password'])
response = self.lynkco_app_request.login(username, password)
if response['code'] != 'success':
print("APP端操作用户:" + self.account['username'] + "失败,第1次登录失败:"+ response['message'] + time.strftime('%Y-%m-%d %H:%M:%S'))
loginSuccess = False
        # Captcha recognition may fail, so retry up to three times with a 1-second interval
for i in range(3):
time.sleep(1)
response = self.lynkco_app_request.login(username, password)
if response['code'] != 'success':
print("APP端操作用户:" + self.account['username'] + "失败,第"+str(i+2)+"次登录失败:"+ response['message'] + time.strftime('%Y-%m-%d %H:%M:%S'))
else:
loginSuccess = True
break
if not loginSuccess:
return False
self.userinfo = response['data']
        # Fetch the user info first and print the Co coin balance
response = self.lynkco_app_request.member_info(self.userinfo['centerTokenDto']['token'], self.userinfo['centerUserInfoDto']['id'])
if response['code'] != 'success':
print("APP端操作前用户:" + self.account['username'] + "获取用户信息失败 " + time.strftime('%Y-%m-%d %H:%M:%S'))
else:
self.member_info = response['data']
print("APP端操作前用户:" + self.account['username'] + "当前Co币余额为:" + self.member_info['point'] + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
        # Perform the share action 3 times
for i in range(3):
response = self.lynkco_app_request.get_co_by_share(self.userinfo['centerTokenDto']['token'], self.userinfo['centerUserInfoDto']['id'])
            # Wait 1 second after each share before the next one
time.sleep(1)
        # Fetch the user info again and print the Co coin balance
response = self.lynkco_app_request.member_info(self.userinfo['centerTokenDto']['token'], self.userinfo['centerUserInfoDto']['id'])
if response['code'] != 'success':
print("APP端操作后用户:" + self.account['username'] + "获取用户信息失败 " + time.strftime('%Y-%m-%d %H:%M:%S'))
else:
self.member_info = response['data']
print("APP端操作后用户:" + self.account['username'] + "当前Co币余额为:" + self.member_info['point'] + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
print("APP端操作用户:" + self.account['username'] + "完成" + time.strftime('%Y-%m-%d %H:%M:%S'))
return True
``` |
{
"source": "1048693172/Bert_Classification_multitask_labels",
"score": 3
} |
#### File: 1048693172/Bert_Classification_multitask_labels/process.py
```python
import os
import random
import re
import jieba
high=[]
low=[]
# Split causes of action into low- and high-frequency groups by case count
def binary(path,min,max,boundary):
for file in os.listdir('/home/nathan/Desktop/preprocess_data/high'):
if os.path.isfile(os.path.join('/home/nathan/Desktop/preprocess_data/high', file)):
os.remove(os.path.join('/home/nathan/Desktop/preprocess_data/high', file))
for file in os.listdir('/home/nathan/Desktop/preprocess_data/low'):
if os.path.isfile(os.path.join('/home/nathan/Desktop/preprocess_data/low', file)):
os.remove(os.path.join('/home/nathan/Desktop/preprocess_data/low', file))
files = os.listdir(path)
pat = re.compile(r'<a target.*?>|</a>|[a-zA-z\.\-]+')
for file in files:
with open(os.path.join(path, file), 'r') as f:
lines=f.readlines()
ll=[]
for line in lines:
li = line.strip().split('\t')
if len(li)<=3:
continue
li[3] = pat.sub('', li[3])
                # Apply length limits to the plaintiff's claim text
if li[2]!='无':
if min<len(li[3]) < max:
                        # Insert the cause of action into the text (data augmentation)
l = []
for i, w in enumerate(li[3]):
if w == ',' or w == '。':
l.append(i)
if len(l)==0:
continue
index = random.choice(l)
li[3]=li[3][:index]+li[2]+li[3][index:]
ll.append(li[3]+'\t'+li[2]+'\n')
if len(ll)>=boundary:
with open('/home/nathan/Desktop/preprocess_data/high/%s' %file , 'w') as f:
f.write(''.join(ll))
print('write done!---%s' % file)
elif len(ll)>0:
with open('/home/nathan/Desktop/preprocess_data/low/%s' %file , 'w') as f:
f.write(''.join(ll))
print('write done!---%s' % file)
# 4.1 If a cause of action has more than 300 cases, randomly select 300
def get_high(path,num):
for file in os.listdir('/home/nathan/Desktop/preprocess_data/high_sub'):
if os.path.isfile(os.path.join('/home/nathan/Desktop/preprocess_data/high_sub', file)):
os.remove(os.path.join('/home/nathan/Desktop/preprocess_data/high_sub', file))
'''
files = os.listdir(path)
for file in files:
str = ''
        #Count keyword frequencies for this cause of action
dic = {}
with open(os.path.join(path, file), 'r') as f:
lines = f.readlines()
for line in lines:
li = line.strip().split('\t')
label_list = li[1].split()
for l in label_list:
dic[l] = dic.get(l, 0) + 1
        #Score each sample by keyword frequency and keep the top num samples
t_list = []
with open(os.path.join(path, file), 'r') as f:
lines = f.readlines()
for line in lines:
li = line.strip().split('\t')
label_list = li[1].split()
score = 0
for label in label_list:
score += dic[label]
t_list.append([line, score])
t_list = sorted(t_list, key=lambda x: x[1], reverse=True)
for i in range(num):
t = t_list[i]
str += t[0]
with open('/home/nathan/PycharmProjects/MongoWusong/high_sub/%s' % file, 'w') as f:
f.write(str)
print('write done!---%s' % file)
'''
    #Randomly select num samples
files=os.listdir(path)
for file in files:
str = ''
with open(os.path.join(path, file), 'r') as f:
lines=f.readlines()
random.shuffle(lines)
ll=lines[:num]
str=''.join(ll)
with open('/home/nathan/Desktop/preprocess_data/high_sub/%s'%file,'w') as f:
f.write(str)
print('write done!---%s'%file)
# 4.2 If a cause of action has fewer than 300 cases, pad the sample count up to l_num
def get_low(path,l_num):
for file in os.listdir('/home/nathan/Desktop/preprocess_data/low_sub'):
if os.path.isfile(os.path.join('/home/nathan/Desktop/preprocess_data/low_sub', file)):
os.remove(os.path.join('/home/nathan/Desktop/preprocess_data/low_sub', file))
files=os.listdir(path)
for file in files:
str = ''
with open(os.path.join(path, file), 'r') as f:
lines=f.readlines()
random.shuffle(lines)
if len(lines)>=l_num:
for line in lines:
li = line.strip().split('\t')
text = li[0]
label = li[1]
str += text + '\t' + label + '\n'
else:
num=random.randint(l_num,l_num+10)
for line in lines:
li = line.strip().split('\t')
text = li[0]
label = li[1]
str += text + '\t' + label + '\n'
for i in range(num-len(lines)):
line = random.choice(lines)
li = line.strip().split('\t')
text = li[0]
text_list = re.split('[,。;:]', text)[:-1]
random.shuffle(text_list)
text=','.join(text_list)
label = li[1]
str += text + '\t' + label + '\n'
with open('/home/nathan/Desktop/preprocess_data/low_sub/%s'%file,'w') as f:
f.write(str)
print('write done!---%s'%file)
# 5. Pre-generate the data
def getData(highPath,lowPath):
files = os.listdir(highPath)
total_line=[]
for file in files:
with open(os.path.join(highPath, file), 'r') as f:
lines = f.readlines()
total_line.extend(lines)
files = os.listdir(lowPath)
for file in files:
with open(os.path.join(lowPath, file), 'r') as f:
lines = f.readlines()
total_line.extend(lines)
random.shuffle(total_line)
str=''.join(total_line)
with open('/home/nathan/Desktop/preprocess_data/data.txt','w') as f:
f.write(str)
# 6. Word segmentation
def cut_data():
stopwords = [line.strip() for line in open('/home/nathan/PycharmProjects/MongoWusong/stopwords.txt', 'r', encoding='utf-8').readlines()]
i=0
str = ''
with open('/home/nathan/Desktop/preprocess_data/data.txt','r') as f:
fullstr=f.read()
pat = re.compile(r'(<a target.*?>|</a>|[0-9a-zA-z\.\-])+')
fullstr = pat.sub('', fullstr)
lines=fullstr.split('\n')
random.shuffle(lines)
for line in lines:
li=line.strip().split('\t')
if len(li)!=2:
print(i)
continue
text=li[0]
label=li[1]
text_li=list(jieba.cut(text))
for word in text_li:
if word not in stopwords:
if not word.isdigit():
if len(word)>1:
str += word
str += " "
i+=1
str=str.strip()
str+='\t'+label+'\n'
if i%3000==0:
with open('/home/nathan/Desktop/preprocess_data/data_cut.txt','a+') as f:
f.write(str)
str=''
print(i)
with open('/home/nathan/Desktop/preprocess_data/data_cut.txt', 'a+') as f:
f.write(str)
str = ''
print(i)
def clean_ch(filename):
with open(filename,'r') as f:
str=f.read()
pat = re.compile(r'[»ɑəΠΩАа‖‧Ⅻⅴ←↑↘↙∫∷≠⌒⒂⒇⒗┌┐┕┗┛┦┧┨┫┬┱┲╮╰╳▲◇〃となふ゜ァギネメヾㄎㄑㄣ㈨䴘䴙﹫hj�ǎɡГО┅┤△さ・‱#³úΔΧγ€⒁々㏎〞¨àèêíòμびㄐ£öāΓΛΡФХ」っ﹔﹕^Äнⅵ∙﹨gk∪//=×+%*…〔〕﹝﹞@‘’&ÍΗΤλг〝﹛﹜lºuyИ∮⒄ūˉˊˋ∠⒛┣┥ㄏㄔ¤´áìóùǚⅷ∽━■▪ぃアィイウェエガクグケコサザシジスタチッテトナヒプミムュルレロンヴ・。、]+')
str = pat.sub('', str)
with open(filename, 'w') as f:
f.write(str)
# 7. Generate training, test, and validation data
def getTrainTest():
with open('/home/nathan/Desktop/preprocess_data/data_cut.txt','r') as f:
lines=f.readlines()
random.shuffle(lines)
str_train=''
for i in range(int(len(lines)*0.7)):
str_train+=lines[i]
with open('/home/nathan/PycharmProjects/BERT_reason/data/train.txt', 'w') as f:
f.write(str_train)
str_test=''
for i in range(int(len(lines)*0.7),int(len(lines)*0.85)):
str_test+=lines[i]
with open('/home/nathan/PycharmProjects/BERT_reason/data/test.txt', 'w') as f:
f.write(str_test)
str_val = ''
for i in range(int(len(lines) * 0.85), int(len(lines))):
str_val += lines[i]
with open('/home/nathan/PycharmProjects/BERT_reason/data/dev.txt', 'w') as f:
f.write(str_val)
# 8. Maximum length of the plaintiff's claim
def maxLength():
with open('/home/nathan/PycharmProjects/BERT_reason/data/train.txt','r') as f:
lines=f.readlines()
max=0
for line in lines:
li=line.strip().split('\t')
l=len(li[0].split())
if l>max:
max=l
return max
# 9. Build the vocabulary of the training set
def dict(filename):
dic={}
with open(filename,'r') as f:
lines=f.readlines()
for line in lines:
li=line.strip().split('\t')
text_li=li[0].split()
for i in text_li:
dic[i]=dic.get(i,0)+1
with open('/home/nathan/Desktop/preprocess_data/dic_cut_data.txt', 'w') as f:
li=list(dic.keys())
li.sort()
str='\n'.join(li)
f.write(str)
def new_dict(filename):
dic = {}
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
li = line.strip().split('\t')
text_li = li[0].split()
for i in text_li:
dic[i] = dic.get(i, 0) + 1
str1=''
ll = sorted(dic.items(), key=lambda x:x[1], reverse=False)
for i in ll:
str1 += i[0] + '\t' + str(i[1]) + '\n'
with open('/home/nathan/Desktop/preprocess_data/dic_sort.txt', 'w') as f:
f.write(str1)
def sort(file):
str=''
with open(file,'r') as f:
lines=f.readlines()
ll=[]
for line in lines:
ll.append(tuple(line.strip().split('\t')))
ll=sorted(ll, key=lambda x: len(x[0]), reverse=False)
for i in ll:
str+=i[0]+'\t'+i[1]+'\n'
with open('val1.txt', 'w') as f:
f.write(str)
def count_key(path):
files=os.listdir(path)
for file in files:
dic={}
with open(os.path.join(path,file),'r') as f:
lines=f.readlines()
for line in lines:
li=line.strip().split('\t')
label_list=li[1].split()
for l in label_list:
dic[l]=dic.get(l,0)+1
str1=''
ll = sorted(dic.items(), key=lambda x: x[1], reverse=True)
for i in ll:
str1 += i[0] + '\t' + str(i[1]) + '\n'
with open('key_original/%s'%file,'w') as f:
f.write(str1)
def add_key(path):
files = os.listdir(path)
for file in files:
print('%s add key...'%file)
ss=set()
str=''
with open(os.path.join(path, file), 'r') as f:
lines = f.readlines()
for line in lines:
li = line.strip().split('\t')
label_list = li[1].split()
ss=ss|set(label_list)
for line in lines:
li = line.strip().split('\t')
text=li[0]
add_key=li[1].split()
for s in ss:
if re.search(s,text) and not re.search(s,li[1]):
add_key.append(s)
print('添加关键词%s'%s)
add_key.sort()
str += text+'\t'+' '.join(add_key)+'\n'
with open('add_key_original/%s' % file, 'w') as f:
f.write(str)
def add_all_key(path):
files = os.listdir(path)
ss=set()
key_files = os.listdir('/home/nathan/PycharmProjects/MongoWusong/key_original')
for key_file in key_files:
with open('/home/nathan/PycharmProjects/MongoWusong/key_original/'+key_file,'r') as f:
lines=f.readlines()
for line in lines:
li=line.strip().split('\t')
ss.add(li[0])
for file in files:
print('%s add key...' % file)
str = ''
with open(os.path.join(path, file), 'r') as f:
lines = f.readlines()
i=1
for line in lines:
li = line.strip().split('\t')
text = li[0]
add_key = li[1].split()
for s in ss:
pat=re.compile(s)
if pat.search(text):
if not pat.search(li[1]):
add_key.append(s)
print('添加关键词%s' % s)
add_key.sort()
str += text + '\t' + ' '.join(add_key) + '\n'
i+=1
if i%100==0:
with open('add_key_all_original/%s' % file, 'a+') as f:
f.write(str)
str=''
with open('add_key_all_original/%s' % file, 'a+') as f:
f.write(str)
if __name__=='__main__':
    # count_all('/home/nathan/PycharmProjects/MongoWusong/data1/2011')  # preprocessing
    # classification('/home/nathan/PycharmProjects/MongoWusong/data/2011')  # classify by cause of action
    min=120  # minimum sentence length
    max=1500  # maximum sentence length
    boundary=1000  # split point between high- and low-frequency causes of action
    binary('/home/nathan/PycharmProjects/clean_test',min,max,boundary)  # 1. Split causes into low-/high-frequency groups by case count
    num=1000  # number of samples randomly drawn from each high-frequency cause
    get_high('/home/nathan/Desktop/preprocess_data/high',num)  # 2.1 High-frequency causes: randomly select num samples
    l_num=100  # low-frequency causes with fewer than l_num samples are padded up to l_num
    get_low('/home/nathan/Desktop/preprocess_data/low',l_num)  # 2.2 Low-frequency causes: pad the sample count up to l_num
    #
    highPath='/home/nathan/Desktop/preprocess_data/high_sub'
    lowPath='/home/nathan/Desktop/preprocess_data/low_sub'
    getData(highPath,lowPath)  # 3. Pre-generate the data
    cut_data()  # 4. Word segmentation -- delete the old data_cut.txt first
    clean_ch('/home/nathan/Desktop/preprocess_data/data_cut.txt')  # clean stray characters out of the segmented data
    dict('/home/nathan/Desktop/preprocess_data/data_cut.txt')  # 7. Build the vocabulary of the segmented data
    getTrainTest()  # 5. Generate training, test, and validation splits
    new_dict('/home/nathan/PycharmProjects/BERT_reason/data/train.txt')
    #
    l=maxLength()  # 6. Maximum length of the plaintiff's claim
    print('原告诉称的最大长度是%s'%l)
``` |
{
"source": "1048727525/FIE",
"score": 2
} |
#### File: 1048727525/FIE/main.py
```python
from solver import solver
import argparse
from utils import *
"""parsing and configuration"""
def parse_args():
desc = "Pytorch implementation of Face Illumination Enhancement Model"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--phase', type=str, default='train', help='[train / test]')
parser.add_argument('--dataset', type=str, default='YOUR_DATASET_NAME', help='dataset_name')
parser.add_argument('--iteration', type=int, default=300000, help='The number of training iterations')
parser.add_argument('--batch_size', type=int, default=1, help='The size of batch size')
parser.add_argument('--print_freq', type=int, default=1000, help='The number of image print freq')
parser.add_argument('--save_freq', type=int, default=10000, help='The number of model save freq')
parser.add_argument('--decay_flag', type=str2bool, default=False, help='The decay_flag')
parser.add_argument('--lr', type=float, default=0.0001, help='The learning rate')
parser.add_argument('--weight_decay', type=float, default=0.0001, help='The weight decay')
# Hyperparameter
parser.add_argument('--adv_weight', type=int, default=1, help='Weight for GAN')
parser.add_argument('--identity_weight', type=int, default=10, help='Weight for Identity')
parser.add_argument('--perceptual_weight', type=int, default=100, help='Weight for Perceptual')
parser.add_argument('--histogram_weight', type=int, default=100, help='Weight for Histogram')
parser.add_argument('--pixel_weight', type=float, default=0.01, help='Weight for Pixel')
parser.add_argument('--pixel_loss_interval', type=int, default=5, help='Interval for Pixel Loss Working')
parser.add_argument('--ch', type=int, default=64, help='base channel number per layer')
parser.add_argument('--n_res', type=int, default=4, help='The number of resblock')
parser.add_argument('--n_dis', type=int, default=6, help='The number of discriminator layer')
parser.add_argument('--img_size', type=int, default=112, help='The size of image')
parser.add_argument('--img_ch', type=int, default=3, help='The size of image channel')
parser.add_argument('--result_dir', type=str, default='YOUR_RESULT_NAME', help='Directory name to save the results')
parser.add_argument('--device', default = torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
parser.add_argument('--resume', type=str2bool, default=False)
parser.add_argument('--expert_net_choice', type=str, default='senet50', choices=['senet50', 'moblieface'])
return check_args(parser.parse_args())
"""checking arguments"""
def check_args(args):
# --result_dir
check_folder(os.path.join("results", args.result_dir, 'model'))
check_folder(os.path.join("results", args.result_dir, 'img'))
check_folder(os.path.join("results", args.result_dir, 'test'))
    # --iteration
    try:
        assert args.iteration >= 1
    except AssertionError:
        print('number of iterations must be larger than or equal to one')
# --batch_size
try:
assert args.batch_size >= 1
    except AssertionError:
print('batch size must be larger than or equal to one')
return args
"""main"""
def main():
# parse arguments
args = parse_args()
if args is None:
exit()
# open session
gan = solver(args)
# build graph
gan.build_model()
if args.phase == 'train' :
gan.train()
print(" [*] Training finished!")
if args.phase == 'test' :
gan.test()
print(" [*] Test finished!")
if __name__ == '__main__':
main()
```
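`str2bool` is imported from `utils` (not shown here) and used as an argparse `type` so that flags like `--resume false` parse into booleans. A typical implementation looks like the sketch below; this is an assumption about the helper, not the repository's actual code:
```python
import argparse

def str2bool(v):
    # Hypothetical sketch of the utils.str2bool helper assumed by parse_args().
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
```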
#### File: 1048727525/FIE/networks.py
```python
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from utils import *
from model import Backbone, MobileFaceNet  # assumption: MobileFaceNet lives in model.py alongside Backbone (needed by Mobile_face_net below)
class ResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, img_size=112):
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
self.n_blocks = n_blocks
self.img_size = img_size
DownBlock = []
DownBlock += [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, stride=1, padding=0, bias=False),
nn.InstanceNorm2d(ngf),
nn.ReLU(True)]
# Down-Sampling
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
DownBlock += [nn.ReflectionPad2d(1),
nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=0, bias=False),
nn.InstanceNorm2d(ngf * mult * 2),
nn.ReLU(True)]
# Down-Sampling Bottleneck
mult = 2**n_downsampling
DownRes = []
for i in range(n_blocks):
DownRes += [ResnetBlock(ngf * mult, use_bias=False)]
self.MLP = nn.Sequential(nn.Linear(25, 256), nn.Linear(256, ngf * mult*2))
# Up-Sampling Bottleneck
for i in range(n_blocks):
setattr(self, 'UpRes_' + str(i+1), ResnetAdaILNBlock(ngf * mult, use_bias=False))
# Up-Sampling
UpBlock = []
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
UpBlock += [nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d(1),
nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0, bias=False),
ILN(int(ngf * mult / 2)),
nn.ReLU(True)]
UpBlock += [nn.ReflectionPad2d(3),
nn.Conv2d(ngf, output_nc, kernel_size=7, stride=1, padding=0, bias=False),
nn.Tanh()]
self.DownBlock = nn.Sequential(*DownBlock)
self.DownRes = nn.Sequential(*DownRes)
self.UpBlock = nn.Sequential(*UpBlock)
self.eliminate_black_func = eliminate_black(torch.device("cuda:0"))
def forward(self, input, s, device):
x = self.DownBlock(input)
heatmap0 = torch.sum(x, dim=1, keepdim=True)
x = self.DownRes(x)
heatmap1_0 = torch.sum(x, dim=1, keepdim=True)
heatmap1_1 = torch.sum(x, dim=1, keepdim=True)
h = self.MLP(s)
h = h.view(h.size(0), h.size(1))
gamma, beta = torch.chunk(h, chunks=2, dim=1)
for i in range(self.n_blocks):
x = getattr(self, 'UpRes_' + str(i+1))(x, gamma, beta)
heatmap2 = torch.sum(x, dim=1, keepdim=True)
out = self.UpBlock(x)
out = torch.mul(out+1, self.eliminate_black_func(input+1))-1
return out, heatmap0, heatmap1_0, heatmap1_1, heatmap2
class ResnetBlock(nn.Module):
def __init__(self, dim, use_bias):
super(ResnetBlock, self).__init__()
conv_block = []
conv_block += [nn.ReflectionPad2d(1),
nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias),
nn.InstanceNorm2d(dim),
nn.ReLU(True)]
conv_block += [nn.ReflectionPad2d(1),
nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias),
nn.InstanceNorm2d(dim)]
self.conv_block = nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
class ResnetAdaILNBlock(nn.Module):
def __init__(self, dim, use_bias):
super(ResnetAdaILNBlock, self).__init__()
self.pad1 = nn.ReflectionPad2d(1)
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias)
self.norm1 = adaILN(dim)
self.relu1 = nn.ReLU(True)
self.pad2 = nn.ReflectionPad2d(1)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias)
self.norm2 = adaILN(dim)
def forward(self, x, gamma, beta):
out = self.pad1(x)
out = self.conv1(out)
out = self.norm1(out, gamma, beta)
out = self.relu1(out)
out = self.pad2(out)
out = self.conv2(out)
out = self.norm2(out, gamma, beta)
return out + x
class adaILN(nn.Module):
def __init__(self, num_features, eps=1e-5):
super(adaILN, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.9)
def forward(self, input, gamma, beta):
in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True)
out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True)
out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1-self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln
out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2).unsqueeze(3)
return out
class ILN(nn.Module):
def __init__(self, num_features, eps=1e-5):
super(ILN, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1))
self.beta = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.0)
self.gamma.data.fill_(1.0)
self.beta.data.fill_(0.0)
def forward(self, input):
in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True)
out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True)
out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1-self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln
out = out * self.gamma.expand(input.shape[0], -1, -1, -1) + self.beta.expand(input.shape[0], -1, -1, -1)
return out
class Discriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=5):
super(Discriminator, self).__init__()
model = [nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=0, bias=True)),
nn.LeakyReLU(0.2, True)]
for i in range(1, n_layers - 2):
mult = 2 ** (i - 1)
model += [nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=2, padding=0, bias=True)),
nn.LeakyReLU(0.2, True)]
mult = 2 ** (n_layers - 2 - 1)
model += [nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=1, padding=0, bias=True)),
nn.LeakyReLU(0.2, True)]
# Class Activation Map
mult = 2 ** (n_layers - 2)
self.pad = nn.ReflectionPad2d(1)
self.conv = nn.utils.spectral_norm(
nn.Conv2d(ndf * mult, 1, kernel_size=4, stride=1, padding=0, bias=False))
self.model = nn.Sequential(*model)
def forward(self, input):
x = self.model(input)
heatmap = torch.sum(x, dim=1, keepdim=True)
x = self.pad(x)
out = self.conv(x)
return out, heatmap
class RhoClipper(object):
def __init__(self, min, max):
self.clip_min = min
self.clip_max = max
assert min < max
def __call__(self, module):
if hasattr(module, 'rho'):
w = module.rho.data
w = w.clamp(self.clip_min, self.clip_max)
module.rho.data = w
class se50_net(nn.Module):
def __init__(self, model_path):
super(se50_net, self).__init__()
self.model = Backbone(50, 0.5, "ir_se")
for p in self.model.parameters():
p.requires_grad = False
pre = torch.load(model_path, map_location="cpu")
self.model.load_state_dict(pre)
self.model.eval()
def get_feature(self, x):
"""
:param x: Images
:return: Embeddings of MobileFaceNets
"""
feature=self.model(x)
norm = torch.norm(feature, 2, 1, True)
feature = torch.div(feature, norm)
return feature
def get_layers(self, x, num):
return self.model.get_layers(x, num)
class Mobile_face_net(nn.Module):
def __init__(self, model_path):
super(Mobile_face_net, self).__init__()
self.model = MobileFaceNet(512)
for p in self.model.parameters():
p.requires_grad = False
pre = torch.load(model_path, map_location="cpu")
self.model.load_state_dict(pre)
self.model.eval()
def get_feature(self, x):
"""
:param x: Images
:return: Embeddings of MobileFaceNets
"""
feature=self.model(x)
norm = torch.norm(feature, 2, 1, True)
feature = torch.div(feature, norm)
return feature
```
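The core of `adaILN`/`ILN` above is a learned per-channel blend between instance statistics (per sample and channel) and layer statistics (per sample). A minimal numeric sketch of that blend, with the affine `gamma`/`beta` step omitted:
```python
import torch

# Sketch: rho-weighted blend of instance norm and layer norm, as in adaILN/ILN.
x = torch.randn(2, 4, 8, 8)                    # (N, C, H, W)
rho, eps = torch.full((1, 4, 1, 1), 0.9), 1e-5
in_mean, in_var = x.mean(dim=[2, 3], keepdim=True), x.var(dim=[2, 3], keepdim=True)
ln_mean, ln_var = x.mean(dim=[1, 2, 3], keepdim=True), x.var(dim=[1, 2, 3], keepdim=True)
out_in = (x - in_mean) / torch.sqrt(in_var + eps)
out_ln = (x - ln_mean) / torch.sqrt(ln_var + eps)
out = rho * out_in + (1 - rho) * out_ln        # gamma/beta affine would follow here
print(out.shape)                               # torch.Size([2, 4, 8, 8])
```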
#### File: 1048727525/FIE/solver.py
```python
import time, itertools
from dataset import ImageFolder
from torchvision import transforms
from torch.utils.data import DataLoader
from networks import *
from utils import *
from glob import glob
import random
import math
import numpy as np
from PIL import Image
import equalize_hist
class solver(object):
def __init__(self, args):
self.result_dir = args.result_dir
self.dataset = args.dataset
self.iteration = args.iteration
self.decay_flag = args.decay_flag
self.batch_size = args.batch_size
self.print_freq = args.print_freq
self.save_freq = args.save_freq
self.lr = args.lr
self.weight_decay = args.weight_decay
self.ch = args.ch
self.expert_net_choice = args.expert_net_choice
""" Weight """
self.adv_weight = args.adv_weight
self.identity_weight = args.identity_weight
self.perceptual_weight = args.perceptual_weight
self.histogram_weight = args.histogram_weight
self.pixel_weight = args.pixel_weight
self.pixel_loss_interval = args.pixel_loss_interval
""" Generator """
self.n_res = args.n_res
""" Discriminator """
self.n_dis = args.n_dis
self.img_size = args.img_size
self.img_ch = args.img_ch
self.device = args.device
self.resume = args.resume
self.train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.Resize((self.img_size + 30, self.img_size+30)),
transforms.RandomCrop(self.img_size),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
self.test_transform = transforms.Compose([
transforms.Resize((self.img_size, self.img_size)),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
self.s_B_mean = [0., 0.01697547, 0.04589949, 0.06318113, 0.06832961, 0.06642341,
0.06319189, 0.06153597, 0.06103129, 0.06076431, 0.06144873, 0.06210399,
0.06227141, 0.06173987, 0.0589078, 0.05313862, 0.04477818, 0.03399517,
0.02332931, 0.01438512, 0.00798908, 0.00438292, 0.00263466, 0.00156257,
0.]
self.s_B_mean_tensor = torch.tensor(self.s_B_mean, dtype=torch.float32).repeat(1, 1)
self.s_B_mean_tensor = self.s_B_mean_tensor.to(self.device)
print()
print("##### Information #####")
print("# dataset : ", self.dataset)
print("# batch_size : ", self.batch_size)
print("# iteration per epoch : ", self.iteration)
print("# expert net : ", self.expert_net_choice)
print()
print("##### Generator #####")
print("# residual blocks : ", self.n_res)
print()
print("##### Discriminator #####")
print("# discriminator layer : ", self.n_dis)
print()
print("##### Weight #####")
print("# adv_weight : ", self.adv_weight)
print("# identity_weight : ", self.identity_weight)
print('# perceptual_weight : ', self.perceptual_weight)
print('# histogram_weight : ', self.histogram_weight)
print('# pixel_weight : ', self.pixel_weight)
print('# pixel_loss_interval : ', self.pixel_loss_interval)
def build_model(self):
if self.expert_net_choice == "senet50":
self.expert_net = se50_net("./other_models/arcface_se50/model_ir_se50.pth")
else:
self.expert_net = Mobile_face_net("./other_models/MobileFaceNet/model_mobilefacenet.pth")
self.expert_net.to(self.device)
# A:dark face B:norm face
self.trainA = ImageFolder(os.path.join('dataset', self.dataset, 'trainA'), self.train_transform)
self.trainB = ImageFolder(os.path.join('dataset', self.dataset, 'trainB'), self.train_transform)
self.testA = ImageFolder(os.path.join('dataset', self.dataset, 'testA'), self.test_transform)
self.trainA_loader = DataLoader(self.trainA, batch_size=self.batch_size, shuffle=True)
self.trainB_loader = DataLoader(self.trainB, batch_size=self.batch_size, shuffle=True)
self.testA_loader = DataLoader(self.testA, batch_size=1, shuffle=False)
self.genA2B = ResnetGenerator(input_nc=3, output_nc=3, ngf=self.ch, n_blocks=self.n_res, img_size=self.img_size).to(self.device)
self.disGA = Discriminator(input_nc=3, ndf=self.ch, n_layers=7).to(self.device)
self.disLA = Discriminator(input_nc=3, ndf=self.ch, n_layers=5).to(self.device)
""" Define Loss """
self.L1_loss = nn.L1Loss().to(self.device)
self.MSE_loss = nn.MSELoss().to(self.device)
self.BCE_loss = nn.BCEWithLogitsLoss().to(self.device)
""" Trainer """
self.G_optim = torch.optim.Adam(itertools.chain(self.genA2B.parameters()), lr=self.lr, betas=(0.5, 0.999), weight_decay=self.weight_decay)
self.D_optim = torch.optim.Adam(itertools.chain(self.disGA.parameters(), self.disLA.parameters()), lr=self.lr, betas=(0.5, 0.999), weight_decay=self.weight_decay)
""" Define Rho clipper to constraint the value of rho in AdaILN and ILN"""
self.Rho_clipper = RhoClipper(0, 1)
    def transfer_to_histogram(self, d):
value_list = []
for v in d:
_list = get_lum_distribution(cv2.cvtColor(tensor2im(v.unsqueeze(0)), cv2.COLOR_BGR2RGB))
value_list.append(_list)
value_tensor = torch.tensor(value_list, dtype=torch.float32)
return value_tensor.to(self.device)
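    # Note: the 25-bin luminance histogram produced by get_lum_distribution()
    # (defined in utils, not shown here) is what conditions the generator's MLP:
    # s_B_mean above is the dataset-average histogram of the well-lit domain B,
    # and the histogram loss in train() pulls fake_A2B's histogram toward real_B's.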
def train(self):
self.genA2B.train(), self.disGA.train(), self.disLA.train()
start_iter = 1
if self.resume:
model_list = glob(os.path.join("results", self.result_dir, 'model', '*.pt'))
if not len(model_list) == 0:
model_list.sort()
start_iter = int(model_list[-1].split('_')[-1].split('.')[0])
self.load(os.path.join("results", self.result_dir, 'model'), start_iter)
print(" [*] Load SUCCESS")
if self.decay_flag and start_iter > (self.iteration // 2):
self.G_optim.param_groups[0]['lr'] -= (self.lr / (self.iteration // 2)) * (start_iter - self.iteration // 2)
self.D_optim.param_groups[0]['lr'] -= (self.lr / (self.iteration // 2)) * (start_iter - self.iteration // 2)
print("training start!")
start_time = time.time()
for step in range(start_iter, self.iteration + 1):
if self.decay_flag and step > (self.iteration // 2):
self.G_optim.param_groups[0]['lr'] -= (self.lr / (self.iteration // 2))
self.D_optim.param_groups[0]['lr'] -= (self.lr / (self.iteration // 2))
            try:
                real_A, _ = next(trainA_iter)
            except (NameError, StopIteration):
                trainA_iter = iter(self.trainA_loader)
                real_A, _ = next(trainA_iter)
            try:
                real_B, _ = next(trainB_iter)
            except (NameError, StopIteration):
                trainB_iter = iter(self.trainB_loader)
                real_B, _ = next(trainB_iter)
real_A, real_B = real_A.to(self.device), real_B.to(self.device)
            s_tensor_B = self.transfer_to_histogram(real_B)
# Update D
self.D_optim.zero_grad()
fake_A2B, _, _, _, _ = self.genA2B(real_A, s_tensor_B, self.device)
fake_B2B, _, _, _, _ = self.genA2B(real_B, s_tensor_B, self.device)
real_GB_logit, _ = self.disGA(real_B)
real_LB_logit, _ = self.disLA(real_B)
fake_GB_logit, _ = self.disGA(fake_A2B)
fake_LB_logit, _ = self.disLA(fake_A2B)
D_ad_loss_GB = self.MSE_loss(real_GB_logit, torch.ones_like(real_GB_logit).to(self.device))+self.MSE_loss(fake_GB_logit, torch.zeros_like(fake_GB_logit).to(self.device))
D_ad_loss_LB = self.MSE_loss(real_LB_logit, torch.ones_like(real_LB_logit).to(self.device))+self.MSE_loss(fake_LB_logit, torch.zeros_like(fake_LB_logit).to(self.device))
D_loss_B = self.adv_weight * (D_ad_loss_GB + D_ad_loss_LB)
Discriminator_loss = D_loss_B
Discriminator_loss.backward()
self.D_optim.step()
# Update G
self.G_optim.zero_grad()
fake_A2B, _, _, _, _ = self.genA2B(real_A, s_tensor_B, self.device)
fake_B2B, _, _, _, _ = self.genA2B(real_B, s_tensor_B, self.device)
fake_GB_logit, _ = self.disGA(fake_A2B)
fake_LB_logit, _ = self.disLA(fake_A2B)
G_ad_loss_GB = self.MSE_loss(fake_GB_logit, torch.ones_like(fake_GB_logit).to(self.device))
G_ad_loss_LB = self.MSE_loss(fake_LB_logit, torch.ones_like(fake_LB_logit).to(self.device))
G_identity_loss_B = self.L1_loss(fake_B2B, real_B)
G_loss_B = self.adv_weight * (G_ad_loss_GB + G_ad_loss_LB) + self.identity_weight * G_identity_loss_B
#perceptual_loss
perceptual_loss_A2B = self.L1_loss(self.expert_net.get_feature(real_A), self.expert_net.get_feature(fake_A2B))
perceptual_loss = (perceptual_loss_A2B) * self.perceptual_weight
#histogram loss
            s_tensor_fake_A2B = self.transfer_to_histogram(fake_A2B)
            s_tensor_B = self.transfer_to_histogram(real_B)
histogram_loss = self.MSE_loss(s_tensor_fake_A2B, s_tensor_B)*self.histogram_weight
#pixel loss
if step%self.pixel_loss_interval == 0:
pixel_loss = self.pixel_weight*(self.L1_loss(fake_A2B, real_A) + self.L1_loss(fake_B2B, real_B))
else:
pixel_loss = 0
Generator_loss = G_loss_B + perceptual_loss + histogram_loss + pixel_loss
Generator_loss.backward()
self.G_optim.step()
# clip parameter of AdaILN and ILN, applied after optimizer step
self.genA2B.apply(self.Rho_clipper)
print("[%5d/%5d] time: %4.4f d_loss: %.8f, g_loss: %.8f" % (step, self.iteration, time.time() - start_time, Discriminator_loss, Generator_loss))
print("identity loss : %.8f" % (self.identity_weight * G_identity_loss_B))
print("perceptual loss : %.8f" % (perceptual_loss))
print("histogram loss : %.8f" % (histogram_loss))
print("pixel loss : %.8f" % (pixel_loss))
with torch.no_grad():
if step % self.print_freq == 0:
train_sample_num = 5
test_sample_num = 5
A2B = np.zeros((self.img_size * 6, 0, 3))
self.genA2B.eval(), self.disGA.eval(), self.disLA.eval()
for _ in range(train_sample_num):
                    try:
                        real_A, _ = next(trainA_iter)
                    except (NameError, StopIteration):
                        trainA_iter = iter(self.trainA_loader)
                        real_A, _ = next(trainA_iter)
                    try:
                        real_B, _ = next(trainB_iter)
                    except (NameError, StopIteration):
                        trainB_iter = iter(self.trainB_loader)
                        real_B, _ = next(trainB_iter)
real_A, real_B = real_A.to(self.device), real_B.to(self.device)
fake_A2B, fake_A2B_heatmap0, fake_A2B_heatmap1_0, fake_A2B_heatmap1_1, fake_A2B_heatmap2 = self.genA2B(real_A, self.s_B_mean_tensor, self.device)
fake_B2B, fake_B2B_heatmap0, fake_B2B_heatmap1_0, fake_B2B_heatmap1_1, fake_B2B_heatmap2 = self.genA2B(real_B, self.s_B_mean_tensor, self.device)
A2B = np.concatenate((A2B, np.concatenate((RGB2BGR(tensor2numpy(denorm(real_A[0]))),
cam(tensor2numpy(fake_A2B_heatmap0[0]), self.img_size),
cam(tensor2numpy(fake_A2B_heatmap1_0[0]), self.img_size),
cam(tensor2numpy(fake_A2B_heatmap1_1[0]), self.img_size),
cam(tensor2numpy(fake_A2B_heatmap2[0]), self.img_size),
RGB2BGR(tensor2numpy(denorm(fake_A2B[0])))), 0)), 1)
for _ in range(test_sample_num):
try:
    real_A, _ = next(testA_iter)
except StopIteration:
    testA_iter = iter(self.testA_loader)
    real_A, _ = next(testA_iter)
real_A = real_A.to(self.device)
fake_A2B, fake_A2B_heatmap0, fake_A2B_heatmap1_0, fake_A2B_heatmap1_1, fake_A2B_heatmap2 = self.genA2B(real_A, self.s_B_mean_tensor, self.device)
A2B = np.concatenate((A2B, np.concatenate((RGB2BGR(tensor2numpy(denorm(real_A[0]))),
cam(tensor2numpy(fake_A2B_heatmap0[0]), self.img_size),
cam(tensor2numpy(fake_A2B_heatmap1_0[0]), self.img_size),
cam(tensor2numpy(fake_A2B_heatmap1_1[0]), self.img_size),
cam(tensor2numpy(fake_A2B_heatmap2[0]), self.img_size),
RGB2BGR(tensor2numpy(denorm(fake_A2B[0])))), 0)), 1)
cv2.imwrite(os.path.join("results", self.result_dir, 'img', 'A2B_%07d.png' % step), A2B * 255.0)
self.genA2B.train(), self.disGA.train(), self.disLA.train()
if step % self.save_freq == 0:
self.save(os.path.join("results", self.result_dir, 'model'), step)
if step % 1000 == 0:
params = {}
params['genA2B'] = self.genA2B.state_dict()
params['disGA'] = self.disGA.state_dict()
params['disLA'] = self.disLA.state_dict()
torch.save(params, os.path.join("results", self.result_dir, self.dataset + '_params_latest.pt'))
def load(self, dir, step):
params = torch.load(os.path.join(dir, self.dataset + '_params_%07d.pt' % step))
self.genA2B.load_state_dict(params['genA2B'])
self.disGA.load_state_dict(params['disGA'])
self.disLA.load_state_dict(params['disLA'])
def save(self, dir, step):
params = {}
params['genA2B'] = self.genA2B.state_dict()
params['disGA'] = self.disGA.state_dict()
params['disLA'] = self.disLA.state_dict()
torch.save(params, os.path.join(dir, self.dataset + '_params_%07d.pt' % step))
def test(self):
model_list = glob(os.path.join("results", self.result_dir, 'model', '*.pt'))
if not len(model_list) == 0:
model_list.sort()
ckpt_step = int(model_list[-1].split('_')[-1].split('.')[0])  # renamed from `iter` to avoid shadowing the builtin
self.load(os.path.join("results", self.result_dir, 'model'), ckpt_step)
print(" [*] Load SUCCESS")
else:
print(" [*] Load FAILURE")
return
self.genA2B.eval()
for i, (real_A, _) in enumerate(self.testA_loader):
real_A = real_A.to(self.device)
fake_A2B, fake_A2B_heatmap0, fake_A2B_heatmap1_0, fake_A2B_heatmap1_1, fake_A2B_heatmap2 = self.genA2B(real_A, self.s_B_mean_tensor, self.device)
A2B = np.concatenate((RGB2BGR(tensor2numpy(denorm(real_A[0]))), cam(tensor2numpy(fake_A2B_heatmap0[0]), self.img_size), cam(tensor2numpy(fake_A2B_heatmap1_0[0]), self.img_size), cam(tensor2numpy(fake_A2B_heatmap2[0]), self.img_size), RGB2BGR(tensor2numpy(denorm(fake_A2B[0])))), 0)
cv2.imwrite(os.path.join("results", self.result_dir, 'test', 'A2B_%d.png' % (i + 1)), A2B * 255.0)
``` |
{
"source": "1048727525/fnm_pytorch",
"score": 3
} |
#### File: 1048727525/fnm_pytorch/data_loader.py
```python
import os
import scipy
import numpy as np
from util import *
from PIL import Image
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
class sample_dataset(Dataset):
def __init__(self, list_path, img_root_path, crop_size, image_size, mode="train"):
self.img_name_list = read_txt_file(list_path)
self.img_root_path = img_root_path
transform = []
if mode == "train":
transform.append(transforms.ColorJitter(brightness=0.5, contrast=0, saturation=0, hue=0))
transform.append(transforms.RandomHorizontalFlip())
transform.append(transforms.CenterCrop(crop_size))
transform.append(transforms.Resize(image_size))
transform.append(transforms.ToTensor())
transform.append(transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
self.transform = transforms.Compose(transform)
transform_112 = []
if mode == "train":
transform_112.append(transforms.ColorJitter(brightness=0.5, contrast=0, saturation=0, hue=0))
transform_112.append(transforms.RandomHorizontalFlip())
transform_112.append(transforms.CenterCrop(crop_size))
transform_112.append(transforms.Resize(112))
transform_112.append(transforms.ToTensor())
transform_112.append(transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
self.transform_112 = transforms.Compose(transform_112)
def __len__(self):
return len(self.img_name_list)
def __getitem__(self, idx):
img_path = os.path.join(self.img_root_path, self.img_name_list[idx])
img = Image.open(img_path).convert('RGB')
return self.transform(img), self.transform_112(img)
def get_loader(list_path, img_root_path, crop_size=224, image_size=224, batch_size=16, mode="train", num_workers=8):
dataset = sample_dataset(list_path, img_root_path, crop_size, image_size, mode=mode)  # pass mode through so eval loaders skip the train-time augmentations
data_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=(mode=='train'), num_workers=num_workers)
return data_loader
if __name__ == '__main__':
import cv2
profile_list_path = "../fnm/mpie/casia_gt.txt"
front_list_path = "../fnm/mpie/session01_front_demo.txt"
profile_path = "../../datasets/casia_aligned_250_250_jpg"
front_path = "../../datasets/session01_align"
crop_size = 224
image_size = 224
#dataset = sample_dataset(profile_list_path, profile_path, crop_size, image_size)
'''
for i, sample in enumerate(dataset):
cv2.imwrite("profile.jpg", tensor2im(sample["profile"]))
cv2.imwrite("front.jpg", tensor2im(sample["front"]))
if i==1:
break
'''
data_loader = get_loader(front_list_path, front_path, crop_size=224, image_size=224, batch_size=16, mode="train", num_workers=8)
for img_full, img_112 in data_loader:  # each batch unpacks to the (image_size, 112px) tensor pair
    print(img_112.shape)
'''
for i, sample in enumerate(data_loader):
cv2.imwrite("profile.jpg", cv2.cvtColor(tensor2im(sample["profile"]), cv2.COLOR_BGR2RGB))
cv2.imwrite("front.jpg", cv2.cvtColor(tensor2im(sample["front"]), cv2.COLOR_BGR2RGB))
if i==1:
break
'''
```
#### File: 1048727525/fnm_pytorch/se50_net.py
```python
import torch
import torch.nn as nn
from senet_model import Backbone
class se50_net(nn.Module):
def __init__(self, model_path):
super(se50_net, self).__init__()
self.model = Backbone(50, 0.5, "ir_se")
for p in self.model.parameters():
p.requires_grad = False
pre = torch.load(model_path, map_location="cpu")
self.model.load_state_dict(pre)
self.model.eval()
def get_feature(self, x):
feature=self.model(x)
norm = torch.norm(feature, 2, (1, 2, 3), True)
feature = torch.div(feature, norm)
'''
print(feature.shape)
norm = torch.norm(feature, 2, 1, True)
print(norm)
feature = torch.div(feature, norm)
'''
return feature
def get_layers(self, x, num):
return self.model.get_layers(x, num)
def get_feature_vec(self, x):
feature=self.model.get_fea(x)
return feature
if __name__ == '__main__':
def im2tensor(img):
test_transform = transforms.Compose([
transforms.Resize((img.shape[0], img.shape[1])),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
img = transforms.ToPILImage()(img)
return test_transform(img).unsqueeze(0).cuda()
import cv2
from PIL import Image
from torchvision import transforms
img_path = "../../dataset/dark_and_norm/testA/326_509.jpg"
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_tensor = im2tensor(img)
model = se50_net("model_ir_se50.pth").cuda()
print(model.get_feature(img_tensor))
``` |
{
"source": "1049451037/cogdl",
"score": 3
} |
#### File: cogdl/layers/deepergcn_layer.py
```python
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from .mlp_layer import MLP
from cogdl.utils import get_activation, mul_edge_softmax, get_norm_layer
class GENConv(nn.Module):
def __init__(
self,
in_feats: int,
out_feats: int,
aggr: str = "softmax_sg",
beta: float = 1.0,
p: float = 1.0,
learn_beta: bool = False,
learn_p: bool = False,
use_msg_norm: bool = False,
learn_msg_scale: bool = True,
norm: Optional[str] = None,
residual: bool = False,
activation: Optional[str] = None,
num_mlp_layers: int = 2,
edge_attr_size: Optional[list] = None,
):
super(GENConv, self).__init__()
self.use_msg_norm = use_msg_norm
self.mlp = MLP(in_feats, out_feats, in_feats * 2, num_layers=num_mlp_layers, activation=activation, norm=norm)
self.message_encoder = torch.nn.ReLU()
self.aggr = aggr
if aggr == "softmax_sg":
self.beta = torch.nn.Parameter(
torch.Tensor(
[
beta,
]
),
requires_grad=learn_beta,
)
else:
self.register_buffer("beta", None)
if aggr == "powermean":
self.p = torch.nn.Parameter(
torch.Tensor(
[
p,
]
),
requires_grad=learn_p,
)
else:
self.register_buffer("p", None)
self.eps = 1e-7
self.s = torch.nn.Parameter(torch.Tensor([1.0]), requires_grad=learn_msg_scale and use_msg_norm)
self.act = None if activation is None else get_activation(activation)
self.norm = None if norm is None else get_norm_layer(norm, in_feats)
self.residual = residual
if edge_attr_size is not None and edge_attr_size[0] > 0:
if len(edge_attr_size) > 1:
self.edge_encoder = BondEncoder(edge_attr_size, in_feats)
else:
self.edge_encoder = EdgeEncoder(edge_attr_size[0], in_feats)
else:
self.edge_encoder = None
def message_norm(self, x, msg):
x_norm = torch.norm(x, dim=1, p=2)
msg_norm = F.normalize(msg, p=2, dim=1)
msg_norm = msg_norm * x_norm.unsqueeze(-1)
return x + self.s * msg_norm
def forward(self, graph, x):
if self.norm is not None:
x = self.norm(x)
if self.act is not None:
x = self.act(x)
edge_index = graph.edge_index
dim = x.shape[1]
edge_msg = x[edge_index[1]]
if self.edge_encoder is not None and graph.edge_attr is not None:
edge_msg += self.edge_encoder(graph.edge_attr)
edge_msg = self.message_encoder(edge_msg) + self.eps
if self.aggr == "softmax_sg":
h = mul_edge_softmax(graph, self.beta * edge_msg.contiguous())
h = edge_msg * h
elif self.aggr == "softmax":
h = mul_edge_softmax(graph, edge_msg)
h = edge_msg * h
elif self.aggr == "powermean":
deg = graph.degrees()
torch.clamp_(edge_msg, 1e-7, 1.0)
h = edge_msg.pow(self.p) / deg[edge_index[0]].unsqueeze(-1)
elif self.aggr == "mean":
deg = graph.degrees()
deg_rev = deg.pow(-1)
deg_rev[torch.isinf(deg_rev)] = 0
h = edge_msg * deg_rev[edge_index[0]].unsqueeze(-1)
else:
raise NotImplementedError
h = torch.zeros_like(x).scatter_add_(dim=0, index=edge_index[0].unsqueeze(-1).repeat(1, dim), src=h)
if self.aggr == "powermean":
h = h.pow(1.0 / self.p)
if self.use_msg_norm:
h = self.message_norm(x, h)
if self.residual:
h = h + x
h = self.mlp(h)
return h
class ResGNNLayer(nn.Module):
"""
Implementation of DeeperGCN in paper `"DeeperGCN: All You Need to Train Deeper GCNs"` <https://arxiv.org/abs/2006.07739>
Parameters
-----------
conv : nn.Module
An instance of GNN Layer, receiving (graph, x) as inputs
n_channels : int
size of input features
activation : str
norm: str
type of normalization, ``batchnorm`` as default
dropout : float
checkpoint_grad : bool
"""
def __init__(
self,
conv,
in_channels,
activation="relu",
norm="batchnorm",
dropout=0.0,
out_norm=None,
out_channels=-1,
residual=True,
checkpoint_grad=False,
):
super(ResGNNLayer, self).__init__()
self.conv = conv
self.activation = get_activation(activation)
self.dropout = dropout
self.norm = get_norm_layer(norm, in_channels)
self.residual = residual
if out_norm:
self.out_norm = get_norm_layer(norm, out_channels)
else:
self.out_norm = None
self.checkpoint_grad = checkpoint_grad  # honour the constructor argument instead of hard-coding False
def forward(self, graph, x, dropout=None, *args, **kwargs):
h = self.norm(x)
h = self.activation(h)
if isinstance(dropout, float) or dropout is None:
h = F.dropout(h, p=self.dropout, training=self.training)
else:
if self.training:
h = h * dropout
if self.checkpoint_grad:
h = checkpoint(self.conv, graph, h, *args, **kwargs)
else:
h = self.conv(graph, h, *args, **kwargs)
if self.residual:
h = h + x
if self.out_norm:
return self.out_norm(h)
else:
return h
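# Minimal usage sketch (added note; the shapes and argument values are
# illustrative assumptions, not taken from the repo):
#   conv = GENConv(in_feats=64, out_feats=64, aggr="softmax_sg")
#   layer = ResGNNLayer(conv, in_channels=64, activation="relu", norm="batchnorm")
#   h = layer(graph, x)  # x: [num_nodes, 64] node-feature tensor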
class EdgeEncoder(nn.Module):
def __init__(self, in_feats, out_feats, bias=False):
super(EdgeEncoder, self).__init__()
self.nn = nn.Linear(in_feats, out_feats, bias=bias)
def forward(self, edge_attr):
return self.nn(edge_attr)
class BondEncoder(nn.Module):
def __init__(self, bond_dim_list, emb_size):
super(BondEncoder, self).__init__()
self.bond_emb_list = nn.ModuleList()
for i, size in enumerate(bond_dim_list):
x = nn.Embedding(size, emb_size)
self.bond_emb_list.append(x)
def forward(self, edge_attr):
out = 0
for i in range(edge_attr.shape[1]):
out += self.bond_emb_list[i](edge_attr[:, i])
return out
```
#### File: cogdl/layers/sage_layer.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from cogdl.utils import spmm
class MeanAggregator(object):
def __call__(self, graph, x):
graph.row_norm()
x = spmm(graph, x)
return x
class SumAggregator(object):
def __call__(self, graph, x):
x = spmm(graph, x)
return x
class SAGELayer(nn.Module):
def __init__(self, in_feats, out_feats, normalize=False, aggr="mean", dropout=0.0):
super(SAGELayer, self).__init__()
self.in_feats = in_feats
self.out_feats = out_feats
self.fc = nn.Linear(2 * in_feats, out_feats)
self.normalize = normalize
self.dropout = dropout
if aggr == "mean":
self.aggr = MeanAggregator()
elif aggr == "sum":
self.aggr = SumAggregator()
else:
raise NotImplementedError
def forward(self, graph, x):
out = self.aggr(graph, x)
out = torch.cat([x, out], dim=-1)
out = self.fc(out)
if self.normalize:
out = F.normalize(out, p=2.0, dim=-1)
return out
```
#### File: models/nn/graphsage.py
```python
from typing import Any
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from cogdl.data import Graph
from cogdl.layers import SAGELayer
from cogdl.trainers.sampled_trainer import NeighborSamplingTrainer
from cogdl.utils import get_activation, get_norm_layer
from .. import BaseModel, register_model
def sage_sampler(adjlist, edge_index, num_sample):
if adjlist == {}:
row, col = edge_index
row = row.cpu().numpy()
col = col.cpu().numpy()
for i in zip(row, col):
if not (i[0] in adjlist):
adjlist[i[0]] = [i[1]]
else:
adjlist[i[0]].append(i[1])
sample_list = []
for i in adjlist:
pairs = [[i, j] for j in adjlist[i]]  # renamed from `list` to avoid shadowing the builtin
if len(pairs) > num_sample:
    pairs = random.sample(pairs, num_sample)
sample_list.extend(pairs)
edge_idx = torch.as_tensor(sample_list, dtype=torch.long).t()
return edge_idx
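# Added note: the adjacency dict is built once from edge_index on the first
# call and cached on the model; every later call just re-samples, keeping at
# most num_sample outgoing edges per node, drawn uniformly at random, and
# returns them as a 2 x E edge_index tensor.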
@register_model("graphsage")
class Graphsage(BaseModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument("--hidden-size", type=int, nargs='+', default=[128])
parser.add_argument("--num-layers", type=int, default=2)
parser.add_argument("--sample-size", type=int, nargs='+', default=[10, 10])
parser.add_argument("--dropout", type=float, default=0.5)
parser.add_argument("--batch-size", type=int, default=128)
parser.add_argument("--aggr", type=str, default="mean")
# fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(
args.num_features,
args.num_classes,
args.hidden_size,
args.num_layers,
args.sample_size,
args.dropout,
args.aggr,
)
def sampling(self, edge_index, num_sample):
return sage_sampler(self.adjlist, edge_index, num_sample)
def __init__(self, num_features, num_classes, hidden_size, num_layers, sample_size, dropout, aggr):
super(Graphsage, self).__init__()
assert num_layers == len(sample_size)
self.adjlist = {}
self.num_features = num_features
self.num_classes = num_classes
self.hidden_size = hidden_size
self.num_layers = num_layers
self.sample_size = sample_size
self.dropout = dropout
shapes = [num_features] + hidden_size + [num_classes]
self.convs = nn.ModuleList(
[SAGELayer(shapes[layer], shapes[layer + 1], aggr=aggr) for layer in range(num_layers)]
)
def mini_forward(self, graph):
x = graph.x
for i in range(self.num_layers):
edge_index_sp = self.sampling(graph.edge_index, self.sample_size[i]).to(x.device)
with graph.local_graph():
graph.edge_index = edge_index_sp
x = self.convs[i](graph, x)
if i != self.num_layers - 1:
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
return x
def mini_loss(self, data):
return self.loss_fn(
self.mini_forward(data)[data.train_mask],
data.y[data.train_mask],
)
def predict(self, data):
return self.forward(data)
def forward(self, *args):
if isinstance(args[0], Graph):
return self.mini_forward(*args)
else:
device = next(self.parameters()).device
x, adjs = args
for i, (src_id, graph, size) in enumerate(adjs):
graph = graph.to(device)
output = self.convs[i](graph, x)
x = output[: size[1]]
if i != self.num_layers - 1:
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
return x
def node_classification_loss(self, *args):
if isinstance(args[0], Graph):
return self.mini_loss(*args)
else:
x, adjs, y = args
pred = self.forward(x, adjs)
return self.loss_fn(pred, y)
def inference(self, x_all, data_loader):
device = next(self.parameters()).device
for i in range(len(self.convs)):
output = []
for src_id, graph, size in data_loader:
x = x_all[src_id].to(device)
graph = graph.to(device)
x = self.convs[i](graph, x)
x = x[: size[1]]
if i != self.num_layers - 1:
x = F.relu(x)
output.append(x.cpu())
x_all = torch.cat(output, dim=0)
return x_all
@staticmethod
def get_trainer(args):
if args.dataset not in ["cora", "citeseer", "pubmed"]:
return NeighborSamplingTrainer
if hasattr(args, "use_trainer"):
return NeighborSamplingTrainer
def set_data_device(self, device):
self.device = device
@register_model("sage")
class SAGE(BaseModel):
@staticmethod
def add_args(parser):
parser.add_argument("--hidden-size", type=int, default=128)
parser.add_argument("--num-layers", type=int, default=2)
parser.add_argument("--dropout", type=float, default=0.5)
parser.add_argument("--aggr", type=str, default="mean")
parser.add_argument("--norm", type=str, default="layernorm")
parser.add_argument("--activation", type=str, default="relu")
parser.add_argument("--normalize", action="store_true")
@classmethod
def build_model_from_args(cls, args):
return cls(
args.num_features,
args.num_classes,
args.hidden_size,
args.num_layers,
args.aggr,
args.dropout,
args.norm,
args.activation,
args.normalize if hasattr(args, "normalize") else False,
)
def __init__(
self,
in_feats,
out_feats,
hidden_size,
num_layers,
aggr="mean",
dropout=0.5,
norm=None,
activation=None,
normalize=False,
):
super(SAGE, self).__init__()
shapes = [in_feats] + [hidden_size] * (num_layers - 1) + [out_feats]
self.num_layers = num_layers
self.layers = nn.ModuleList(
[
SAGELayer(
shapes[i],
shapes[i + 1],
aggr=aggr,
normalize=normalize if i != num_layers - 1 else False,
dropout=dropout,
)
for i in range(num_layers)
]
)
if norm is not None:
self.norm_list = nn.ModuleList([get_norm_layer(norm, hidden_size) for _ in range(num_layers - 1)])
else:
self.norm_list = None
self.dropout = dropout
self.act = get_activation(activation)
def forward(self, graph):
x = graph.x
for i, layer in enumerate(self.layers):
x = layer(graph, x)
if i != self.num_layers - 1:
# x = F.dropout(x, self.dropout, training=self.training)
if self.norm_list is not None:
x = self.norm_list[i](x)
x = self.act(x)
return x
```
#### File: cogdl/operators/actnn.py
```python
import os
import torch
import torch.nn as nn
from torch.utils.cpp_extension import load
path = os.path.join(os.path.dirname(__file__))
try:
qdropout = load(
name="qdropout",
sources=[os.path.join(path, "actnn/actnn.cc"), os.path.join(path, "actnn/actnn.cu")],
verbose=False,
)
except Exception:
print("Please install actnn library first.")
qdropout = None
class QDropout(nn.Module):
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, input: torch.Tensor) -> torch.Tensor:
return qdropout.act_quantized_dropout(input, self.p)
``` |
{
"source": "1049451037/Webpage_Textual_Extraction",
"score": 2
} |
#### File: lib/pextract/pextract.py
```python
def has_style(tag):
return tag.has_attr('style')
def has_class(tag):
return tag.has_attr('class')
def clean(soup):
if soup.name == 'br' or soup.name == 'img' or soup.name == 'p' or soup.name == 'div':
return
try:
ll = 0
for j in soup.strings:
ll += len(j.replace('\n', ''))
if ll == 0:
soup.decompose()
else:
for child in soup.children:
clean(child)
except Exception as e:
pass
def dfs(soup, v):
if soup.name == 'a' or soup.name == 'br':
return
try:
lt = len(soup.get_text())
ls = len(str(soup))
a = soup.find_all('a')
at = 0
for j in a:
at += len(j.get_text())
lvt = lt - at
v.append((soup, lt / ls * lvt))
for child in soup.children:
dfs(child, v)
except Exception as e:
pass
def extract(soup, text_only = True, remove_img = True):
filt = ['script', 'noscript', 'style', 'embed', 'label', 'form', 'input', 'iframe', 'head', 'meta', 'link', 'object', 'aside', 'channel']
if remove_img:
filt.append('img')
for ff in filt:
for i in soup.find_all(ff):
i.decompose()
for tag in soup.find_all(has_style):
del tag['style']
for tag in soup.find_all(has_class):
del tag['class']
clean(soup)
LVT = len(soup.get_text())
for i in soup.find_all('a'):
LVT -= len(i.get_text())
v = []
dfs(soup, v)
mij = 0
for i in range(len(v)):
if v[i][1] > v[mij][1]:
mij = i
if text_only:
res = v[mij][0].get_text()
else:
res = str(v[mij][0])
return res, v[mij][1] / LVT
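# Heuristic recap (added note): each tag is scored by
# (text length / raw HTML length) * (text length excluding anchor text), so
# text-dense, link-poor nodes win out; the second return value normalises the
# winning score by the page's total link-free text length, giving a rough
# confidence measure.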
``` |
{
"source": "1049884729/projecteulerByPython",
"score": 3
} |
#### File: 1049884729/projecteulerByPython/Question11.py
```python
chart = '''08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48'''
chatArray = [[0 for i in range(20)] for j in range(20)]  # initialize a 20x20 grid
def getArray():
'''
Split the grid string into a 20x20 2D array
:return:
'''
tempArray = chart.replace("\n", " ").split(' ')  # split the string into a flat list of tokens
for x in range(0, 20):
for y in range(0, 20):
chatArray[x][y] = int(tempArray[x * 20 + y])
def getUpToDown(number):
    '''
    Top to bottom: max product of `number` vertically adjacent cells
    :param number:
    :return:
    '''
    tempProduct = set()
    for y in range(0, 20):
        for x in range(0, 21 - number):
            products = 1
            for index in range(0, number):
                products *= chatArray[x + index][y]
            tempProduct.add(products)
    value = max(tempProduct)
    print(value)
    return value
def getLeftToRight(number):
    '''
    Left to right: max product of `number` horizontally adjacent cells
    :param number:
    :return:
    '''
    tempProduct = set()
    for x in range(0, 20):
        for y in range(0, 21 - number):
            products = 1
            for index in range(0, number):
                products *= chatArray[x][y + index]
            tempProduct.add(products)
    value = max(tempProduct)
    print(value)
    return value
def getLeftUpToRightDown(number):
    '''
    Top-left to bottom-right diagonal; while coding, printing the coordinates
    helps verify that the traversal direction is correct
    :param number:
    :return:
    '''
    tempProduct = set()
    for x in range(0, 21 - number):
        for y in range(0, 21 - number):
            products = 1
            for index in range(0, number):
                # print("x:%s y:%s" % (x + index, y + index))  # check the direction via the coordinates
                products *= chatArray[x + index][y + index]
            tempProduct.add(products)
    value = max(tempProduct)
    print(value)
    return value
def getRightUpToLeftDown(number):
    '''
    Top-right to bottom-left diagonal
    :param number:
    :return:
    '''
    tempProduct = set()
    for x in range(0, 21 - number):
        for y in range(number - 1, 20):
            products = 1
            for index in range(0, number):
                products *= chatArray[x + index][y - index]
            tempProduct.add(products)
    value = max(tempProduct)
    print(value)
    return value
def resolveQuestion(number):
getArray()
resultArray = set()
resultArray.add(getLeftToRight(number))
resultArray.add(getUpToDown(number))
resultArray.add(getLeftUpToRightDown(number))
resultArray.add(getRightUpToLeftDown(number))
print("max:%s" % max(resultArray))
resolveQuestion(4)
```
#### File: 1049884729/projecteulerByPython/Question12.py
```python
import math
print(int(math.sqrt(15)))
def divisors(integer):
'''
Rationale: every divisor no larger than the square root pairs with a divisor
at least as large, so it suffices to check the integers up to sqrt(integer).
For 15 (integer sqrt = 3) it is enough to test 1, 2, 3; each hit adds 2 for the pair.
:param integer:
:return:
'''
count = 0
divided = int(math.sqrt(integer))+1
for i in range(1, divided):
if integer % i == 0:  # use ==, not `is`, for numeric comparison
count += 2
return count
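# Added note: for perfect squares this counts the square root twice, e.g.
# divisors(16) reports 6 although 16 has only 5 divisors; that is harmless
# here because the qualifying triangular number is not a perfect square.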
def getNumber(num):
'''
:param num: target number of divisors
:return:
'''
count=0
temp=1
index=1
while(count<num):
temp=int((index**2+index)/2)
count=divisors(temp)
if(count<num):
index+=1
print(temp)
getNumber(500)
```
#### File: 1049884729/projecteulerByPython/Question16.py
```python
import math
#method one
def getSum(number):
result=int(math.pow(2,number))
sum=0
print(result)
divide=10
while(result!=0):
sum+=result%divide
result=result//divide
return sum
#method two
def getSumTwo(number):
result=str(int(math.pow(2,number)))
listSum=list()
for i in result:
listSum.append(int(i))
return sum(listSum)
# result=getSum(1000)
result=getSumTwo(1000)
print(result)
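# Added note: math.pow returns a float, which is exact here only because
# 2**1000 is itself a power of two; plain integer arithmetic (2 ** number)
# sidesteps the float-precision question for other bases.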
```
#### File: 1049884729/projecteulerByPython/Question1.py
```python
class ThreeOrFive(object):
def __init__(self,maxnumber):
self.maxnumber=maxnumber
def calculte(self):
'''Solve Question 1'''
allSet=set()
for number in range(0,self.maxnumber):
if number%3==0 or number%5==0:
allSet.add(number)
print(number)
sumresult=sum(allSet)
print(sumresult)
example=ThreeOrFive(10)
example.calculte()
example=ThreeOrFive(1000)
example.calculte()
```
#### File: 1049884729/projecteulerByPython/Question3.py
```python
def prime(destNumber):
    factor = 2
    while destNumber > 1:
        if destNumber % factor == 0:
            destNumber //= factor  # integer division keeps precision for large numbers
            print(factor)
        else:
            factor += 1  # only advance once the current factor is exhausted, so repeated primes are handled
# prime(13195)
prime(600851475143)
```
#### File: 1049884729/projecteulerByPython/Question7.py
```python
def isPrime(numb):
'''Check whether numb is prime'''
for n in range(2,numb+1):
if not numb%n and n!=numb:
return False  # divisible, so not prime
return True
def indexPrime(index):
'''Find the prime at the given (1-based) index'''
prime=1
tempIndex=1
while(tempIndex<=index):
prime += 1
if isPrime(prime):
tempIndex+=1
print(prime)
indexPrime(10001)
```
#### File: 1049884729/projecteulerByPython/Question9.py
```python
const=1000
def main():
a=1
b=1
c=1000-a-b
while(not pTrigle(a,b,c)):
b+=1
a=abc(b)
c = const - a - b
if(c<0):
break
print("a:%s b:%s c:%s"%(a,b,c))
print("result: %s"%(a*b*c))
def abc(b):
'''Derive a from b using the constraint formula'''
if (2 * const - 2 * b)==0 :
return b
a = int((const ** 2 - 2 * const * b) / (2 * const - 2 * b))
return a
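# Derivation of the formula above (added for clarity):
# from a + b + c = 1000 and a^2 + b^2 = c^2, substitute c = 1000 - a - b:
#   a^2 + b^2 = 1000^2 + a^2 + b^2 + 2ab - 2*1000*a - 2*1000*b
#   0 = 1000^2 - 2*1000*b - a*(2*1000 - 2*b)
#   a = (1000^2 - 2*1000*b) / (2*1000 - 2*b)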
def pTrigle(a,b,c):
'''Check the Pythagorean relation'''
if a**2+b**2==c**2:
return True
return False
main()
``` |
{
"source": "1049884729/StuffWithPython",
"score": 3
} |
#### File: StuffWithPython/chapter11/studywebbrowser.py
```python
"""
Topic 2: download files from the web with the requests module.
Requires installing the module first: sudo pip install requests
"""
import requests,os
# url="http://p0.so.qhimgs1.com/t01aa7378e6f2c96450.jpg"
def downloadImg(url):
re=requests.get(url)
path=os.path.basename(url)  # take the base filename from the url
if re.status_code==requests.codes.ok:
img=open(os.path.join("./",path),"wb")
for chunk in re.iter_content(10000):  # buffer size for each write
img.write(chunk)
img.close()
"""
Topic 3: parse HTML with the BeautifulSoup module.
BeautifulSoup must be installed; it is imported as bs4 (Beautiful Soup 4).
"""
import bs4
repic=requests.get("https://helentang.tuchong.com/")
if repic.status_code==requests.codes.ok:
content=repic.text
soup=bs4.BeautifulSoup(content,"lxml")
imgs=soup.select("img")
for img in imgs:
try:
imgurl=str(img.attrs.get("src"))
if imgurl != None :
if imgurl.startswith("//"):
imgRealUrl=imgurl.replace("//","http://")
print(imgRealUrl)
downloadImg(imgRealUrl)
except:
pass
```
#### File: StuffWithPython/chapter2/calculator.py
```python
class Count(object):
def __init__(self,a,b):
self.a = int(a)
self.b = int(b)
def add(self):
return self.b+self.a
def sub(self):
return self.a-self.b
```
#### File: StuffWithPython/chapter2/test.py
```python
from calculator import Count
import unittest
def setUpModule():
print("模块开始 Module\n")
def tearDownModule():
print("模块结束 Module\n")
class TestCount(unittest.TestCase):
def setUp(self):
print("unittest start")
def test_add(self):
j=Count(2,3)
self.assertEqual(j.add(),5)
def test_Not(self):
j=Count(23,3)
self.assertEqual(j.add(),26,msg="sums should be equal")
def tearDown(self):
print("unittest end")
class TestSub(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("class unittest类开始 ")
def setUp(self):
print("unittest sub start")
@classmethod
def tearDownClass(cls):
print("class unittest tearDown类结束 ")
def tearDown(self):
print("unittest sub end")
@unittest.skipIf(3>2,"跳过")
def test_sub(self):
j=Count(5,2)
self.assertEqual(j.sub(),3)
print("unittest test_sub")
def test_sub2(self):
j=Count(105,21)
self.assertEqual(j.sub(),84)
print("unittest test_sub2")
if __name__=='__main__':
testSuit=unittest.TestSuite()
testSuit.addTest(TestSub("test_sub"))
testSuit.addTest(TestSub("test_sub2"))
testSuit.addTest(TestCount("test_Not"))
# testSuit.addTest(TestCount("test_add"))
runner=unittest.TextTestRunner()
runner.run(testSuit)
``` |
{
"source": "1049965823/venus",
"score": 2
} |
#### File: unit/api/test_search_action.py
```python
import unittest
from venus.modules.search.action import SearchCore
class TestSearchAction(unittest.TestCase):
def test_get_interval(self):
action = SearchCore()
want1 = "1s"
want2 = "1秒"
want3 = "1second"
end_time = 100000000
start_time = end_time - 50
res1, res2, res3 = action.get_interval(start_time, end_time)
self.assertEqual(want1, res1)
self.assertEqual(want2, res2)
self.assertEqual(want3, res3)
want1 = "10s"
want2 = "10秒"
want3 = "10seconds"
start_time = end_time - 500
res1, res2, res3 = action.get_interval(start_time, end_time)
self.assertEqual(want1, res1)
self.assertEqual(want2, res2)
self.assertEqual(want3, res3)
want1 = "30s"
want2 = "30秒"
want3 = "30seconds"
start_time = end_time - 1500
res1, res2, res3 = action.get_interval(start_time, end_time)
self.assertEqual(want1, res1)
self.assertEqual(want2, res2)
self.assertEqual(want3, res3)
want1 = "1m"
want2 = "1分钟"
want3 = "1minute"
start_time = end_time - 50 * 60
res1, res2, res3 = action.get_interval(start_time, end_time)
self.assertEqual(want1, res1)
self.assertEqual(want2, res2)
self.assertEqual(want3, res3)
want1 = "10m"
want2 = "10分钟"
want3 = "10minutes"
start_time = end_time - 500 * 60
res1, res2, res3 = action.get_interval(start_time, end_time)
self.assertEqual(want1, res1)
self.assertEqual(want2, res2)
self.assertEqual(want3, res3)
want1 = "30m"
want2 = "30分钟"
want3 = "30minutes"
start_time = end_time - 1500 * 60
res1, res2, res3 = action.get_interval(start_time, end_time)
self.assertEqual(want1, res1)
self.assertEqual(want2, res2)
self.assertEqual(want3, res3)
want1 = "1h"
want2 = "1小时"
want3 = "1hour"
start_time = end_time - 50 * 3600
res1, res2, res3 = action.get_interval(start_time, end_time)
self.assertEqual(want1, res1)
self.assertEqual(want2, res2)
self.assertEqual(want3, res3)
want1 = "3h"
want2 = "3小时"
want3 = "3hours"
start_time = end_time - 150 * 3600
res1, res2, res3 = action.get_interval(start_time, end_time)
self.assertEqual(want1, res1)
self.assertEqual(want2, res2)
self.assertEqual(want3, res3)
want1 = "6h"
want2 = "6小时"
want3 = "6hours"
start_time = end_time - 300 * 3600
res1, res2, res3 = action.get_interval(start_time, end_time)
self.assertEqual(want1, res1)
self.assertEqual(want2, res2)
self.assertEqual(want3, res3)
want1 = "12h"
want2 = "12小时"
want3 = "12hours"
start_time = end_time - 700 * 3600
res1, res2, res3 = action.get_interval(start_time, end_time)
self.assertEqual(want1, res1)
self.assertEqual(want2, res2)
self.assertEqual(want3, res3)
want1 = "24h"
want2 = "1天"
want3 = "1day"
start_time = end_time - 50 * 86400
res1, res2, res3 = action.get_interval(start_time, end_time)
self.assertEqual(want1, res1)
self.assertEqual(want2, res2)
self.assertEqual(want3, res3)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "104H/cvat",
"score": 2
} |
#### File: imgprocessingdetector/nuclio/main.py
```python
from model_handler import ModelHandler
import json
import io
import base64
from PIL import Image
labels = {0 : "crop", 1 : "weed"}
def init_context(context):
context.logger.info("Init context... 0%")
model = ModelHandler()
context.user_data.model_handler = model.infer
context.logger.info("Init context...100%")
def handler(context, event):
context.logger.info("call handler")
data = event.body
print(event)
buf = io.BytesIO(base64.b64decode(data["image"]))
image = Image.open(buf)
predictions = context.user_data.model_handler(image)
box, label = predictions
results = []
for box, label in zip(box, label):
results.append({
"confidence": 1,
"label": labels[label],
"points": box,
"type": "polygon"
})
return context.Response(body=json.dumps(results),
headers={},
content_type='application/json',
status_code=200)
``` |
{
"source": "1050669722/LeetCode-Answers",
"score": 4
} |
#### File: LeetCode-Answers/Python/problem0002_test02.py
```python
# Definition for singly-linked list (assumed LeetCode scaffold, added so the
# file is self-contained):
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
num1 = []
while l1:
num1.append(l1.val)
l1 = l1.next
num1 = int(''.join(reversed([str(n) for n in num1])))
num2 = []
while l2:
num2.append(l2.val)
l2 = l2.next
num2 = int(''.join(reversed([str(n) for n in num2])))
tmp = num1 + num2
num = []
while tmp:
num.append(tmp % 10)
tmp //= 10
l = ListNode(0)
t = l
for i, n in enumerate(num):
t.val = n
if i != len(num) - 1:
t.next = ListNode(0)
t = t.next
return l
```
#### File: LeetCode-Answers/Python/problem0003.py
```python
# solu = Solution()
# s = 'abcabcbb'
# s = 'bbbbb'
# s = 'pwwkew'
# s = ''
# s = ' '
# s = 'c'
# s = 'au'
# s = "kwssiouw"#fydhihvgjuejmzbudeybgigseylmohjtgodovyxgubphcrbfxcjfkpxqpkfdsqz"
# print(solu.lengthOfLongestSubstring(s))
# time2 = time.perf_counter()
# print(time2-time1)
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
if len(s) <= 1:
    return len(s)  # the function must return a length (int), not the string itself
if len(set(s)) == 1:
return 1
p, q = 0, 0
count = 0
ans = 0
while q <= len(s)-1:
if self.fun(s[p:q+1]):
print(1, p, q)
ans = max(ans, q-p)
q += 1
else:
print(2, p, q)
ind = s[p:q+1].index(s[q]) + count
count = len(s[p:q+1])
p = ind + 1
ans = max(ans, q-p)
q += 1
ans = max(ans, q-p)
return ans
def fun(self, s):
if len(s) == len(set(s)):
return True
else:
return False
solu = Solution()
s = "abcabcbb"
# s = "bbbbb"
# # s = "pwwkew"
# # s = ''
# # s = 's'
print(solu.lengthOfLongestSubstring(s))
```
#### File: LeetCode-Answers/Python/problem0005_test02.py
```python
class Solution:
def longestPalindrome(self, s: str) -> str:
if len(s) == 0:
return ''
if len(s) == 1:
return s
if len(s) == 2:
if s[0] == s[1]:
return s
else:
return s[0]
dp = [1] * len(s)
ans = 1
res = s[0]
for k in range(len(dp)):
# res = s[k]
p, q = k-1, k+1
while p >= 0 and q <= len(s)-1:
if s[p] == s[q]:
dp[k] += 2
# print(s[p:q+1])
# print(dp[k])
# print(ans)
# print('\n')
if dp[k] > ans:
ans = dp[k]
# print(s[p:q+1])
res = s[p:q+1]
p -= 1
q += 1
else:
break
res1 = res
dp = [1] * (len(s)-1)
ans = 2
res = s[0:2]
for k in range(len(dp)):
if s[k] == s[k+1]:
res = s[k:k+2]
p, q = k-1, k+1+1
while p >= 0 and q <= len(s)-1:
if s[p] == s[q]:
dp[k] += 2
# print(s[p:q+1])
# print(dp[k])
# print(ans)
# print('\n')
if dp[k] > ans:
ans = dp[k]
# print(s[p:q+1])
res = s[p:q+1]
p -= 1
q += 1
else:
break
res2 = res
if len(res1) >= len(res2):
return res1
else:
return res2
solu = Solution()
s = "babad"
s = "cbbd"
s = 'ac'
print(solu.longestPalindrome(s))
```
#### File: LeetCode-Answers/Python/problem0007.py
```python
import time
time1 = time.perf_counter()
#class Solution():
# def reverse(self, x):
# a = [i for i in str(x)]
# a.reverse()
# for k in range(len(a)):
# if a[k] != '0':
# if a[-1] == '-':
# a = a[k:-1]
# a.insert(0,'-')
# if int(''.join(a)) < (-2)**31 or int(''.join(a)) > 2**31-1:
# return 0
# else:
# return int(''.join(a))
# elif int(''.join(a[k:])) < (-2)**31 or int(''.join(a[k:])) > 2**31-1:
# return 0
# else:
# return int(''.join(a[k:]))
# return x  # one more case should have been handled here
#class Solution():
# def reverse(self, x: int) -> int:
# p = -1 if x<0 else 1
# x = p * x
# x = list(str(x)) #[i for i in str(x)]
# x.reverse()
# x = int(''.join(x))
# x = p * x
# if x< -2 ** 31 or x>2**31 - 1:
# return 0
# return x
class Solution():
def reverse(self, x):
p = -1 if x<0 else 1
x *= p  # hide the sign for now
x = [i for i in str(x)] # list(str(x))
x.reverse()
x = p * int(''.join(x))  # restore the sign; a digit string can be cast with int(), but a list cannot
if x > -2**(31) and x < 2**31-1:
return x
return 0
#x = 123
#x = -123
x = 120
#x = 0
#x = 1534236469
solu = Solution()
print(solu.reverse(x))
time2 = time.perf_counter()
print(time2-time1)
```
#### File: LeetCode-Answers/Python/problem0015_test02.py
```python
from typing import List

class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
if len(nums) < 3:
return []
nums.sort()  # after sorting, duplicates appear in adjacent positions
ans = []
for first in range(0, len(nums), 1):  # sorting plus two pointers removes one level of complexity
if nums[first] == nums[first - 1] and first > 0:
continue
third = len(nums) - 1
for second in range(first + 1, third):
if nums[second] == nums[second - 1] and second > first + 1:
continue
while second < third and nums[first] + nums[second] + nums[third] > 0:
third -= 1
if second == third:
break
if nums[first] + nums[second] + nums[third] < 0:
continue
if nums[first] + nums[second] + nums[third] == 0:
ans.append([nums[first], nums[second], nums[third]])
return ans
```
#### File: LeetCode-Answers/Python/problem0018_test02.py
```python
from typing import *
class Solution:
def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
if len(nums) < 4:
return []
nums.sort()
ans = []
for first in range(0, len(nums), 1):  # the array is sorted, so no duplicates appear in the result
if nums[first] == nums[first - 1] and first > 0:
continue
for second in range(first + 1, len(nums), 1):
if nums[second] == nums[second - 1] and second > first + 1:
continue
fourth = len(nums) - 1
for third in range(second + 1, len(nums), 1):
if nums[third] == nums[third - 1] and third > second + 1:
continue
while third < fourth and nums[first] + nums[second] + nums[third] + nums[fourth] > target:
fourth -= 1
if third == fourth:
break
if nums[first] + nums[second] + nums[third] + nums[fourth] < target:
continue
if nums[first] + nums[second] + nums[third] + nums[fourth] == target:
ans.append([nums[first], nums[second], nums[third], nums[fourth]])
return ans
```
#### File: LeetCode-Answers/Python/problem0019_test02.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
if not (head and head.next):
return None
tmp = head
head = ListNode(None)
head.next = tmp
low, fast = head, head
for _ in range(n+1):
fast = fast.next
while fast:
fast = fast.next
low = low.next
low.next = low.next.next
return head.next
```
#### File: LeetCode-Answers/Python/problem0021.py
```python
import time
time1 = time.perf_counter()
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
# temp2 = l2
# l2 = ListNode(l1.val)
# l2.next = temp2
# temp1 = l1
L = ListNode(None)
temp = L  # temp aliases L: temp itself is discarded at the end, but the changes it made to L accumulate
# temp = ListNode(None)
while l1 and l2:
if l1.val < l2.val:
temp.next = l1  # mutating temp also changes L
l1 = l1.next
temp = temp.next  # rebinding temp cancels its alias with L, but the new value temp.next was reached via L.next, so the next mutation lands on L.next
# print(1)
else: # if l2.val <= l1.val:  # the first if's effects would change the next condition, so if-if and if-else are not equivalent
temp.next = l2
l2 = l2.next
temp = temp.next
# print(2)
# print(3)
if l1:
temp.next = l1
else:
temp.next = l2
return L.next #temp.next
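# Aliasing recap (added note): `temp = L` makes two names for one ListNode, so
# `temp.next = ...` mutates the object L also references; rebinding
# `temp = temp.next` only moves the local name and leaves the chain hung off L
# intact, which is why `L.next` holds the merged list at the end.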
'''
Sometimes it is easier to write it out first and refactor afterwards.
'''
#class Solution:
# def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
# res = ListNode(None)
# node = res
# while l1 and l2:
# if l1.val<l2.val:
# node.next,l1 = l1,l1.next  # tuple assignment also evaluates left to right
# else:
# node.next,l2 = l2,l2.next
# node = node.next
# if l1:
# node.next = l1
# else:
# node.next = l2
# return res.next
a = ListNode(1)
a.next = ListNode(2)
a.next.next = ListNode(4)
b = ListNode(1)
b.next = ListNode(3)
b.next.next = ListNode(4)
#a = ListNode(2)
#b = ListNode(1)
solu = Solution()
c = solu.mergeTwoLists(a,b)
while c:
print(c.val)
c = c.next
time2 = time.perf_counter()
print(time2-time1)
```
#### File: LeetCode-Answers/Python/problem0024.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def swapPairs(self, head: ListNode) -> ListNode:
if not (head and head.next):
return head
ans = ListNode(None)
pre = ans
cur = head
while cur and cur.next:  # where the .next error appears differs: dropping 'cur.next' fails inside the loop, dropping 'cur' fails at this test; raising is not the same as evaluating False
tmp = cur.next  # treat each side of the assignment as a whole reference; the two become aliased, and a later rebinding of either name cancels the alias
cur.next = cur.next.next
tmp.next = cur
pre.next = tmp
pre = pre.next
pre = pre.next
cur = cur.next
return ans.next
```
#### File: LeetCode-Answers/Python/problem0036.py
```python
class Solution:
def isValidSudoku(self, board: list) -> bool:
temp = [0] * 9
for k in range(9):
temp[k] = [0] * 9
# print(temp)
for i, L in enumerate(board):
for j in range(len(L)):
if '0'<=board[i][j] and board[i][j]<='9':
temp[i][j] = int(board[i][j])
else:
temp[i][j] = 0
# mark = 1
for k in range(len(temp)):
if not self.judge(temp[k]):
# mark = 0
# break
return False
for q in range(len(temp[0])):
a = []
for p in range(len(temp)):
a.append(temp[p][q])
if not self.judge(a):
# mark = 0
# break
return False
for m in range(0,9,3):
for n in range(0,9,3):
a = []
for p in range(m,m+3):
for q in range(n,n+3):
a.append(temp[p][q])
# print(a,'\n')
if not self.judge(a):
# mark = 0
# break
return False
# print(temp)
return True#mark == 1#
def judge(self, nums):
d = {}
for k in range(len(nums)):
try:
d[nums[k]] += 1
except:
d[nums[k]] = 1
for key, value in d.items():
# print(k,value)
if value != 1 and key != 0:
return False
# print(d)
return True
class Solution:
def isValidSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: bool
"""
# init data
rows = [{} for i in range(9)]
columns = [{} for i in range(9)]
boxes = [{} for i in range(9)]
# validate a board
for i in range(9):
for j in range(9):
num = board[i][j]
if num != '.':
num = int(num)
box_index = (i // 3 ) * 3 + j // 3
# keep the current cell value
rows[i][num] = rows[i].get(num, 0) + 1
columns[j][num] = columns[j].get(num, 0) + 1
boxes[box_index][num] = boxes[box_index].get(num, 0) + 1
# check if this value has been already seen before
if rows[i][num] > 1 or columns[j][num] > 1 or boxes[box_index][num] > 1:
return False
return True
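# Added note: (i // 3) * 3 + j // 3 maps cell (i, j) to one of the nine 3x3
# boxes, numbered 0..8 in row-major order; e.g. cell (4, 7) lands in box
# 1 * 3 + 2 = 5.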
#Author: LeetCode
#Link: https://leetcode-cn.com/problems/two-sum/solution/you-xiao-de-shu-du-by-leetcode/
#Source: LeetCode (leetcode-cn.com)
#Copyright belongs to the author. Commercial reuse requires the author's authorization; non-commercial reuse must credit the source.
solu = Solution()
board = [
["5","3",".",".","7",".",".",".","."],
["6",".",".","1","9","5",".",".","."],
[".","9","8",".",".",".",".","6","."],
["8",".",".",".","6",".",".",".","3"],
["4",".",".","8",".","3",".",".","1"],
["7",".",".",".","2",".",".",".","6"],
[".","6",".",".",".",".","2","8","."],
[".",".",".","4","1","9",".",".","5"],
[".",".",".",".","8",".",".","7","9"]
]
#board = [
# ["8","3",".",".","7",".",".",".","."],
# ["6",".",".","1","9","5",".",".","."],
# [".","9","8",".",".",".",".","6","."],
# ["8",".",".",".","6",".",".",".","3"],
# ["4",".",".","8",".","3",".",".","1"],
# ["7",".",".",".","2",".",".",".","6"],
# [".","6",".",".",".",".","2","8","."],
# [".",".",".","4","1","9",".",".","5"],
# [".",".",".",".","8",".",".","7","9"]
# ]
print(solu.isValidSudoku(board))
```
#### File: LeetCode-Answers/Python/problem0048.py
```python
class Solution:
def rotate(self, matrix: list) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
for p in range(len(matrix)):
for q in range(p, len(matrix[0])):
matrix[p][q], matrix[q][p] = matrix[q][p], matrix[p][q]
for p in range(len(matrix)):
matrix[p].reverse()
solu = Solution()
matrix = [[1,2,3],
[4,5,6],
[7,8,9]]
matrix = [[ 5, 1, 9,11],
[ 2, 4, 8,10],
[13, 3, 6, 7],
[15,14,12,16]]
solu.rotate(matrix)
print(matrix)
```
#### File: LeetCode-Answers/Python/problem0050.py
```python
class Solution:
def myPow(self, x: float, n: int) -> float:
# if n == 1:
# return x
# if x!=0 and n == 0:
# return 1
# if x == 0 and n <= 0:
# return None
# if n>0:
# if n%2 == 0:
# return self.myPow(x, n//2) * self.myPow(x, n//2)
# else:
# return self.myPow(x, n//2) * self.myPow(x, n//2) * x
# if n<0:
# if n%2 == 0:
# return 1. / ( self.myPow(x, -n//2) * self.myPow(x, -n//2) )
# else:
# return 1. / ( self.myPow(x, -n//2) * self.myPow(x, -n//2) * x )
## if n == 1:
## return x
## if n == 0:
## return 1
# if n == 1:
# return x
# if x!=0 and n == 0:
# return 1
# if x == 0 and n <= 0:
# return None
# if n>0:
# temp = self.myPow(x, n//2)
# if n%2 == 0:
# return temp * temp
# else:
# return temp * temp * x
# if n<0:
# temp = self.myPow(x, -n//2)
# if n%2 == 0:
# return 1. / ( temp * temp )
# else:
# return 1. / ( temp * temp * x )
# if n == 1: return x
# if n == 0: return 1
# t = self.myPow(x, abs(n) // 2)
# if n % 2 == 0:
# t = t * t
# else: t = x * t * t
# if n < 0:
# return 1.0 /t
# return t
if n<0:
n = -n
x = 1. / x
ans = 1
cp = x
while n>=1:
ans = ans*cp**(n%2)
# cp **= 2
cp *= cp
n //= 2
return ans
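# Added note: the loop above is iterative fast exponentiation -- each pass
# multiplies in the current low bit of n and squares the running base, so
# myPow(2.0, 10) consumes the bits 0,1,0,1 of 10 and returns 1024.0 in
# O(log n) multiplications.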
solu = Solution()
x, n = 2.00000, 10
#x, n = 2.00000, 4
#x, n = 2.10000, 3
#x, n = 2.00000, -2
#x, n = 0.00001, 2147483647#2**31-1#
#x, n = 0.00000, -1
print(solu.myPow(x, n))
```
#### File: LeetCode-Answers/Python/problem0057.py
```python
class Solution():
def insert(self, intervals: list, newInterval: list) -> list:
# intervals.append(newInterval)
# intervals = self.merge_sort(intervals)
# k = -1
# for _ in range(len(intervals)):
# k += 1
# if k == len(intervals) - 1: break
# if intervals[k][-1] >= intervals[k+1][0]:
# intervals.insert(k, [intervals[k][0], max(intervals[k][-1],intervals[k+1][-1])])
# intervals.pop(k+1)
# intervals.pop(k+1)
# k -= 1
# return intervals
# intervals.append(newInterval)
# intervals = self.merge_sort(intervals)
# if intervals == []:
# intervals.append(newInterval)
# return intervals
# if len(intervals) == 1:
# if intervals[0][0] <= newInterval[0]:
# intervals.append(newInterval)
# else:
# intervals.insert(0, newInterval)
# if intervals[0][-1] >= intervals[1][0]:
# intervals.insert(0, [intervals[0][0], max(intervals[0][-1],intervals[1][-1])])
# intervals.pop(1)
# intervals.pop(1)
# return intervals
# else:
# head = 0
# tail = len(intervals) - 1
# while head < tail:
# mid = (head + tail) // 2
# if intervals[mid][0] < newInterval[0]:
# head = mid + 1
# elif intervals[mid][0] > newInterval[0]:
# tail = mid - 1
# else:
# break
# k = mid - 2
# intervals.insert(mid, newInterval)
# if (intervals[mid][0] == intervals[mid-1][0] or intervals[mid][0] == intervals[mid+1][0]) and mid-1>=0 and mid+1<=len(intervals)-1:
## print(1)
# pass
# elif intervals[mid][0] < intervals[mid-1][0] and mid-1>=0:
## print(2)
# # print(intervals)
# # print(mid)
# intervals[mid], intervals[mid-1] = intervals[mid-1], intervals[mid]
# k -= 1
# elif intervals[mid][0] > intervals[mid+1][0] and mid+1<=len(intervals)-1:
## print(3)
## print(intervals)
## print(mid)
# intervals[mid], intervals[mid+1] = intervals[mid+1], intervals[mid]
# k += 1
# # print(intervals)
# k = -1
# for _ in range(len(intervals)):
# k += 1
# if k == len(intervals) - 1: break
# if intervals[k][-1] >= intervals[k+1][0]:
# intervals.insert(k, [intervals[k][0], max(intervals[k][-1],intervals[k+1][-1])])
# intervals.pop(k+1)
# intervals.pop(k+1)
# k -= 1
# return intervals
mark = 0
count = 0
for m in range(len(intervals)):
if newInterval[0] <= intervals[m][0]:
mark = m
count += 1
break
if count == 0: mark = len(intervals)
intervals.insert(mark, newInterval)
k = -1
for _ in range(len(intervals)):
k += 1
if k == len(intervals) - 1: break
if intervals[k][-1] >= intervals[k+1][0]:
intervals.insert(k, [intervals[k][0], max(intervals[k][-1],intervals[k+1][-1])])
intervals.pop(k+1)
intervals.pop(k+1)
k -= 1
return intervals
def merge(self, left, right):
result = []
p, q = 0, 0
while p<len(left) and q<len(right):
if left[p][0] <= right[q][0]:
result.append(left[p])
p += 1
else:
result.append(right[q])
q += 1
result += left[p:]
result += right[q:]
return result
def merge_sort(self, List):
if len(List) <= 1:
return List
else:
num = len(List) // 2
left = self.merge_sort(List[:num])
right = self.merge_sort(List[num:])
return self.merge(left, right)
solu = Solution()
#intervals, newInterval = [[1,3],[6,9]], [2,5]
intervals, newInterval = [[1,2],[3,5],[6,7],[8,10],[12,16]], [4,8]
#intervals, newInterval = [], [5,7]
intervals, newInterval = [[1,5]], [2,3]
#intervals, newInterval = [[1,2],[3,5],[6,7],[8,10],[12,16]], [4,8]
print(solu.insert(intervals, newInterval))
```
#### File: LeetCode-Answers/Python/problem0063.py
```python
class Solution:
def uniquePathsWithObstacles(self, obstacleGrid: list) -> int:
m = len(obstacleGrid)
n = len(obstacleGrid[0])
a = []
for _ in range(m):
a.append([0]*n)
a[m-1][n-1] = 1 - obstacleGrid[m-1][n-1]
for k in range(n)[n-2::-1]:  # note: list(range(0, -1, -1)) == [0], so a length-1 row surprisingly still gets one iteration
if obstacleGrid[m-1][k] == 1:
a[m-1][k] = 0
else:
try:
a[m-1][k] = a[m-1][k+1]
except:
pass
for k in range(m)[m-2::-1]:
if obstacleGrid[k][-1] == 1:
a[k][-1] = 0
else:
try:
a[k][-1] = a[k+1][-1]
except:
pass
for k in range(m-2, -1, -1):
for p in range(n-2, -1, -1):
if obstacleGrid[k][p] == 1:
a[k][p] = 0
else:
a[k][p] = a[k+1][p] + a[k][p+1]
return a[0][0]
solu = Solution()
obstacleGrid = [[0,0,0],
[0,1,0],
[0,0,0]]
obstacleGrid = [[1]]
obstacleGrid = [[0]]
print(solu.uniquePathsWithObstacles(obstacleGrid))
```
#### File: LeetCode-Answers/Python/problem0067_test02.py
```python
class Solution:
def addBinary(self, a: str, b: str) -> str:
a, b = list(a), list(b)
a.reverse()
b.reverse()
length = max(len(a), len(b))
if len(a) > len(b):
for _ in range(length - min(len(a), len(b))):
b.append('0')
elif len(a) < len(b):
for _ in range(length - min(len(a), len(b))):
a.append('0')
ans = []
carry = 0
for k in range(length):
ans.append( str( (int(a[k]) ^ int(b[k])) ^ carry ) )
# carry = ( int(a[k]) & int(b[k]) ) & carry
carry = ( int(a[k]) & int(b[k]) ) | ( int(a[k]) & carry ) | ( carry & int(b[k]) )
if carry:
ans.append( str(carry) )
return ''.join(reversed(ans))
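# Added note: this is a ripple-carry full adder on the bit strings -- the sum
# bit is (a XOR b) XOR carry and the carry-out is the majority function
# (a AND b) OR (a AND carry) OR (carry AND b); e.g. '11' + '1' -> '100'.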
solu = Solution()
a, b = '11', '1'
# a, b = '1010', '1011'
print(solu.addBinary(a, b))
```
#### File: LeetCode-Answers/Python/problem0070.py
```python
class Solution:
def climbStairs(self, n: int) -> int:
if n == 1:
return 1
if n == 2:
return 2
a = {}
a[1] = 1
a[2] = 2
for i in range(3, n+1):
a[i] = a[i-1] + a[i-2]
return a[n]
# if n not in a.keys():
# a[n] = self.climbStairs(n-1) + self.climbStairs(n-2)
# return a[n]
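# Added note: a[i] = a[i-1] + a[i-2] is the Fibonacci recurrence -- the top
# step is reached either by a 1-step from stair n-1 or a 2-step from stair
# n-2; e.g. climbStairs(4) == 5.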
solu = Solution()
n = 2
n = 3
print(solu.climbStairs(n))
```
#### File: LeetCode-Answers/Python/problem0072.py
```python
class Solution:
def minDistance(self, word1: str, word2: str) -> int:
dp = [[0] * (len(word2)+1) for _ in range(len(word1)+1)]
for p in range(len(word1)+1):
dp[p][0] = p
for q in range(len(word2)+1):
dp[0][q] = q
for p in range(1, len(word1)+1):
for q in range(1, len(word2)+1):
if word1[p-1] == word2[q-1]:  # the dp table has an extra row/column for the empty string
dp[p][q] = 1 + min(dp[p-1][q], dp[p][q-1], dp[p-1][q-1]-1)
else:
dp[p][q] = 1 + min(dp[p-1][q], dp[p][q-1], dp[p-1][q-1])
return dp[-1][-1]
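# Added worked example: minDistance("horse", "ros") == 3
# (replace 'h' with 'r', delete 'r', delete 'e'), which is what dp[-1][-1]
# evaluates to for those inputs.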
```
#### File: LeetCode-Answers/Python/problem0072_test02.py
```python
class Solution:
def minDistance(self, word1: str, word2: str) -> int:
dp = [ [0] * (len(word2)+1) for _ in range(len(word1)+1) ]
for k in range(1, len(word2)+1):
dp[0][k] = k
for k in range(1, len(word1)+1):
dp[k][0] = k
for p in range(1, len(word1)+1):
for q in range(1, len(word2)+1):
if word1[p-1] == word2[q-1]:
dp[p][q] = 1 + min(dp[p-1][q-1]-1, dp[p-1][q], dp[p][q-1])
else:
dp[p][q] = 1 + min(dp[p-1][q-1], dp[p-1][q], dp[p][q-1])
return dp[-1][-1]
```
#### File: LeetCode-Answers/Python/problem0073_temp.py
```python
class Solution:
def setZeroes(self, matrix: list) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
d = {}
d['R'] = []
d['C'] = []
for r, val in enumerate(matrix):
for c, v in enumerate(val):
if v == 0:
d['R'].append(r)
d['C'].append(c)
d['R'] = list(set(d['R']))
d['C'] = list(set(d['C']))
for p in range(len(matrix)):
for q in range(len(matrix[p])):
if p in d['R'] or q in d['C']:
matrix[p][q] = 0
solu = Solution()
matrix = [
[0,1,2,0],
[3,4,5,2],
[1,3,1,5]
]
solu.setZeroes(matrix)
print(matrix)
```
#### File: LeetCode-Answers/Python/problem0075.py
```python
from typing import *
class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
# ptr = 0
# for k in range(len(nums)):
# if nums[k] == 0:
# nums[ptr], nums[k] = nums[k], nums[ptr]
# ptr += 1
#
# # ptr = 0
# for k in range(len(nums)):
# if nums[k] == 1:
# nums[ptr], nums[k] = nums[k], nums[ptr]
# ptr += 1
# p0, p1 = 0, 0
# for k in range(len(nums)):
# if nums[k] == 1:
# nums[p1], nums[k] = nums[k], nums[p1]
# p1 += 1
#
# if nums[k] == 0:
# nums[p0], nums[k] = nums[k], nums[p0]
# if p0 < p1:
# nums[p1], nums[k] = nums[k], nums[p1]
# p0 += 1
# p1 += 1
#
# # print(nums, 'p0: ', p0, ' ', 'p1: ',p1)
if len(nums) > 1:
p0, p2 = 0, len(nums) - 1
k = 0
while k <= p2:
# print(nums, p0, p2, k)
if nums[k] == 2:
while nums[k] == 2 and k <= p2:
# print(p2, k)
nums[p2], nums[k] = nums[k], nums[p2]
p2 -= 1
if nums[k] == 0:
nums[p0], nums[k] = nums[k], nums[p0]
p0 += 1
k += 1
# n = len(nums)
# p0, p2 = 0, n - 1
# i = 0
# while i <= p2:
# while i <= p2 and nums[i] == 2:
# nums[i], nums[p2] = nums[p2], nums[i]
# p2 -= 1
# if nums[i] == 0:
# nums[i], nums[p0] = nums[p0], nums[i]
# p0 += 1
# i += 1
solu = Solution()
nums = [2,0,2,1,1,0]
nums = [2,0,1]
nums = [1, 2, 0]
# nums = [2]
# nums = [2,2]
solu.sortColors(nums)
print(nums)
```
#### File: LeetCode-Answers/Python/problem0083.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def deleteDuplicates(self, head: ListNode) -> ListNode:
# ans = ListNode(None)
# tmp = ans
# s = set()
# while head:
# if head.val not in s:
# s.add(head.val)
# tmp.val = head.val
# head = head.next
# if head:
# tmp.next = ListNode(None)
# tmp = tmp.next
# else:
# head = head.next
# return ans
ans = head
tmp = ans
while tmp and tmp.next:
if tmp.next.val != tmp.val:
tmp = tmp.next
else:
tmp.next = tmp.next.next
# tmp = tmp.next
# tmp = tmp.next
return ans
```
#### File: LeetCode-Answers/Python/problem0083_test02.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def deleteDuplicates(self, head: ListNode) -> ListNode:
tmp = head
while tmp and tmp.next:
if tmp.next.val == tmp.val:
tmp.next = tmp.next.next
else:
tmp = tmp.next
return head
```
#### File: LeetCode-Answers/Python/problem0088.py
```python
class Solution:
def merge(self, nums1: list, m: int, nums2: list, n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
# ans = []
# nums1 = nums1[:m]
# nums2 = nums2[:n]
# p, q = 0, 0
# while p<len(nums1) and q<len(nums2):
# if nums1[p] <= nums2[q]:
# ans.append(nums1[p])
# p += 1
# else:
# ans.append(nums2[q])
# q += 1
# ans += nums1[p:]
# ans += nums2[q:]
## nums1 = ans
# return ans
# ans = []
# nums1 = nums1[:m]
# nums2 = nums2[:n]
for k in range(len(nums1)-m):
nums1.pop()
for k in range(len(nums2)-n):
nums2.pop()
p, q = 0, 0
while p<len(nums1) and q<len(nums2):
if nums1[p] <= nums2[q]:
# ans.append(nums1[p])
p += 1
else:
# ans.append(nums2[q])
nums1.insert(p, nums2[q])
q += 1
# ans += nums1[p:]
# ans += nums2[q:]
# nums1 = ans
temp = nums2[q:]
while temp:
nums1.append(temp.pop(0))
return None
solu = Solution()
#nums1 = [1,2,3,0,0,0]
#m = 3
#nums2 = [2,5,6]
#n = 3
nums1 = [2,0]
m = 1
nums2 = [1]
n = 1
print(solu.merge(nums1, m, nums2, n))
print(nums1)
```
#### File: LeetCode-Answers/Python/problem0108.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def sortedArrayToBST(self, nums: list) -> TreeNode:
if nums == []:
return None
num = len(nums) // 2
T = TreeNode(nums[num])
T.left = self.sortedArrayToBST(nums[:num])
T.right = self.sortedArrayToBST(nums[num+1:])
return T
```
#### File: LeetCode-Answers/Python/problem0125.py
```python
class Solution:
def isPalindrome(self, s: str) -> bool:
# a = []
# for k in s:
# if ('a'<=k.lower() and k.lower()<='z') or ('0'<=k and k<='9'):
# a.append(k.lower())
## print(a)
# return a == a[::-1]
p = 0
q = len(s) - 1
while p < q:  # nested loops do not necessarily raise the overall time complexity
while (p < q) and (not(('a'<=s[p].lower() and s[p].lower()<='z') or ('0'<=s[p] and s[p]<='9'))):
p += 1
while (p < q) and (not(('a'<=s[q].lower() and s[q].lower()<='z') or ('0'<=s[q] and s[q]<='9'))):
q -= 1
# print(s[p], s[q])
if s[p].lower() != s[q].lower():
return False
p += 1
q -= 1
return True
solu = Solution()
s = "A man, a plan, a canal: Panama"
s = "race a car"
s = "`l;`` 1o1 ??;l`"
print(solu.isPalindrome(s))
```
#### File: LeetCode-Answers/Python/problem0129.py
```python
# Definition for a binary tree node (assumed LeetCode scaffold, added so the
# file is self-contained):
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

class Solution:
def sumNumbers(self, root: TreeNode) -> int:
def worker(curroot, pretotal):
if not curroot:
return 0
total = pretotal * 10 + curroot.val
if not (curroot.left or curroot.right):
return total
else:
return worker(curroot.left, total) + worker(curroot.right, total)
return worker(root, 0)
```
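A quick smoke test for this file (hypothetical input, not part of the original solution): the tree below encodes the root-to-leaf numbers 12 and 13, so `sumNumbers` should return 25, assuming the `TreeNode` and `Solution` classes above are in scope.
```python
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
print(Solution().sumNumbers(root))  # expected: 25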
#### File: LeetCode-Answers/Python/problem0148.py
```python
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
#class Solution(object):
# def sortList(self, head):
# """
# :type head: ListNode
# :rtype: ListNode
# """
# a = list()
# while head:
# a.append(head.val)
# head = head.next
## a = sorted(a)
## a.reverse()
# a = sorted(a, reverse = True)
## print(a)
# head = ListNode(None)
# temp = head
# while temp:
# temp.next = ListNode(a.pop())
## print(temp)
# temp = temp.next
# return head.next
class Solution:
    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
        if l1 and l2:
            if l1.val > l2.val:
                l1, l2 = l2, l1
            l1.next = self.mergeTwoLists(l1.next, l2)
        return l1 or l2

    def sortList(self, head: ListNode) -> ListNode:
        # split with slow/fast pointers, sort both halves recursively, then merge
        if not (head and head.next): return head
        pre, slow, fast = None, head, head
        while fast and fast.next: pre, slow, fast = slow, slow.next, fast.next.next
        pre.next = None
        return self.mergeTwoLists(*map(self.sortList, (head, slow)))
```
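A minimal sketch of how this file's `sortList` could be exercised (hypothetical values, assuming the classes above are in scope):
```python
# Build 4 -> 2 -> 1 -> 3, sort it, and read the values back out.
head = ListNode(4)
head.next = ListNode(2)
head.next.next = ListNode(1)
head.next.next.next = ListNode(3)
node = Solution().sortList(head)
vals = []
while node:
    vals.append(node.val)
    node = node.next
print(vals)  # expected: [1, 2, 3, 4]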
#### File: LeetCode-Answers/Python/problem0153.py
```python
class Solution:
def maxProduct(self, nums: list) -> int:
if nums == []:
return []
MAX = 1
MIN = 1
Max = -100000
# ans = [nums[0]]
for k in range(0, len(nums)):
if nums[k] < 0:
MAX, MIN = MIN, MAX
MAX = max(nums[k], MAX*nums[k])
MIN = min(nums[k], MIN*nums[k])
Max = max(Max, MAX)
# print(Max)
# if nums[k] <= MAX*nums[k]:
# MAX = MAX*nums[k]
# ans.append(nums[k])
# else:
# MAX = nums[k]
# ans = [nums[k]]
# if nums[k] <= MAX*nums[k]:
# MIN = nums[k]
# ans = [nums[k]]
#
# else:
# MIN = MIN*nums[k]
# ans.append(nums[k])
        return Max  # (a commented-out variant returned ans instead)
solu = Solution()
nums = [2,3,-2,4]
nums = [-2,0,-1]
#nums = [2,3,-2,4]
nums = [-4,-3,-2]
print(solu.maxProduct(nums))
```
#### File: LeetCode-Answers/Python/problem0162.py
```python
class Solution:
def findPeakElement(self, nums: list) -> int:
        ans = 0  # candidate index for the answer
        before = nums[0]  # the previous number in each round
for k in range(1, len(nums)):
if before > nums[k]:
ans = k-1
break
else:
ans = k
before = nums[k]
return ans
solu = Solution()
nums = [1,2,3,1]
#nums = [1,2,1,3,5,6,4]
print(solu.findPeakElement(nums))
```
#### File: LeetCode-Answers/Python/problem0163.py
```python
class Solution:
def findMissingRanges(self, nums: list, lower: int, upper: int) -> list:
if len(nums) == 1 and nums[0] == lower:
return [str(lower+1) + '->' + str(upper)]
if len(nums) == 1 and nums[0] == upper:
return [str(lower) + '->' + str(upper-1)]
if len(nums) == 2 and nums[0] == lower and nums[-1] == upper:
return []
a = set(range(lower, upper+1))
a = list(a - set(nums))
a = self.summaryRanges(a)
b = []
for k in a:
if type(k) == int:
b.append(str(k))
else:
b.append(str(k[0])+'->'+str(k[-1]))
return b
def summaryRanges(self, nums: list) -> list:
if nums == []:
return nums
a = [nums[0]]
for k in range(1, len(nums)):
if type(a[-1]) != list and nums[k] - a[-1] == 1:
a.append([a[-1], nums[k]])
a.pop(-2)
elif type(a[-1]) == list and nums[k] - a[-1][-1] == 1:
a.append([a[-1][-1], nums[k]])
else:
a.append(nums[k])
k = -1
for _ in range(len(a)):
k += 1
if k == len(a) - 1: #a[k] == a[-1]:
break
if (type(a[k])==list) * (type(a[k+1])==list) == 1:
if a[k][-1] >= a[k+1][0]:
a.insert(k, [a[k][0], max(a[k][-1],a[k+1][-1])])
a.pop(k+1)
a.pop(k+1)
k -= 1
else:
continue
return a
solu = Solution()
nums, lower, upper = [0, 1, 3, 50, 75], 0, 99,
nums, lower, upper = [], 0, 99,
nums, lower, upper = list(range(0,100)), 0, 99
nums, lower, upper = list(range(0,99)), 0, 99
nums, lower, upper = [2147483647], 0, 2147483647
nums, lower, upper = [-2147483648,2147483647], -2147483648, 2147483647
print(solu.findMissingRanges(nums, lower, upper))
```
#### File: LeetCode-Answers/Python/problem0167.py
```python
class Solution():
def twoSum(self, numbers: list, target: int) -> list:
if len(numbers) <= 1:
return [None, None]
else:
p = 0
q = len(numbers) - 1
while p < q:
if numbers[p] + numbers[q] < target:
p += 1
elif numbers[p] + numbers[q] > target:
q -= 1
else:
return [p+1, q+1]
return [None, None]
solu = Solution()
numbers, target = [2, 7, 11, 15], 9
print(solu.twoSum(numbers, target))
```
#### File: LeetCode-Answers/Python/problem0202.py
```python
class Solution:
def isHappy(self, n: int) -> bool:
# while n != 1:
# a = n
# temp = 0
# while a != 0:
# temp += (a % 10) ** 2
# a //= 10
# n = temp
## if n == 1:
# return True
d = {0:1, 16:1, 37:1, 58:1, 89:1, 145:1, 42:1, 20:1, 4:1}
while n != 1:
try:
if d[n]:
return False
except:
pass
res = 0
while n:
tmp = n % 10
tmp **= 2
res += tmp
n //= 10
n = res
return True
# for n in range(10, 31):
# print('---', n ,'---')
# while n != 1:
# print(n)
# res = 0
# while n:
# tmp = n % 10
# tmp **= 2
# res += tmp
# n //= 10
# n = res
# return True
'''
16
37
58
89
145
42
20
4
'''
solu = Solution()
n = 19
n = 0
print(solu.isHappy(n))
```
#### File: LeetCode-Answers/Python/problem0220.py
```python
class Solution():
def containsNearbyAlmostDuplicate(self, nums: list, k: int, t: int) -> bool:
d = {}
for i in range(len(nums)):
try:
if i - d[nums[i]] <= k:
return True
else:
d[nums[i]] = i
except:
d[nums[i]] = i
return False
solu = Solution()
nums, k, t = [1,2,3,1], 3, 0
nums, k, t = [1,0,1,1], 1, 2
nums, k, t = [1,5,9,1,5,9], 2, 3
print(solu.containsNearbyAlmostDuplicate(nums, k, t))
```
#### File: LeetCode-Answers/Python/problem0222.py
```python
class Solution:
def countNodes(self, root: TreeNode) -> int:
def preorderTraversal(root):
if not root:
return []
ans = []
ans.append(root.val)
ans += preorderTraversal(root.left)
ans += preorderTraversal(root.right)
return ans
return len(preorderTraversal(root))
```
#### File: LeetCode-Answers/Python/problem0233.py
```python
class Solution:
def countDigitOne(self, n: int) -> int:
# count = 0
# for k in range(1, n+1):
# for p in list(str(k)):
# if p == '1':
# count += 1
# return count
# count = 0
# while n > 9:
# m = len(str(n)) - 1
# tmp = 10**m - 1
## print(tmp)
## print(self.fun(tmp))
# count += self.fun(tmp)
# n -= tmp
# return count + 1
# tmp = n
# n = 10 ** (len(str(n)) - 1) - 1
# tmp -= n
# count = 0
# while n:
# count += self.fun(n)
# n //= 10
# return count
#
# def fun(self, n):
# tmp = 0
# count = 1
# m = len(str(n))
# for k in range(1, m):
# tmp += count
# count = tmp * 9 + 10 ** k
# return count
# count = 0
#
# if n == 10**len(str(n))-1:
# while n:
# count += self.fun(n)
# n //= 10
# return count
#
# count += self.countDigitOne( 10 ** (len(str(n))-1)-1 )
#
# tmp = n - 10 ** ( len(str(n))-1 )
# count += self.countDigitOne( tmp )
#
# count += tmp + 1
#
## tmp = n
## n = 10 ** (len(str(n)) - 1) - 1
## tmp -= n
## count = 0
## while n:
## count += self.fun(n)
## n //= 10
## return count
#
# def fun(self, n):
# tmp = 0
# count = 1
# m = len(str(n))
# for k in range(1, m):
# tmp += count
# count = tmp * 9 + 10 ** k
# return count
        if n <= 0: # special case
return 0
if n <= 9:
return 1
count = 0
        if n == 10**len(str(n))-1: # n consists only of 9s
while n:
count += self.fun(n)
n //= 10
return count
# while :
# count += self.countDigitOne(10**(len(str(n))-1)-1)
# n = n - 10**(len(str(n))-1)
# if n <= 10**(len(str(n))-1):
# count += n+1#min(10**(len(str(n))-1), n+1)#
# count += self.countDigitOne(n)
# else:
## count += 10**(len(str(n))-1)#min(10**(len(str(n))-1), n+1)#
# tmp = int(''.join(list(str(n))[1:]))
# count += self.countDigitOne(tmp)
#
# while :
# if n > 10**(len(str(n)-1)-1):
count = 0
        m = int(list(str(n))[0]) # look at the leading digit directly; three parts of the range can contribute 1s
for k in range(m):
count += self.countDigitOne( 10**(len(str(n))-1)-1 ) #=1=
# print(k, self.countDigitOne( 10**(len(str(n))-1)-1 ))
if m == 1: #=2=
tmp = ''.join(list(str(n))[1:])
            # print(tmp, type(tmp))
count += int(tmp) + 1
else:
count += 10 ** (len(str(n))-1)
# print(10 ** (len(str(n))-1))
tmp = ''.join(list(str(n))[1:])
# print(tmp, type(str(tmp)))
count += self.countDigitOne( int(tmp) ) #=3=
# print(count)
return count
    def fun(self, n): # pattern-based counting
tmp = 0
count = 1
m = len(str(n))
for k in range(1, m):
tmp += count
count = tmp * 9 + 10 ** k
return count
solu = Solution()
#n = 13
#n = 3184191
n = 9999
#n = 1000
#n = 9
n = 1024
n = 999
n = 1999
n = 1789
#n = 2789
n = 2
n = 20
#n = 21
n = -1
print(solu.countDigitOne(n))
#print(solu.fun(7))
class Solution(object):
def countDigitOne(self, n):
"""
        Recursive solution; it could be turned into memoized search to speed it up.
"""
if n<=0: return 0
if n<10: return 1
last = int(str(n)[1:])
power = 10**(len(str(n))-1)
high = int(str(n)[0])
if high == 1:
return self.countDigitOne(last) + self.countDigitOne(power-1) + last+1
else:
return self.countDigitOne(last) + high*self.countDigitOne(power-1) + power
```
#### File: LeetCode-Answers/Python/problem0234_test03.py
```python
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    def isPalindrome(self, head: ListNode) -> bool:
        # collect the values, then compare against their reverse
        ans = []
        while head:
            ans.append(head.val)
            head = head.next
        return ans == list(reversed(ans))
```
#### File: LeetCode-Answers/Python/problem0246.py
```python
class Solution:
def isStrobogrammatic(self, num: str) -> bool:
if len(num) == 1:
return num == '0' or num == '8' or num == '1'
if num[-1] == '0':
return False
d = {'0':'0', '6':'9', '8':'8', '9':'6', '1':'1'}
tmp = list(num)
for k in range(len(tmp)):
try:
tmp[k] = d[tmp[k]]
except:
return False
tmp.reverse()
return num == ''.join(tmp)
solu = Solution()
num = '69'
num = '88'
num = '962'
num = '1'
print(solu.isStrobogrammatic(num))
```
#### File: LeetCode-Answers/Python/problem0252.py
```python
class Solution:
def canAttendMeetings(self, intervals: list) -> bool:
if intervals == []:
return True
intervals = self.merge_sort(intervals)
original_intervals = intervals.copy()
k = -1
for _ in range(len(intervals)):
k += 1
# print(len(intervals))
if k == len(intervals)-1:
break
else:
if intervals[k][-1] > intervals[k+1][0]:
intervals.insert(k, [intervals[k][0], max(intervals[k][-1], intervals[k+1][-1])])
intervals.pop(k+1)
intervals.pop(k+1)
k -= 1
else:
continue
if intervals == original_intervals:
return True
else:
return False
# print(original_intervals)
# return intervals
def merge(self, left, right):
p, q = 0, 0
ans = []
while p<len(left) and q<len(right):
if left[p][0] <= right[q][0]:
ans.append(left[p])
p += 1
else:
ans.append(right[q])
q += 1
ans += left[p:]
ans += right[q:]
return ans
def merge_sort(self, List):
if len(List) <= 1:
return List
else:
num = len(List) // 2
left = self.merge_sort(List[:num])
right = self.merge_sort(List[num:])
return self.merge(left, right)
solu = Solution()
intervals = []
intervals = [[0,30],[5,10],[15,20]]
#intervals = [[7,10],[2,4]]
intervals = [[13,15],[1,13]]
#print(solu.merge_sort(intervals))
print(solu.canAttendMeetings(intervals))
```
#### File: LeetCode-Answers/Python/problem0290.py
```python
class Solution:
def wordPattern(self, pattern: str, str: str) -> bool:
str = str.split()
d = {}
# print(pattern, str)
if len(pattern) != len(str):
return False
s = set()
for k in range(len(pattern)):
if pattern[k] in d:
if d[pattern[k]] != str[k]:
return False
else:
if str[k] not in s:
d[pattern[k]] = str[k]
s.add(str[k])
else:
return False
return True
solu = Solution()
pattern, str = "jquery", "jquery"
pattern, str = "abba", "dog dog dog dog"
print(solu.wordPattern(pattern, str))
```
#### File: LeetCode-Answers/Python/problem0294.py
```python
class Solution:
def canWin(self, s: str) -> bool:
ans = self.generatePossibleNextMoves(s)
print(ans)
count = 0
for state in ans:
# print(state)
for k in range(len(state)-1):
if state[k:k+2] == '++':
count += 1
break
# return True
if count == len(ans):
return False
else:
return True
def generatePossibleNextMoves(self, s):
res = []
for i in range(len(s) - 1):
if s[i:i+2] == "++":
                res.append(s[:i] + "--" + s[i+2:]) # string concatenation builds the next state
return res
solu = Solution()
s = "++++"
s = "+++++"
s = "++++++"
print(solu.canWin(s))
```
#### File: LeetCode-Answers/Python/problem0322.py
```python
class Solution:
def coinChange(self, coins: list, amount: int) -> int:
# if amount == 0:
# return 0
# if not coins or amount < min(coins):
# return -1
# if 1 not in coins and amount not in coins and amount%coins[0] != 0:
# return -1
        dp = [amount+1] * (amount+1)  # indices 0..amount (amount+1 entries); each initialized to amount+1, an upper bound on the coin count for any amount
        dp[0] = 0  # zero coins are needed to make amount 0
        for m in range(1, amount+1):  # the boundary conditions are easy to get wrong
            for coin in coins:
                if m >= coin:  # `if amount >= coin:` would also pass the judge, but this is the intended check
                    dp[m] = min(dp[m], dp[m-coin] + 1)
        print(dp)  # since the smallest coin value is 1, the coin count should never exceed amount
        return dp[amount] if dp[amount] <= amount else -1
solu = Solution()
coins, amount = [1, 2, 5], 11
coins, amount = [2], 3
print(solu.coinChange(coins, amount))
```
#### File: LeetCode-Answers/Python/problem0328_test02.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def oddEvenList(self, head: ListNode) -> ListNode:
# if not (head and head.next and head.next):
# return head
# odd, even = head, head.next
# ans1 = ListNode(None)
# a1 = ans1
# ans2 = ListNode(None)
# a2 = ans2
# # tmp = head
#        # while odd and odd.next and even and even.next:  # cur must exist before cur.next; even/odd stand in for .next here, so this guard is sound
#        # while odd and odd.next odd.next.next:
#        while odd and odd.next:  # cur must exist before cur.next; even/odd stand in for .next here, so this guard is sound
# a1.next = odd
# tmp = odd.next
# odd.next = odd.next.next
# odd = tmp
# odd = odd.next
# if a1.next:
# a1 = a1.next
# while even and even.next:
# a2.next = even
# tmp = even.next
# even.next = even.next.next
# even = tmp
# even = even.next
# a2 = a2.next
# a2 = a2.next
# a1.next = a2
# return ans1
        if not (head and head.next and head.next.next):
return head
oddhead, evenhead = head, head.next
odd, even = oddhead, evenhead
while odd and even and even.next:
tmp1 = odd.next.next
tmp2 = even.next.next
odd.next = tmp1
even.next = tmp2
odd = tmp1
even = tmp2
odd.next = evenhead
return oddhead
```
#### File: LeetCode-Answers/Python/problem0352.py
```python
class SummaryRanges():
def __init__(self):
"""
Initialize your data structure here.
"""
self.stream = []
self.List = []
def addNum(self, val: int) -> None:
self.stream.append(val)
self.val = val
def getIntervals(self) -> list:
if len(self.stream) == 0:
return []
elif len(self.stream) == 1:
self.List.append([self.stream[0], self.stream[0]])
return self.List
else:
newInterval = [self.val, self.val]
mark = 0
count = 0
for m in range(len(self.List)):
# print(m)
# print(newInterval[0])
# print(self.List[m][0])
if newInterval[0] <= self.List[m][0]:
mark = m
count += 1
break
if count == 0: mark = len(self.List)
self.List.insert(mark, newInterval)
k = -1
for _ in range(len(self.List)):
k += 1
if k == len(self.List) - 1: break
if self.List[k][-1] - self.List[k+1][0] >= -1:
self.List.insert(k, [self.List[k][0], max(self.List[k][-1],self.List[k+1][-1])])
self.List.pop(k+1)
self.List.pop(k+1)
k -= 1
return self.List
# Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
#class SummaryRanges:
#
# def __init__(self):
# """
# Initialize your data structure here.
# """
# self._list = []
#
#
# def addNum(self, val: int) -> None:
# import bisect
# if val not in self.contain:
# bisect.insort(self._list, val)
#
#
#
# def getIntervals(self) -> List[Interval]:
# res = []
# n = len(self._list)
# i = 0
# while i < n:
# j = i
# while j < n - 1 and self._list[j]+ 1 == self._list[j+1]:
# j += 1
# res.append(Interval(self._list[i], self._list[j]))
# i = j + 1
# return res
# Your SummaryRanges object will be instantiated and called as such:
# obj = SummaryRanges()
# obj.addNum(val)
# param_2 = obj.getIntervals()
SR = SummaryRanges()
a = [1,3,7,2,6]
while a:
val = a.pop(0)
SR.addNum(val)
param = SR.getIntervals()
print(param)
```
#### File: LeetCode-Answers/Python/problem0360.py
```python
from typing import List
class Solution:
def sortTransformedArray(self, nums: List[int], a: int, b: int, c: int) -> List[int]:
# # for k, num in enumerate(nums):
# # nums[k] = a*num**2 + b*num + c
# nums = list(map(lambda x: a*x**2 + b*x + c, nums))
# min_ = min(nums)
# nums = [num-min_ for num in nums]
# max_ = max(nums)
# tmp = [0] * (max_+1)
# # print(nums)
# for num in nums:
# tmp[num] += 1
# nums = []
# for k, t in enumerate(tmp):
# for _ in range(t):
# nums.append(k)
# nums = list(map(lambda x: x+min_, nums))
# return nums
nums = list(map(lambda x: a*x**2 + b*x + c, nums))
return sorted(nums)
# nums = [-4,-2,2,4]; a = 1; b = 3; c = 5
nums = [-99,-98,-94,-92,-87,-82,-71,-62,-57,-57,-45,-39,-36,-23,-21,-14,-3,3,12,12,16,19,27,27,28,41,47,51,52,60,70,77,78,88]
a = -34
b = 96
c = -67
solu = Solution()
print(solu.sortTransformedArray(nums, a, b, c))
```
#### File: LeetCode-Answers/Python/problem0394.py
```python
class Solution:
def decodeString(self, s: str) -> str:
from collections import defaultdict
stack = []
mark = 0
d_nums = defaultdict(list)
d_letters = defaultdict(list)
for k in range(len(s)):
if s[k] == '[':
tmp = int(''.join(d_nums[mark]))
d_nums[mark] = []
d_nums[mark].append(tmp)
del tmp
elif s[k] == ']':
mark -= 1
if mark == 0:
for _ in range(d_nums[mark+1][0]):
stack.append(''.join(d_letters[mark+1]))
else:
for _ in range(d_nums[mark+1][0]):
d_letters[mark].append(''.join(d_letters[mark+1]))
d_letters[mark+1] = []
d_nums[mark+1] = []
elif s[k].isdigit():
if k-1 >= 0:
if not s[k-1].isdigit():
mark += 1
else:
mark += 1
d_nums[mark].append(s[k])
elif mark == 0:
stack.append(s[k])
elif s[k] != '[':
d_letters[mark].append(s[k])
return ''.join(stack)
# solu = Solution()
# # s = "3[a]2[bc]"
# # s = "3[a2[c]]"
# # s = "2[abc]3[cd]ef"
# s = "100[leetcode]"
# print(solu.decodeString(s))
class Solution:
def decodeString(self, s: str) -> str:
stack, res, multi = [], "", 0
for c in s:
if c == '[':
stack.append([multi, res])
res, multi = "", 0
elif c == ']':
cur_multi, last_res = stack.pop()
res = last_res + cur_multi * res
elif '0' <= c <= '9':
multi = multi * 10 + int(c)
else:
res += c
return res
# # Author: jyd
# # Link: https://leetcode-cn.com/problems/decode-string/solution/decode-string-fu-zhu-zhan-fa-di-gui-fa-by-jyd/
# # Source: LeetCode (leetcode-cn)
# # Copyright belongs to the author. Commercial reprints require the author's permission; non-commercial reprints must cite the source.
```
#### File: LeetCode-Answers/Python/problem0434.py
```python
class Solution:
def countSegments(self, s: str) -> int:
return len(s.split())
solu = Solution()
s = "Hello, my name is John"
print(solu.countSegments(s))
```
#### File: LeetCode-Answers/Python/problem0442.py
```python
class Solution:
def findDuplicates(self, nums: list) -> list:
# Sum = 1
# k = 0
# while Sum:
# Sum ^= nums[k]
# if Sum == 0:
# k += 1
# d = {}
# for k in nums:
# try:
# d[k] += 1
# except:
# d[k] = 1
# ans = []
# for k, v in d.items():
# if v == 2:
# ans.append(k)
# return ans
res = []
for i in range(len(nums)):
num = abs(nums[i])
if nums[num-1] > 0: #1<= a[i] <= n
nums[num-1] *= -1
else:
res.append(num)
return res
solu = Solution()
nums = [4,3,2,7,8,2,3,1]
print(solu.findDuplicates(nums))
```
#### File: LeetCode-Answers/Python/problem0475.py
```python
from typing import List
class Solution:
def findRadius(self, houses: List[int], heaters: List[int]) -> int:
        # stores each house's shortest distance to a heater
        res = []
        # sort both lists
        houses.sort()
        heaters.sort()
        for c in houses:
            # binary search: find the heater in heaters nearest to house c
            left = 0
            right = len(heaters) - 1
            while left < right:
                # mid = (left + right) >> 1
                mid = (left + right) // 2
                if heaters[mid] < c:
                    left = mid + 1
                else:
                    right = mid
            # if the value found equals c, a heater sits right at house c, so the shortest distance is 0
            if heaters[left] == c:
                res.append(0)
            # if that heater's coordinate is below c, no other heater lies between it and c
            elif heaters[left] < c:
                res.append(c - heaters[left])
            # if the heater's coordinate is above c and left is non-zero, c lies between left-1 and left,
            # so the shortest distance is the smaller of c's differences with those two heaters
            elif left:
                res.append(min(heaters[left] - c, c - heaters[left - 1]))
            else:
                res.append(heaters[left] - c)
return max(res)
# houses, heaters = [1,2,3], [2]
# # houses, heaters = [1,2,3,4], [1,4]
# # houses, heaters = [1,5], [2]
# # houses, heaters = [1,5], [10]
# solu = Solution()
# print(solu.findRadius(houses, heaters))
```
#### File: LeetCode-Answers/Python/problem0476.py
```python
class Solution:
def findComplement(self, num: int) -> int:
i = 1
        # find the power of two just larger than num (highest bit 1, rest 0); that value minus 1, minus num, is the answer
        while num >= i:
            i = i << 1  # shift left one bit each time, e.g. i = 0b1000
return i-1-num
#Author: wang-hao-bi0glUtbvU
#Link: https://leetcode-cn.com/problems/two-sum/solution/ren-sheng-ku-duan-wo-yong-python3-by-wang-hao-bi0g/
#Source: LeetCode (leetcode-cn)
#Copyright belongs to the author. Commercial reprints require the author's permission; non-commercial reprints must cite the source.
solu = Solution()
num = 5
#num = 1
print(solu.findComplement(num))
```
#### File: LeetCode-Answers/Python/problem0507.py
```python
class Solution:
def checkPerfectNumber(self, num: int) -> bool:
if num % 2 != 0:
return False
factors = [1]
p, q = 2, num//2
factors.append(p)
factors.append(q)
while p <= q:
p += 1
if num % p == 0:
q = num // p
if p <= q:
factors.append(p)
factors.append(q)
else:
pass
return sum(factors) == num
num = 28
num = 36
solu = Solution()
print(solu.checkPerfectNumber(num))
```
#### File: LeetCode-Answers/Python/problem0520.py
```python
class Solution:
def detectCapitalUse(self, word: str) -> bool:
count = 0
for k in word:
if 'A'<=k and k<='Z':
count += 1
if count == len(word):
return True
count = 0
for k in word:
if 'a'<=k and k<='z':
count += 1
if count == len(word):
return True
count = 0
if 'A'<=word[0] and word[0]<='Z':
for k in range(1, len(word)):
if 'a'<=word[k] and word[k]<='z':
count += 1
if count + 1 == len(word):
return True
return False
solu = Solution()
word = 'USA'
#word = 'FlaG'
#word = 'leetcode'
#word = 'Google'
print(solu.detectCapitalUse(word))
```
#### File: LeetCode-Answers/Python/problem0561.py
```python
class Solution:
def arrayPairSum(self, nums: list) -> int:
# nums.sort()
# Sum = 0
# for k in range(len(nums)):
# if k%2 == 1:
# Sum += min(nums[k], nums[k-1])
# return Sum
nums.sort()
Sum = 0
for k in range(len(nums)):
if k%2 == 0:
Sum += nums[k]
return Sum
solu = Solution()
nums = [1,4,3,2]
print(solu.arrayPairSum(nums))
```
#### File: LeetCode-Answers/Python/problem0598.py
```python
from typing import List
from collections import Counter
# class Solution:
# def maxCount(self, m: int, n: int, ops: List[List[int]]) -> int:
# if not m or not n:
# return None
# mat = [ [0]*n for _ in range(m) ]
# for op in ops:
# for p in range(op[0]):
# for q in range(op[1]):
# mat[p][q] += 1
# mat_ = []
# for t in mat:
# mat_ += t
# # print(mat_)
# d = Counter(mat_)
# # print(d)
# max_ = max(d.keys())
# # print(list(d.values()))
# # print(max_)
# return d[max_]
class Solution:
def maxCount(self, m: int, n: int, ops: List[List[int]]) -> int:
if not ops:
return m*n
m, n = ops[0][0], ops[0][1]
for op in ops:
m = min(m, op[0])
n = min(n, op[1])
return m*n
solu = Solution()
m = 3
n = 3
operations = [[2,2],[3,3]]
print(solu.maxCount(m,n,operations))
```
#### File: LeetCode-Answers/Python/problem0599.py
```python
from typing import List
# from collections import Counter
class Solution:
def findRestaurant(self, list1: List[str], list2: List[str]) -> List[str]:
count = len(list1)-1 + len(list2)-1
# d = Counter(list1)
d = {}
for k, s in enumerate(list1):
d[s] = k
for k, s in enumerate(list2):
if s in d:
if d[s] + k < count:
count = d[s] + k
ans = []
for k, s in enumerate(list2):
if s in d:
if d[s] + k == count:
ans.append(s)
return ans
```
#### File: LeetCode-Answers/Python/problem0645.py
```python
from typing import List
class Solution:
def findErrorNums(self, nums: List[int]) -> List[int]:
# nums.sort()
# for k in range(1, len(nums)):
# if nums[k] == nums[k-1]:
# # tmp = nums.copy()
# return [ nums[k], list( set(range(1, len(nums)+1)) - set(nums) )[0] ]
d = {}
for num in nums:
try:
d[num] += 1
except:
d[num] = 1
for k, v in d.items():
if v == 2:
break
return [ k, list( set(range(1, len(nums)+1)) - set(nums) )[0] ]
```
#### File: LeetCode-Answers/Python/problem0646.py
```python
class Solution:
def findLongestChain(self, pairs: list) -> int:
# pairs = sorted(pairs, key = lambda x: x[1], reverse = False)
## print(pairs)
# d = {}
# for p in range(len(pairs)):
# d[p] = []
# d[p].append(pairs[p])
# for k in d.keys():
# if d[k][-1][-1] < pairs[p][0]:
# d[k].append(pairs[p])
# print(d)
# Max = 0
# for k, v in d.items():
## if len(v) > Max:
## Max = len(v)
# Max = max(Max, len(v))
# return Max
# pairs = sorted(pairs, key = lambda x: x[0])
## print(pairs)
# d = {}
# for p in range(len(pairs))[::-1]:
# d[p] = []
# print(pairs[p])
# d[p].append(pairs[p])
# for k in d.keys():
# print(d[k][-1][0], pairs[p][-1])
# if d[k][-1][0] > pairs[p][-1]:
# d[k].append(pairs[p])
# print(d)
# Max = 0
# for k, v in d.items():
## if len(v) > Max:
## Max = len(v)
# Max = max(Max, len(v))
# return Max
# a = sorted(pairs)
# h = a[0]
# ans = []
# for block in a[1:]:
# if h[1] < block[0]:
# ans.append(h)
# h = block
# elif h[1] > block[1]:
# h = block
# return len(ans) + 1
pairs = sorted(pairs)
temp = pairs[0]
ans = []
for k in pairs[1:]:
if temp[1] < k[0]:
ans.append(temp)
temp = k
elif temp[1] > k[1]:
temp = k
# print(ans)
        # print(k)
        # print(temp)
        ans.append(temp)  # by now temp satisfies one of the two temp = k branches above; the final round just never appended it
# print(ans)
return len(ans)
solu = Solution()
#pairs = [[1,2], [2,3], [3,4]]
#pairs = [[3,4], [2,3], [1,2]]
#pairs = [[7,9], [4,5], [7,9], [-7,-1], [0,10], [3,10], [3,6], [2,3]]
#pairs = [[-7, -1], [0, 10], [2, 3], [3, 10], [3, 6], [4, 5], [7, 9], [7, 9]]
#pairs = [[-7, -1], [0, 10], [2, 3], [3, 10], [3, 6], [-1, 5], [7, 9]]
pairs = [[-7, -1], [-1, 5], [0, 10], [2, 3], [3, 10], [3, 6], [3, 9]]
pairs = [[-7, -1], [-1, 5], [0, 10], [2, 3], [3, 10], [3, 6], [7, 9]]
print(solu.findLongestChain(pairs))
```
#### File: LeetCode-Answers/Python/problem0654.py
```python
from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def constructMaximumBinaryTree(self, nums: List[int]) -> TreeNode:
if not nums:
return None
max_ = max(nums)
ind = nums.index(max_)
root = TreeNode(max_)
nums1 = nums[:ind]
nums2 = nums[ind+1:]
root.left = self.constructMaximumBinaryTree(nums1)
root.right = self.constructMaximumBinaryTree(nums2)
return root
```
#### File: LeetCode-Answers/Python/problem0657.py
```python
class Solution:
def judgeCircle(self, moves: str) -> bool:
# stack = []
# for move in moves:
# stack.append(move)
# if len(stack) > 1 and (stack[-2:] == ['U','D'] or stack[-2:] == ['D','U'] or stack[-2:] == ['L','R'] or stack[-2:] == ['R','L']):
# stack.pop()
# stack.pop()
# print(stack)
# return not stack
count = {'L':0, 'R':0, 'U':0, 'D':0}
for move in moves:
count[move] += 1
return count['L'] == count['R'] and count['U'] == count['D']
solu = Solution()
moves = "UD"
#moves = 'LL'
#moves = "RLUURDDDLU"
print(solu.judgeCircle(moves))
```
#### File: LeetCode-Answers/Python/problem0673.py
```python
class Solution(object):
def findNumberOfLIS(self, nums):
N = len(nums)
if N <= 1: return N
        lengths = [0] * N  # lengths[i] = length of the longest increasing subsequence ending in nums[i]
        counts = [1] * N   # counts[i] = number of longest increasing subsequences ending in nums[i]
        for j in range(len(nums)):
            for i in range(j):
                if nums[i] < nums[j]:
                    if lengths[i] >= lengths[j]:
                        lengths[j] = 1 + lengths[i]
                        counts[j] = counts[i]
                    elif lengths[i] + 1 == lengths[j]:
                        counts[j] += counts[i]
        longest = max(lengths)
        return sum(c for i, c in enumerate(counts) if lengths[i] == longest)
# Author: LeetCode
# Link: https://leetcode-cn.com/problems/number-of-longest-increasing-subsequence/solution/zui-chang-di-zeng-zi-xu-lie-de-ge-shu-by-leetcode/
# Source: LeetCode (leetcode-cn)
# Copyright belongs to the author. Commercial reprints require the author's permission; non-commercial reprints must cite the source.
```
#### File: LeetCode-Answers/Python/problem0693.py
```python
class Solution:
def hasAlternatingBits(self, n: int) -> bool:
# a = []
# while n:
# tmp = n % 2
# a.append(tmp)
# if len(a) >=2 and a[-1] == a[-2]:
# return False
# n //= 2
# return True
a = [0] * 2
count = 0
while n:
tmp = n % 2
n //= 2
a[count%2] = tmp
count += 1
if count>=2 and a[0] == a[1]:
return False
return True
solu = Solution()
n = 5
n = 7
n = 11
#n = 10
print(solu.hasAlternatingBits(n))
```
#### File: LeetCode-Answers/Python/problem0714.py
```python
from typing import List
class Solution:
def maxProfit(self, prices: List[int], fee: int) -> int:
        '''
        Unlimited transactions but each sale costs a fee, so fewer transactions are better.
        Unlike the two earlier stock problems, the idea here is dynamic programming.
        cash and hold are fairly abstract notions:
        some DP solutions keep one two-dimensional table,
        this one keeps two one-dimensional values.
        '''
# if not prices:
# return 0
# cash, hold = 0, -prices[0]
# for k in range(1, len(prices)): #持股相当于负债
# cash = max(cash, hold+prices[k]-fee) #对应卖出(卖出需要fee)
# hold = max(hold, cash - prices[k]) #对应买入(买入不需要fee)
# return cash
if not prices:
return 0
cash, hold = 0, prices[0]
        for k in range(1, len(prices)):  # holding stock is like carrying debt
            '''
            the max and min mean: if the trade is not worth it,
            then do not sell / do not buy
            '''
            cash = max(cash, prices[k]-hold-fee)  # selling (selling pays the fee)
            hold = min(hold, prices[k]-cash)  # buying (buying pays no fee)
        return cash
```
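A hedged sanity check of this DP (the classic example for this problem; the harness itself is not part of the original file):
```python
# Buy at 1, sell at 8, buy at 4, sell at 9; two fees of 2 leave a profit of 8.
prices, fee = [1, 3, 2, 8, 4, 9], 2
print(Solution().maxProfit(prices, fee))  # expected: 8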
#### File: LeetCode-Answers/Python/problem0718.py
```python
class Solution:
def findLength(self, A: list, B: list) -> int:
# dp = [ [0]*len(A) for _ in range(len(B)) ]
# for k in range(len(A)):
# dp[0][k] = int(A[k] == B[0])
# for k in range(len(B)):
# dp[k][0] = int(B[k] == A[0])
# # print(dp)
# for i in range(1, len(B)):
# for j in range(1, len(A)):
# # print(dp[i][j])
# # print(B[i], A[j])
# # print(dp[i-1][j], dp[i][j-1])
# dp[i][j] = min( int(B[i] == A[j]) + max(dp[i-1][j], dp[i][j-1]), i+1, j+1 )
# print(dp)
# return dp[-1][-1]
dp = [ [0]*(len(A)+1) for _ in range(len(B)+1) ]
for i in range(len(B)-1, -1, -1):
for j in range(len(A)-1, -1, -1):
if B[i] == A[j]:
dp[i][j] = dp[i+1][j+1] + 1
# print(dp)
        # return max( (max(row) for row in dp) )  # generator expression
        return max( [max(row) for row in dp] )  # list comprehension
# solu = Solution()
# A, B = [1,2,3,2,1], [3,2,1,4,7]
# A, B = [0,0,0,0,0], [0,0,0,0,0]
# A, B = [0,1,1,1,1], [1,0,1,0,1]
# print(solu.findLength(A, B))
```
#### File: LeetCode-Answers/Python/problem0791.py
```python
class Solution:
def customSortString(self, S: str, T: str) -> str:
# d = {}
# for k, s in enumerate(S):
# d[s] = k
# tmp = set(T) - set(S)
# for t in tmp:
# d[t] = -1
# T = list(T)
# T.sort(key = lambda x: d[x])
# T = ''.join(T)
# return T
# from collections import Counter
# d = Counter(T)
# ans = []
# tmp = set(T) - set(S)
# for s in S:
# for _ in range(d[s]):
# ans.append(s)
# for t in tmp:
# for _ in range(d[t]):
# ans.append(t)
# return ''.join(ans)
from collections import Counter
d = Counter(T)
ans = []
for s in S:
ans.append(d[s]*s)
d[s] = 0
for e in d:
ans.append(d[e]*e)
return ''.join(ans)
S, T = "cba", "abcd"
solu = Solution()
print(solu.customSortString(S, T))
```
#### File: LeetCode-Answers/Python/problem0832.py
```python
class Solution:
def flipAndInvertImage(self, A: list) -> list:
for a in A:
p, q = 0, len(a)-1
while p<=q:
if p == q:
a[p] = 1 - a[p]
break
a[p], a[q] = a[q], a[p]
a[p] = 1 - a[p]
a[q] = 1 - a[q]
p += 1
q -= 1
return A
solu = Solution()
#A = [[1,1,0],[1,0,1],[0,0,0]]
A = [[1,1,0,0],[1,0,0,1],[0,1,1,1],[1,0,1,0]]
print(solu.flipAndInvertImage(A))
```
#### File: LeetCode-Answers/Python/problem0852.py
```python
from typing import List
class Solution:
    def peakIndexInMountainArray(self, A: List[int]) -> int:
        if not A:
            return None
        # forward difference (active approach; the two alternatives below are kept commented for reference)
        for k in range(1, len(A)-1):
            if A[k-1]<A[k] and A[k]>A[k+1]:
                return k
        return None
        # maximum value
#        return A.index(max(A))
        # binary search
#        p, q = 0, len(A)-1
#        while p < q:
#            k = (p+q) // 2
#            if A[k-1]<A[k]<A[k+1]:
#                p = k+1
#            elif A[k-1]>A[k]>A[k+1]:
#                q = k-1
#            else:
#                return k
#        return p
```
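A quick check of the active forward-difference approach (hypothetical mountain array):
```python
# In [0, 2, 1, 0] the peak sits at index 1.
print(Solution().peakIndexInMountainArray([0, 2, 1, 0]))  # expected: 1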
#### File: LeetCode-Answers/Python/problem0859.py
```python
class Solution:
def buddyStrings(self, A: str, B: str) -> bool:
# if not A and not B:
# return False
## if A == B:
## return False
## if (A and not B) or (not A and B):
## return False
# if len(A) != len(B):
# return False
# a = []
# for k in range(len(A)):
# if A[k] != B[k]:
# a.append(k)
## if len(a) == 0:
## return True
## print(len(a))
# if len(a) != 2 and len(a) != 0:
## print(-3)
# return False
# elif len(a) == 2:
# A = list(A)
# A[a[0]], A[a[1]] = A[a[1]], A[a[0]]
# A = ''.join(A)
# if A == B:
# return True
# else:
## print(-2)
# return False
# else:
## print(-1)
# for k in range(1, len(A)):
# if A[k] != A[k-1]:
# return False
# return True
# if len(A) != len(B): return False
# if A == B:
# seen = set()
# for a in A:
# if a in seen:
# return True
# seen.add(a)
# return False
# else:
# pairs = []
# for a, b in itertools.izip(A, B):
# if a != b:
# pairs.append((a, b))
# if len(pairs) >= 3: return False
# return len(pairs) == 2 and pairs[0] == pairs[1][::-1]
        # different lengths: immediately False
        if len(A) != len(B): return False
        # exactly one swap is required, so when A == B there must be a repeated character to swap with itself
        if A == B and len(set(A)) < len(A): return True
        # pair positions up with zip and pick out the mismatched character pairs
        dif = [(a, b) for a, b in zip(A, B) if a != b]
        # there must be exactly 2 mismatches, and they must mirror each other, e.g. (a,b) and (b,a)
        return len(dif) == 2 and dif[0] == dif[1][::-1]
solu = Solution()
A, B = 'ab', 'ba'
A, B = 'ab', 'ab'
A, B = 'aa', 'aa'
#A, B = 'aaaaaaabc', 'aaaaaaacb'
#A, B = '', 'aa'
print(solu.buddyStrings(A, B))
```
#### File: LeetCode-Answers/Python/problem0883.py
```python
class Solution:
def projectionArea(self, grid: list) -> int:
ans = 0
for element in grid:
ans += max(element)
# print([0] * len(grid))
# print(( [0] * len(grid) ) * len(grid[0]))
grid_copy = [( [0] * len(grid) ) for _ in range(len(grid[0]))]
# print(grid_copy)
for p in range(len(grid)):
for q in range(len(grid[0])):
# print(p,q)
grid_copy[q][p] = grid[p][q]
if grid[p][q] != 0:
ans += 1
for element in grid_copy:
ans += max(element)
return ans
# solu = Solution()
# grid = [[1,2],[3,4]]#[[1,2],[3,4],[5,6]]
# print(solu.projectionArea(grid))
```
#### File: LeetCode-Answers/Python/problem0884.py
```python
class Solution:
def uncommonFromSentences(self, A: str, B: str) -> list:
S = A + ' ' + B
S = S.split()
# print(A, B)
# print(S)
from collections import Counter
d = Counter(S)
# print(d)
# d = {}
# for s in S:
# try:
# d[s] += 1
# except:
# d[s] = 1
# print(d)
ans = []
for key, value in d.items():
if value == 1:
ans.append(key)
return ans
solu = Solution()
A, B = "this apple is sweet", "this apple is sour"
print(solu.uncommonFromSentences(A, B))
```
#### File: LeetCode-Answers/Python/problem0896.py
```python
class Solution:
def isMonotonic(self, A: list) -> bool:
if len(A)<=1:
return True
if A[1] > A[0]:
return self.fun1(A)
elif A[1] < A[0]:
return self.fun2(A)
else:
for k in range(2, len(A)):
if A[k] > A[k-1]:
return self.fun1(A[k:])
elif A[k] < A[k-1]:
return self.fun2(A[k:])
return True
def fun1(self, A):
for k in range(2, len(A)):
if A[k] < A[k-1]:
return False
return True
def fun2(self, A):
for k in range(2, len(A)):
if A[k] > A[k-1]:
return False
return True
solu = Solution()
A = [1,2,2,3]
#A = [6,5,4,4]
#A = [1,3,2]
#A = [1,2,4,5]
#A = [1,1,1]
A = [1,1,0]
print(solu.isMonotonic(A))
```
#### File: LeetCode-Answers/Python/problem0905.py
```python
class Solution:
def sortArrayByParity(self, A: list) -> list:
# temp1, temp2 = [], []
# for k in A:
# if k%2 == 0:
# temp1.append(k)
# else:
# temp2.append(k)
# return temp1 + temp2
# k = 0
# for _ in range(len(A)):
# if A[k]%2 != 0:
## print(-1)
# A.append(A[k])
## print(A)
# del A[k]
## print(A)
# else:
# k += 1
# return A
# p = 0
# while p<len(A):
# if A[p]%2 != 0:
# A.append(A[p])
# del A[p]
# length = len(A)
# for k in range(length):
# if A[k]%2 == 0:
# A.append(A[k])
## print(A)
# for k in range(length):
# if A[k]%2 != 0:
# A.append(A[k])
## print(A)
# return A[length:]
# if len(A) <= 1:
# return A
# p, q = 0, 1
# while p<len(A) and q < len(A):
# if A[q]%2 != 0:
# q += 1
# if A[p]%2 == 0:
# p += 1
p, q = 0, len(A)-1
while p<q:
# print(p,q)
while A[p]%2 == 0 and p<q:#
# print(-1)
p += 1
while A[q]%2 != 0 and p<q:#
q -= 1
# if p<q:
A[p], A[q] = A[q], A[p]
return A
solu = Solution()
A = [3,1,2,4]
#A = [0,1,2]
#A = [0, 2]
print(solu.sortArrayByParity(A))
```
#### File: LeetCode-Answers/Python/problem0942.py
```python
class Solution:
def diStringMatch(self, S: str) -> list:
# if not S:
# return []
# A = list(range(len(S)+1))
# S = list(S)
# d = {}
# print(A)
# print(S)
# for k in range(len(A)):
# d[k] = [k]
#
## for p in S:
# for k in range(len(S)):
# print(d)
# if S[k] == 'I':
# for p in range(len(A)):
# for key, value in d.items():
# if A[p]>value[-1]:
# d[key].append(k)
# elif S[k] == 'D':
# for p in range(len(A)):
# for key, value in d.items():
# if A[p]<value[-1]:
# d[key].append(k)
# print(d)
# Max = 0
# mark = 0
# for k, v in d.items():
# if len(v)>Max:
# mark = k
# return d[mark]
#
## for k in S:
## if k == 'I':
## A.append(A[-1]+1)
## elif k == 'D':
## A.append(A[-1]-1)
## return A
ans = []
i = 0
j = len(S)
for s in S:
if s == 'I':
ans.append(i)
i += 1
else:
ans.append(j)
j -= 1
ans.append(i)
return ans
solu = Solution()
S = "IDID"
print(solu.diStringMatch(S))
```
#### File: LeetCode-Answers/Python/problem0950.py
```python
class Solution:
def deckRevealedIncreasing(self, deck: list) -> list:
deck.sort()
n = len(deck)
ans = [deck[-1]]
deck.pop()
# print(ans)
# print(deck)
while len(ans) < n:
ans.insert(0, deck.pop())
if len(ans) == n:
break
ans.insert(0, ans.pop())
# ans.insert(0, ans[-1])
# ans.pop()
# print(ans)
return ans
solu = Solution()
deck = [17,13,11,2,3,5,7]
print(solu.deckRevealedIncreasing(deck))
```
#### File: LeetCode-Answers/Python/problem0970.py
```python
class Solution:
def powerfulIntegers(self, x: int, y: int, bound: int) -> list:
if x == 1 and y == 1:
if bound >= 2:
return [2]
else:
return []
if x != 1 and y == 1:
import numpy as np
ans = []
log_x = np.log(bound)/np.log(x)
log_x = int(log_x)+1
for i in range(log_x):
tmp = x**i + 1
if tmp <= bound:
ans.append(tmp)
return list(set(ans))
if x == 1 and y != 1:
import numpy as np
ans = []
log_y = np.log(bound)/np.log(y)
log_y = int(log_y)+1
for j in range(log_y):
tmp = 1 + y**j
if tmp <= bound:
ans.append(tmp)
return list(set(ans))
if x != 1 and y != 1:
import numpy as np
ans = []
log_x, log_y = np.log(bound)/np.log(x), np.log(bound)/np.log(y)
log_x, log_y = int(log_x)+1, int(log_y)+1
for i in range(log_x):
for j in range(log_y):
tmp = x**i + y**j
if tmp <= bound:
ans.append(tmp)
return list(set(ans))
solu = Solution()
x, y, bound = 2, 3, 10
x, y, bound = 2, 1, 10
print(solu.powerfulIntegers(x, y, bound))
```
#### File: LeetCode-Answers/Python/problem0976.py
```python
from typing import List
class Solution:
def largestPerimeter(self, A: List[int]) -> int:
perimeter = 0
A.sort(reverse=True)
for k in range(len(A)-2):
if self.fun(A[k], A[k+1], A[k+2]):
perimeter = max(perimeter, A[k] + A[k+1] + A[k+2])
return perimeter
return perimeter
def fun(self, a, b, c):
# if a+b>c and a+c>b and b+c>a:
if b+c>a:
return True
else:
return False
```
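A small usage sketch (hypothetical input): after the descending sort, the first triple satisfying the triangle inequality is taken greedily.
```python
# Sorted descending: [4, 3, 3, 2]; since 3 + 3 > 4, the perimeter is 10.
print(Solution().largestPerimeter([3, 2, 3, 4]))  # expected: 10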
#### File: LeetCode-Answers/Python/problem0977.py
```python
class Solution:
def sortedSquares(self, A: list) -> list:
# B = []
# for k in A:
# B.append(k**2)
## print(B)
## C = self.merge_sort(B)
## C = sorted(B)
# B = self.MergeSort_fun2(B)
# return B#C#
a = []
mark = 0
temp = 0
for k in A:
if mark == 0 and k>=0:
mark += 1
temp = A.index(k)
a.reverse()
a.append(k**2)
# a = self.MergeSort_fun1(a[:temp], a[temp:])
# print(a)
a = self.merge(a[:temp], a[temp:])
return a
def merge(self, left, right):
ans = []
p, q = 0, 0
while p<len(left) and q<len(right):
if left[p] <= right[q]:
ans.append(left[p])
p += 1
else:
ans.append(right[q])
q += 1
ans += left[p:]
ans += right[q:]
return ans
def merge_sort(self, List):
# print(List)
if len(List)<=1:
return List
num = len(List) // 2
left = self.merge_sort(List[:num])
right = self.merge_sort(List[num:])
        # print(self.merge(left, right))
return self.merge(left, right)
def MergeSort_fun1(self, left, right):
result = []
i, j = 0, 0
        while i<len(left) and j<len(right):  # note: interleave left and right by size; i and j act like pointers
if left[i] <= right[j]:
result.append(left[i])
i += 1
else:
result.append(right[j])
j += 1
        result += left[i:]  # list concatenation; .append() would nest the remainder as a sublist
result += right[j:]
return result
def MergeSort_fun2(self, List):
        if len(List) <= 1:  # this base case is required, otherwise the recursion never terminates; the bottom level always reaches one-element lists
return List
num = len(List) // 2
left = self.MergeSort_fun2(List[:num])
right = self.MergeSort_fun2(List[num:])
# print(self.MergeSort_fun1(left, right))
return self.MergeSort_fun1(left, right)
solu = Solution()
A = [-4,-1,0,3,10]
#A = [-7,-3,2,3,11]
print(solu.sortedSquares(A))
#print(solu.merge_sort(A))
#print(solu.MergeSort_fun2(A))
```
#### File: LeetCode-Answers/Python/problem1002.py
```python
from typing import List
class Solution:
def commonChars(self, A: List[str]) -> List[str]:
if len(A) <= 1:
return []
ans = []
keys = set(A[0])
for k in keys:
tmp = min(a.count(k) for a in A)
for _ in range(tmp):
ans.append(k)
return ans
```
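A hedged smoke test for `commonChars` (hypothetical words; set iteration order varies, so read the output as a multiset):
```python
print(Solution().commonChars(["bella", "label", "roller"]))  # expected multiset: ['e', 'l', 'l']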
#### File: LeetCode-Answers/Python/problem1003.py
```python
class Solution:
def isValid(self, S: str) -> bool:
stack = []
for s in iter(S):
if len(stack) >= 2 and ''.join(stack[-2:]) + s == 'abc':
stack.pop()
stack.pop()
else:
stack.append(s)
return not stack
S = "abc"
# S = "aabcbc"
S = "abcabcababcc"
S = "abccba"
S = "cababc"
solu = Solution()
print(solu.isValid(S))
```
#### File: LeetCode-Answers/Python/problem1005.py
```python
from typing import List
class Solution:
def largestSumAfterKNegations(self, A: List[int], K: int) -> int:
for _ in range(K):
# A.sort()
A = self.CountSort(A)
A[0] *= -1
return sum(A)
def CountSort(self, nums):
MIN = min(nums)
for k in range(len(nums)):
nums[k] -= MIN
tmp = [0] * (max(nums)+1)
for num in nums:
tmp[num] += 1
ans = []
for k in range(len(tmp)):
for _ in range(tmp[k]):
ans.append(k)
for k in range(len(ans)):
ans[k] += MIN
return ans
```
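A minimal sketch of calling this solution (hypothetical input):
```python
# One negation flips the smallest value: [4, 2, 3] becomes [-2, 3, 4], sum 5.
print(Solution().largestSumAfterKNegations([4, 2, 3], 1))  # expected: 5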
#### File: LeetCode-Answers/Python/problem1010.py
```python
class Solution:
def numPairsDivisibleBy60(self, time: list) -> int:
# # ans = 0
# # for p in range(len(time)):
# # for q in range(p+1, len(time)):
# # if (time[p] + time[q]) % 60 == 0:
# # ans += 1
# # return ans
# ans_1, ans_2 = 0, 0
# time = [t%60 for t in time]
# d = {}
# # for k in range(len(time)):
# # if time[k] == 0:
# # try:
# # d[0]
# # ans += 1
# # d[0] = k
# # except:
# # d[0] = k
# # else:
# # try:
# # d[60-time[k]]
# # ans += 1
# # except:
# # d[time[k]] = k
# # return ans
# for t in time:
# try:
# d[t] += 1
# except:
# d[t] = 1
# # print(d)
# for t in d.keys():
# if t == 0 or t == 30:
# num = 1
# for n in range(1, d[t]+1):
# num *= n
# ans_1 += num//2
# # print(t, num//2)
# else:
# try:
# ans_2 += d[t]*d[60-t]
# except:
# pass
# return ans_1 + ans_2//2
# from collections import defaultdict
# d = defaultdict(int)
# d = {}
# ans = 0
# time = [x%60 for x in time]
# for t in time:
# r = (60-t) % 60
# if r in d.keys():
# ans += d[r]
# else:
# d[t] += 1
# return ans
        # preprocessing: reduce every element modulo 60
        time = [t % 60 for t in time]
        from collections import defaultdict
        d = defaultdict(int)
        res = 0
        for t in time:
            # 1. count matches first
            # the extra % 60 handles special cases like [0, 0, 0]
            residue = (60 - t) % 60
            if residue in d:
                res += d[residue]
            # 2. then record the frequency
            d[t] += 1
return res
solu = Solution()
# time = [30,20,150,100,40]
time = [60, 60, 60]
print(solu.numPairsDivisibleBy60(time))
# solu.numPairsDivisibleBy60(time)
```
#### File: LeetCode-Answers/Python/problem1013.py
```python
from typing import List
class Solution:
def canThreePartsEqualSum(self, A: List[int]) -> bool:
sum_ = sum(A)
tmp = sum_//3
if 3*tmp != sum_:
return False
sum_ = 0
mark = 0
for a in A:
sum_ += a
if sum_ == tmp:
sum_ = 0
mark += 1
return mark == 3
solu = Solution()
A = [0,2,1,-6,6,-7,9,1,2,0,1]
A = [0,2,1,-6,6,7,9,-1,2,0,1]
# A = [3,3,6,5,-2,2,5,1,-9,4]
print(solu.canThreePartsEqualSum(A))
```
#### File: LeetCode-Answers/Python/problem1014.py
```python
from typing import List
class Solution:
def maxScoreSightseeingPair(self, A: List[int]) -> int:
place_0_value = A[0] + 0
ans = 0
for k in range(1, len(A)):
ans = max(ans, place_0_value + A[k]-k)
place_0_value = max(place_0_value, A[k]+k)
return ans
```
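A quick check (the standard example for this problem, used here hypothetically): the best pair is i = 0, j = 2.
```python
# 8 + 5 - (2 - 0) = 11.
print(Solution().maxScoreSightseeingPair([8, 1, 5, 2, 6]))  # expected: 11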
#### File: LeetCode-Answers/Python/problem1029.py
```python
from typing import List
class Solution:
def twoCitySchedCost(self, costs: List[List[int]]) -> int:
# costs.sort(reverse = True, key = lambda x: abs(x[0]-x[1]))
# A, B = [], []
# d = {}
# for cost in costs:
# if cost[0] < cost[1]:
# A.append(cost)
# else:
# B.append(cost)
# if str(cost) in d:
# d[str(cost)] += 1
# else:
# d[str(cost)] = 1
# A.sort(reverse = True, key = lambda x: x[0])
# B.sort(reverse = True, key = lambda x: x[1])
# ans = 0
# for cost in costs:
# if cost[0] < cost[1]:
# ans += cost[0]
# d[str(cost)] -= 1
# mark = 0
# while B:
# tmp = B.pop()
# if d[str(tmp)] > 0:
# ans += tmp[1]
# d[str(tmp)] -= 1
# mark = 1
# break
# if mark == 0:
# while A:
# tmp = A.pop()
# if d[str(tmp)] > 0:
# ans += tmp[1]
# d[str(tmp)] -= 1
# mark = 1
# break
# if mark == 0:
# return ans
# else:
# ans += cost[1]
# d[str(cost)] -= 1
# mark = 0
# while A:
# tmp = A.pop()
# if d[str(tmp)] > 0:
# ans += tmp[0]
# d[str(tmp)] -= 1
# mark = 1
# break
# if mark == 0:
# while B:
# tmp = B.pop()
# if d[str(tmp)] > 0:
# ans += tmp[0]
# d[str(tmp)] -= 1
# mark = 1
# break
# if mark == 0:
# return ans
        costs.sort(key = lambda x: x[0] - x[1]) # x[0] - x[1] measures how favorable city A is for this person
ans = 0
n = len(costs) // 2
# for k in range(0, n):
# ans += costs[k][0] + costs[k+n][1]
# return ans
for k in range(0, n):
ans += costs[k][0]
for k in range(n, 2*n):
ans += costs[k][1]
return ans
solu = Solution()
costs = [[10,20],[30,200],[400,50],[30,20]]
print(solu.twoCitySchedCost(costs))
```
#### File: LeetCode-Answers/Python/problem1037.py
```python
from typing import List
import copy
class Solution:
def isBoomerang(self, points: List[List[int]]) -> bool:
tmp_points = copy.deepcopy(points)
for k in range(len(tmp_points)):
tmp_points[k] = tuple(tmp_points[k])
if len(set(tmp_points)) != len(tmp_points):
return False
del tmp_points
p1, p2, p3 = points[0], points[1], points[2]
if p1[0] == p2[0]:
k1 = 'inf'
else:
k1 = str(float( (p2[1]-p1[1])/(p2[0]-p1[0]) ))
if p2[0] == p3[0]:
k2 = 'inf'
else:
k2 = str(float( (p3[1]-p2[1])/(p3[0]-p2[0]) ))
# if k1.isdigit(): print(1); k1 = float(k1)
# if k2.isdigit(): print(2); k1 = float(k2)
# if k1 != 'inf': print(1); k1 = float(k1)
# if k2 != 'inf': print(2); k2 = float(k2)
# print(k1, k2)
return k1 != k2
solu = Solution()
points = [[1,1],[2,3],[3,2]]
# points = [[1,1],[2,2],[3,3]]
points = [[1,0],[0,0],[2,0]]
print(solu.isBoomerang(points))
```
#### File: LeetCode-Answers/Python/problem1047.py
```python
class Solution:
def removeDuplicates(self, S: str) -> str:
        '''
        a bit like a match-and-eliminate puzzle game
        '''
stack = [S[0]]
p, q = 1, 0
while p < len(S):
# p += 1
if q < 0:
stack.append(S[p])
q += 1
else:
# print(p, q, stack)
if S[p] == stack[q]:
stack.pop()
q -= 1
else:
stack.append(S[p])
q += 1
p += 1
return ''.join(stack)
'''
accaca
'''
# solu = Solution()
# S = 'accaca'
# print(solu.removeDuplicates(S))
# class Solution:
# def removeDuplicates(self, S: str) -> str:
# output = []
# for ch in S:
# if output and ch == output[-1]:
# output.pop()
# else:
# output.append(ch)
# return ''.join(output)
# Author: LeetCode
# Link: https://leetcode-cn.com/problems/remove-all-adjacent-duplicates-in-string/solution/shan-chu-zi-fu-chuan-zhong-de-suo-you-xiang-lin-zh/
# Source: LeetCode (leetcode-cn)
# Copyright belongs to the author. Commercial reprints require the author's permission; non-commercial reprints must cite the source.
```
#### File: LeetCode-Answers/Python/problem1051.py
```python
class Solution:
def heightChecker(self, heights: list) -> int:
temp = self.merge_sort(heights)
res = 0
for k in range(len(heights)):
if temp[k] != heights[k]:
res += 1
return res
def merge(self, left, right):
p, q =0, 0
ans = []
while p<len(left) and q<len(right):
if left[p]<=right[q]:
ans.append(left[p])
p += 1
else:
ans.append(right[q])
q += 1
ans += left[p:]
ans += right[q:]
return ans
def merge_sort(self, List):
if len(List)<=1:
return List
num = len(List) // 2
left = self.merge_sort(List[:num])
right = self.merge_sort(List[num:])
return self.merge(left, right)
solu = Solution()
heights = [1,1,4,2,1,3]
print(solu.heightChecker(heights))
```
#### File: LeetCode-Answers/Python/problem1078.py
```python
from typing import List
class Solution:
def findOcurrences(self, text: str, first: str, second: str) -> List[str]:
# d = {}
# d[first] = second
# text = text.split()
# ans = []
# for k in range(len(text)-2):
# if text[k] in d:
# if text[k+1] == d[text[k]]:
# ans.append(text[k+2])
# else:
# pass
# else:
# pass
# return ans
text = text.split()
tmp = zip(text, text[1:], text[2:])
ans = []
for e in tmp:
if e[0] == first and e[1] == second:
ans.append(e[2])
return ans
text = 'alice is a good girl she is a good student'
first = 'a'
second = 'good'
text = 'we will we will rock you'
first = 'we'
second = 'will'
solu = Solution()
print(solu.findOcurrences(text, first, second))
```
#### File: LeetCode-Answers/Python/problem1108.py
```python
class Solution:
def defangIPaddr(self, address: str) -> str:
# while '.' in address:
# address.replace('.','[.]')
# return adress
# return address.replace('.','[.]')
stack = []
for k in address:
if k != '.':
stack.append(k)
else:
stack.append('[')
stack.append(k)
stack.append(']')
return ''.join(stack)
solu = Solution()
address = "1.1.1.1"
#address = "255.100.50.0"
print(solu.defangIPaddr(address))
```
#### File: LeetCode-Answers/Python/problem1119.py
```python
class Solution:
def removeVowels(self, S: str) -> str:
s = set(['a','e','i','o','u'])
ans = []
for s0 in S:
            if s0 not in s: # membership tests on a set are O(1)
ans.append(s0)
# S = ans
return ''.join(ans)
```
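A hedged usage sketch (hypothetical string):
```python
print(Solution().removeVowels("leetcodeisacommunityforcoders"))  # expected: ltcdscmmntyfrcdrs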
#### File: LeetCode-Answers/Python/problem1128.py
```python
from typing import List
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
def numEquivDominoPairs(self, dominoes: List[List[int]]) -> int:
# d = {}
# t = {}
# for dmn in dominoes:
# if (tuple(dmn), tuple(reversed(dmn))) in d:
# t[(tuple(dmn), tuple(reversed(dmn)))] += d[(tuple(dmn), tuple(reversed(dmn)))]
# d[(tuple(dmn), tuple(reversed(dmn)))] += 1
# elif (tuple(reversed(dmn)), tuple(dmn)) in d:
# t[(tuple(reversed(dmn)), tuple(dmn))] += d[(tuple(reversed(dmn)), tuple(dmn))]
# d[(tuple(reversed(dmn)), tuple(dmn))] += 1
# else:
# d[(tuple(dmn), tuple(reversed(dmn)))] = 1
# t[(tuple(dmn), tuple(reversed(dmn)))] = 0
# # print(d, t)
# # print(t)
# return sum(t.values())
d = {}
# t = {}
for dmn in dominoes:
if (tuple(dmn), tuple(reversed(dmn))) in d:
# t[(tuple(dmn), tuple(reversed(dmn)))] += d[(tuple(dmn), tuple(reversed(dmn)))]
d[(tuple(dmn), tuple(reversed(dmn)))] += 1
elif (tuple(reversed(dmn)), tuple(dmn)) in d:
# t[(tuple(reversed(dmn)), tuple(dmn))] += d[(tuple(reversed(dmn)), tuple(dmn))]
d[(tuple(reversed(dmn)), tuple(dmn))] += 1
else:
d[(tuple(dmn), tuple(reversed(dmn)))] = 1
# t[(tuple(dmn), tuple(reversed(dmn)))] = 0
# print(d, t)
# print(t)
return sum([sum(range(v)) for v in d.values()])
solu = Solution()
dominoes = [[1,2],[2,1],[3,4],[5,6]]
dominoes = [[1,2],[2,1],[1,2],[2,1],[3,4],[5,6]]
print(solu.numEquivDominoPairs(dominoes))
from scipy.special import comb, perm
mark = 1
for k in range(2, 100):
if comb(k, 2) != sum(range(0, k)):
mark = 0
break
print(mark == 1)
```
#### File: LeetCode-Answers/Python/problem1134.py
```python
class Solution:
def isArmstrong(self, N: int) -> bool:
nums = [int(x) for x in str(N)]
k = len(nums)
Sum = sum([x**k for x in nums])
return Sum == N
```
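A quick check (hypothetical input): 153 is the classic Armstrong number.
```python
# 1**3 + 5**3 + 3**3 == 153.
print(Solution().isArmstrong(153))  # expected: True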
#### File: LeetCode-Answers/Python/problem1150.py
```python
from typing import List
from collections import Counter
# class Solution:
# def isMajorityElement(self, nums: List[int], target: int) -> bool:
# d = Counter(nums)
# return d[target] > len(nums)//2
# class Solution:
# def isMajorityElement(self, nums: List[int], target: int) -> bool:
# ans = 0
# for num in nums:
# if num == target:
# ans += 1
# return ans > len(target)//2
class Solution:
def isMajorityElement(self, nums: List[int], target: int) -> bool:
if not nums:
return False
if len(nums) == 1:
return nums[0] == target
p, q = 0, len(nums)-1
while p < q:
if nums[p] > target:
return False
elif nums[p] < target:
p += 1
if nums[q] < target:
return False
elif nums[q] > target:
q -= 1
if nums[p] == nums[q] == target:
return q - p + 1 > len(nums)//2
```
#### File: LeetCode-Answers/Python/problem1184.py
```python
from typing import List
class Solution:
def distanceBetweenBusStops(self, distance: List[int], start: int, destination: int) -> int:
start, destination = min(start, destination), max(start, destination)
# print(start, destination)
tmp = destination - len(distance)
# print(tmp)
ans1 = sum(distance[start:destination])
# print(distance[start:destination])
# ans2 = sum(distance[start-1:tmp-1:-1])
ans2 = sum(distance[0:start]) + sum(distance[destination:])
# print(distance[start-1:tmp-1:-1])
return min(ans1, ans2)
# solu = Solution()
# distance, start, destination = [1,2,3,4], 0, 1
# distance, start, destination = [1,2,3,4], 0, 2
# distance, start, destination = [1,2,3,4], 0, 3
# distance, start, destination = [7,10,1,12,11,14,5,0], 7, 2
# print(solu.distanceBetweenBusStops(distance, start, destination))
```
#### File: LeetCode-Answers/Python/problem1196.py
```python
from typing import List
class Solution:
def maxNumberOfApples(self, arr: List[int]) -> int:
arr.sort()
ans = 0
weight = 0
for a in arr:
weight += a
if weight <= 5000:
ans += 1
else:
break
return ans
```
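A minimal usage sketch (hypothetical weights; the 5000-unit basket limit comes from the problem statement):
```python
# Total weight 1450 <= 5000, so all four apples fit.
print(Solution().maxNumberOfApples([100, 200, 150, 1000]))  # expected: 4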
#### File: LeetCode-Answers/Python/problem1249.py
```python
class Solution:
def minRemoveToMakeValid(self, s: str) -> str:
mark = 0
num_left = 0
stack = []
for letter in s:
if letter == '(':
mark += 1
num_left += 1
elif letter == ')':
mark -= 1
if mark < 0:
mark += 1
else:
                    stack.append(letter) # at this point the stack contains no invalid ')'; an invalid right paren is one with no matching '('
if mark == 0:
return ''.join(stack)
else:
s = []
tmp = num_left - mark
count = 0
for letter in stack:
if letter == '(':
if count < tmp:
s.append(letter)
count += 1
else:
s.append(letter)
return ''.join(s)
s = "lee(t(c)o)de)"
# s = "a)b(c)d"
# s = "(a(b(c)d)"
# s = "())()((("
solu = Solution()
print(solu.minRemoveToMakeValid(s))
```
#### File: LeetCode-Answers/Python/problem1252.py
```python
from typing import List
class Solution:
def oddCells(self, n: int, m: int, indices: List[List[int]]) -> int:
mat = [[0]*m for _ in range(n)]
for ind in indices:
n, m = ind[0], ind[1]
mat[n][m] += 1
ans = 0
for p in range(len(mat)):
for q in range(len(mat[0])):
if mat[p][q] % 2 != 0:
ans += 1
return ans
n = 2; m = 3; indices = [[0,1],[1,1]]
solu = Solution()
print(solu.oddCells(n, m, indices))
```
#### File: LeetCode-Answers/Python/problem1260.py
```python
from typing import List
class Solution:
def shiftGrid(self, grid: List[List[int]], k: int) -> List[List[int]]:
m, n = len(grid), len(grid[0])
k %= m*n
ans = []
for g in grid:
for e in g:
ans.append(e)
tmp = []
for p in range(len(ans)):
tmp.append(ans[p-k])
ans = []
for p in range(0, len(tmp), n):
ans.append(list(tmp[p:p+n]))
return ans
```
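A hedged smoke test (hypothetical grid): one shift moves every element forward one slot, wrapping the last element to the front.
```python
print(Solution().shiftGrid([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 1))
# expected: [[9, 1, 2], [3, 4, 5], [6, 7, 8]]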
#### File: LeetCode-Answers/Python/problem1299.py
```python
from typing import List
class Solution:
def replaceElements(self, arr: List[int]) -> List[int]:
max_ = arr[-1]
for k in range(len(arr)-2, -1, -1):
tmp = max_
max_ = max(max_, arr[k])
arr[k] = tmp
arr[-1] = -1
return arr
solu = Solution()
arr = [17,18,5,4,6,1]
print(solu.replaceElements(arr))
```
#### File: LeetCode-Answers/Python/problem1346.py
```python
from typing import List
class Solution:
def checkIfExist(self, arr: List[int]) -> bool:
# d = {}
# arr.sort(key=lambda x: abs(x), reverse=True)
# for i, a in enumerate(arr):
# if 2 * a in d:
# return True
# else:
# d[a] = i
# return False
d = set()
arr.sort(key = lambda x: abs(x), reverse=True)
for a in arr:
if 2 * a in d:
return True
else:
d.add(a)
return False
```
#### File: LeetCode-Answers/Python/problem1365_test02.py
```python
# smallerThanIt = {}
# for t in tmp:
# smallerThanIt[t] = lastIdx[t] - (count[t] - 1)
# ans = []
# for num in nums:
# ans.append(smallerThanIt[num])
# return ans
from typing import List
class Solution:
def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]:
tmp = [0] * (max(nums) + 1)
for num in nums:
tmp[num] += 1
ans = []
for num in nums:
ans.append(sum(tmp[:num]))
return ans
```
#### File: LeetCode-Answers/Python/problem1385.py
```python
from typing import List
class Solution:
def findTheDistanceValue(self, arr1: List[int], arr2: List[int], d: int) -> int:
# count = 0
# for a1 in arr1:
# tmp = 0
# for a2 in arr2:
# if abs(a1 - a2) > d:
# tmp += 1
# if tmp == len(arr2):
# count += 1
# return count
arr2.sort()
count = 0
for a1 in arr1:
idx = self.fun(a1, arr2)
# print(idx)
if 0 < idx < len(arr2) and arr2[idx] == a1:
if 0 > d:
count += 1
elif idx == 0:
if arr2[idx] - a1 > d:
count += 1
elif idx == len(arr2):
if a1 - arr2[idx - 1] > d:
count += 1
elif 0 < idx < len(arr2):
if a1 - arr2[idx - 1] > d and arr2[idx] - a1 > d:
count += 1
return count
def fun(self, a, nums):
        '''
        the index at which a would be placed in the ascending array nums
        '''
i, j = 0, len(nums) - 1
while i < j:
m = (i + j) // 2
if nums[m] < a:
i = m + 1
elif nums[m] > a:
j = m - 1
else:
return m
idx = min(i, j)
if idx < 0:
return 0
if idx > len(nums) - 1:
return (len(nums) - 1) + 1
if nums[idx] < a:
return idx + 1
else:
return (idx - 1) + 1
```
#### File: LeetCode-Answers/Python/problem1672.py
```python
from typing import List
class Solution:
def maximumWealth(self, accounts: List[List[int]]) -> int:
# import sys
# max_ = -sys.maxsize
# for account in accounts:
# if sum(account) > max_:
# max_ = sum(account)
# return max_
# accounts.sort(key=lambda x: sum(x), reverse=True)
# return sum(accounts[0])
return sum(sorted(accounts, key=lambda x: sum(x), reverse=True)[0])
```
#### File: LeetCode-Answers/Python/problemLCP19.py
```python
class Solution:
def minimumOperations(self, leaves: str) -> int:
# from collections import Counter
# d = Counter(leaves)
def isYellow(leaf):
if leaf == 'y':
return 1
else:
return 0
def isRed(leaf):
if leaf == 'r':
return 1
else:
return 0
        tmp = [[0, 0, 0] for _ in range(len(leaves))] # rows index the leaves, columns index the adjustment state
        # state 0
tmp[0][0] = isYellow(leaves[0])
for i, leaf in enumerate(leaves):
if i < 1:
continue
# print(i, leaf)
tmp[i][0] = tmp[i - 1][0] + isYellow(leaf)
# print(tmp)
        # state 1
# tmp[1][1] = min(tmp[0][0], tmp[0][1]) + isRed(leaves[1])
tmp[1][1] = tmp[0][0] + isRed(leaves[1])
for i, leaf in enumerate(leaves):
if i < 2:
continue
tmp[i][1] = min(tmp[i - 1][0], tmp[i - 1][1]) + isRed(leaf)
        # state 2
# tmp[2][2] = min(tmp[1][1], tmp[1][2]) + isYellow(leaves[2])
tmp[2][2] = tmp[1][1] + isYellow(leaves[2])
for i, leaf in enumerate(leaves):
if i < 3:
continue
tmp[i][2] = min(tmp[i - 1][1], tmp[i - 1][2]) + isYellow(leaf)
# print(tmp)
return tmp[len(leaves) - 1][2]
```
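The table-based DP above walks three states in order (red prefix, yellow middle, red suffix); the same recurrence fits in three rolling variables — a sketch, not code from the repository:
```python
def minimum_operations(leaves: str) -> int:
    INF = float('inf')
    s0 = s1 = s2 = INF  # min cost so far ending in state 0 / 1 / 2
    for i, leaf in enumerate(leaves):
        to_red = 1 if leaf == 'y' else 0    # cost to recolor this leaf red
        to_yellow = 1 - to_red              # cost to recolor this leaf yellow
        # update order matters: s2 reads the previous s1, s1 the previous s0
        s2 = (min(s1, s2) + to_red) if i >= 2 else INF
        s1 = (min(s0, s1) + to_yellow) if i >= 1 else INF
        s0 = (s0 + to_red) if i >= 1 else to_red
    return s2
```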
#### File: LeetCode-Answers/Python/problemLCP1.py
```python
from typing import List
class Solution:
def game(self, guess: List[int], answer: List[int]) -> int:
ans = 0
for k in range(len(guess)):
if guess[k] == answer[k]:
ans += 1
return ans
```
#### File: LeetCode-Answers/Python/problem面试题05.py
```python
class Solution:
def replaceSpace(self, s: str) -> str:
# return s.replace(' ', '%20')
ans = []
for letter in s:
if letter == ' ':
ans.append('%20')
else:
ans.append(letter)
return ''.join(ans)
solu = Solution()
s = "We are happy."
s = ' '
ans = solu.replaceSpace(s)
print(ans)
```
#### File: LeetCode-Answers/Python/剑指offer53-I.py
```python
from typing import List
class Solution:
def search(self, nums: List[int], target: int) -> int:
# if len(nums) == 1:
# return 1 if nums[0] == target else 0
tmp = []
i, j = 0, len(nums) - 1
while i <= j:
m = (i + j) // 2
if nums[m] == target:
tmp.append(m)
if i < len(nums) and nums[i] == target:
tmp.append(i)
i += 1
elif nums[m] > target:
j = m - 1
if j >= 0 and nums[j] == target:
tmp.append(j)
else:
i = m + 1
if i < len(nums) and nums[i] == target:
tmp.append(i)
return len(set(tmp))
```
#### File: LeetCode-Answers/Python/堆排序_递归.py
```python
class Solution:
def sortArray(self, nums: list) -> list:
# return self.heap_sort(nums)
self.heap_sort(nums)
return nums
# def adjust_heap(self, nums, size, k):
# lchild = 2*k+1
# rchild = 2*k+2
# Max = k
# if k < size // 2: # while lchild < size:
# if lchild < size and nums[lchild] > nums[Max]:
# Max = lchild
# if rchild < size and nums[rchild] > nums[Max]:
# Max = rchild
# if Max != k:
# nums[Max], nums[k] = nums[k], nums[Max]
# self.adjust_heap(nums, size, Max)
## else:
## break
## k = Max
## lchild = 2*k+1
## rchild = 2*k+2
#
# def build_heap(self, nums, size):
# for k in range(size//2)[::-1]:
# self.adjust_heap(nums, size, k)
#
# def heap_sort(self, nums):
# size = len(nums)
# self.build_heap(nums, size)
# for k in range(size)[::-1]:
# nums[0], nums[k] = nums[k], nums[0]
# self.adjust_heap(nums, k, 0)
# return nums
def adjust_heap(self, nums, size, k):
lchild = 2*k + 1
rchild = 2*k + 2
Max = k
if k < size//2:
            if lchild < size and nums[lchild] > nums[Max]:
                Max = lchild
            if rchild < size and nums[rchild] > nums[Max]:
                Max = rchild
if Max != k:
nums[k], nums[Max] = nums[Max], nums[k]
self.adjust_heap(nums, size, Max)
def build_heap(self, nums, size):
for k in range(size//2)[::-1]:
self.adjust_heap(nums, size, k)
def heap_sort(self, nums):
size = len(nums)
self.build_heap(nums, size)
for k in range(size)[::-1]:
nums[0], nums[k] = nums[k], nums[0]
self.adjust_heap(nums, k, 0)
    # int[] a = new int[]{1,23,234,234,22,1,-1,0,3};
    # for (int i = a.length/2 -1; i >= 0; i--) { // build the heap; nodes beyond a.length/2 have no children
    #     buildHeap(a, i, a.length);
    # }
    # System.out.println("Max-heap built before sorting:");
    # for (int i : a) {
    #     System.out.print(i + " ");
    # }
    #
    # System.out.println();
    #
    # // repeatedly swap the root out, shrink the heap, and rebuild it
    # for (int i = a.length - 1; i >= 0; i--) {
    #     swap(a, 0, i); // swap the first and last elements of the heap each pass
    #     buildHeap(a, 0, i); // passing i as the size drops the last element and rebuilds the rest
    #     System.out.println("Array after pass " + (a.length - i) + ":");
    #     for (int j : a) {
    #         System.out.print(j + " ");
    #     }
    #     System.out.println();
    # }
#
#
# public class HeapSort {
# public static void buildHeap(int[] a, int i, int n) {
# int leftChild = i * 2 + 1;
# int rightChild = i * 2 + 2;
# int largest = i;
    #         /**
    #          * Loop to find the largest value among the parent and its two children,
    #          * and swap the largest node up into the parent position.
    #          */
# while (leftChild < n) {
# if (a[leftChild] > a[i]) {
# largest = leftChild;
# }
# if (rightChild < n && a[rightChild] > a[largest]) {
# largest = rightChild;
# }
# if (largest != i) {
# swap(a, largest, i);
# } else {
# break;
# }
#
# i = largest;
# leftChild = i * 2 + 1;
# rightChild = i * 2 + 2;
# }
#
# }
#
# public static void swap(int[] a, int i, int j) {
# int tmp = a[i];
# a[i] = a[j];
# a[j] = tmp;
# }
#
# public static void main(String[] args) {
# int[] a = new int[]{1,23,234,234,22,1,-1,0,3};
    #         for (int i = a.length/2 -1; i >= 0; i--) { // build the heap; nodes beyond a.length/2 have no children
    #             buildHeap(a, i, a.length);
    #         }
    #         System.out.println("Max-heap built before sorting:");
    #         for (int i : a) {
    #             System.out.print(i + " ");
    #         }
    #
    #         System.out.println();
    #
    #         // repeatedly swap the root out, shrink the heap, and rebuild it
    #         for (int i = a.length - 1; i >= 0; i--) {
    #             swap(a, 0, i); // swap the first and last elements of the heap each pass
    #             buildHeap(a, 0, i); // passing i as the size drops the last element and rebuilds the rest
    #             System.out.println("Array after pass " + (a.length - i) + ":");
# for (int j : a) {
# System.out.print(j + " ");
# }
# System.out.println();
# }
#
# }
# }
solu = Solution()
nums = [3,7,6,4,1,9]
print(solu.sortArray(nums))
```
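The recursive `adjust_heap` can also be written iteratively, as the commented-out `k = Max` lines hint; a sketch of the standard iterative sift-down, not code from the repository:
```python
def sift_down(nums, size, k):
    # restore the max-heap property for the subtree rooted at k within nums[:size]
    while 2 * k + 1 < size:
        child = 2 * k + 1                 # left child
        if child + 1 < size and nums[child + 1] > nums[child]:
            child += 1                    # right child is the larger one
        if nums[child] <= nums[k]:
            break                         # heap property already holds
        nums[k], nums[child] = nums[child], nums[k]
        k = child
```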
#### File: LeetCode-Answers/Python/最长上升子序列.py
```python
class Solution:
def lengthOfLIS(self, nums: list) -> int:
        '''
        Scan the array from start to end, treating each element in turn
        as the tail of an increasing subsequence; dynamic programming
        over that state with the corresponding transition equation.
        '''
# if not nums:
# return 0
# dp = [1] * len(nums)
# for p in range(1, len(nums)):
# temp = [0]
# for q in range(p):
## print(p,q)
## print(dp[p],dp[q])
# if nums[p] > nums[q]:
## print(-1)
# temp.append(dp[q])
## print(temp)
# dp[p] = max(temp) + 1
## print(dp)
# return max(dp)
if not nums:
return 0
dp = [1] * len(nums)
for p in range(1, len(nums)):
for q in range(p):
if nums[p] > nums[q]:
dp[p] = max(dp[p], dp[q]+1)
return max(dp)
solu = Solution()
nums = [10,9,2,5,3,7,101,18]
nums = []
nums = [1,3,6,7,9,4,10,5,6]
print(solu.lengthOfLIS(nums))
``` |
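The quadratic DP above can be replaced by the O(n log n) patience-sorting variant; a sketch, not code from the repository:
```python
from bisect import bisect_left
def length_of_lis(nums: list) -> int:
    tails = []  # tails[k] = smallest possible tail of an increasing subsequence of length k + 1
    for x in nums:
        i = bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)   # x extends the longest subsequence found so far
        else:
            tails[i] = x      # x gives a smaller tail for length i + 1
    return len(tails)
# length_of_lis([10, 9, 2, 5, 3, 7, 101, 18]) == 4
```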
{
"source": "105071076/pw-projects",
"score": 4
} |
#### File: stancode projects/breakout game/breakoutgraphics.py
```python
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.gui.events.mouse import onmouseclicked, onmousemoved
import random
BRICK_SPACING = 5 # Space between bricks (in pixels). This space is used for horizontal and vertical spacing.
BRICK_WIDTH = 40       # Width of a brick (in pixels).
BRICK_HEIGHT = 15 # Height of a brick (in pixels).
BRICK_ROWS = 10 # Number of rows of bricks.
BRICK_COLS = 10 # Number of columns of bricks.
BRICK_OFFSET = 50 # Vertical offset of the topmost brick from the window top (in pixels).
BALL_RADIUS = 10 # Radius of the ball (in pixels).
PADDLE_WIDTH = 75 # Width of the paddle (in pixels). 75
PADDLE_HEIGHT = 15 # Height of the paddle (in pixels). 15
PADDLE_OFFSET = 100 # Vertical offset of the paddle from the window bottom (in pixels).
INITIAL_Y_SPEED = 7 # Initial vertical speed for the ball.
MAX_X_SPEED = 5 # Maximum initial horizontal speed for the ball.
class BreakoutGraphics:
def __init__(self, ball_radius=BALL_RADIUS, paddle_width=PADDLE_WIDTH,
paddle_height=PADDLE_HEIGHT, paddle_offset=PADDLE_OFFSET,
brick_rows=BRICK_ROWS, brick_cols=BRICK_COLS,
brick_width=BRICK_WIDTH, brick_height=BRICK_HEIGHT,
brick_offset=BRICK_OFFSET, brick_spacing=BRICK_SPACING,
title='Breakout'):
self.brick_rows = brick_rows
self.brick_cols = brick_cols
# Create a graphical window, with some extra space
self.window_width = brick_cols * (brick_width + brick_spacing) - brick_spacing
self.window_height = brick_offset + 3 * (brick_rows * (brick_height + brick_spacing) - brick_spacing)
self.window = GWindow(width=self.window_width, height=self.window_height, title=title)
self.ball_radius = BALL_RADIUS
self.ball = GOval(ball_radius * 2, ball_radius * 2)
        self.ball.filled = True
# Center a filled ball in the graphical window
self.window.add(self.ball, x=(self.window.width - self.ball.width) / 2,
y=(self.window.height - self.ball.height) / 2)
# Create a paddle
self.paddle = GRect(width=paddle_width, height=paddle_height)
self.paddle.filled = True
self.paddle_offset = PADDLE_OFFSET
self.paddle.fill_color = 'black'
self.window.add(self.paddle, x=(self.window.width - self.paddle.width) / 2,
y=self.window.height - paddle_offset)
# Draw bricks
for i in range(brick_rows):
for j in range(brick_cols):
self.brick = GRect(width=brick_width, height=brick_height)
                self.brick.filled = True
self.brick.color = 'black'
self.window.add(self.brick, x=(brick_width * j + brick_spacing * j),
y=brick_offset + brick_height * i + brick_spacing * i)
                if (i + j) % 2 == 0:
self.brick.fill_color = 'ivory'
else:
self.brick.fill_color = 'black'
# Create label when user hits 10 balls
self.half = GLabel("WOW! TEN BRICKS ALREADY! KEEP GOING!")
self.half.font = '-15'
# Score cnt
self.score_cnt = 0
self.score = GLabel('SCORE:' + str(self.score_cnt))
self.score.font = 'Helvetica-30-bold'
self.score.color = 'silver'
self.vx = 0
self.vy = 0
# Life
self.life = GLabel('LIVES:')
self.life.font = 'Helvetica-30'
self.life.color = 'plum'
self.life1 = GOval(20, 20)
self.life1.filled = True
self.life1.fill_color = 'plum'
self.life1.color = 'plum'
self.life2 = GOval(20, 20)
self.life2.filled = True
self.life2.fill_color = 'plum'
self.life2.color = 'plum'
self.life3 = GOval(20, 20)
self.life3.filled = True
self.life3.fill_color = 'plum'
self.life3.color = 'plum'
# Default initial velocity for the ball
self.__dx = 0
self.__dy = 0
self.lives = 3
onmouseclicked(self.start_ball)
onmousemoved(self.track_paddle)
def start_ball(self, event):
if (self.__dx == 0) and (self.__dy == 0) and (not self.lives == 0):
self.window.add(self.ball, x=(self.window.width - self.ball.width) / 2,
y=(self.window.height - self.ball.height) / 2)
self.ball.fill_color = 'black'
self.__dx = random.randint(1, MAX_X_SPEED)
self.__dy = INITIAL_Y_SPEED
if random.random() > 0.5:
self.__dx = -self.__dx
if random.random() > 0.5:
self.__dy = -self.__dy
return True
def track_paddle(self, e):
self.paddle.x = e.x - self.paddle.width / 2
# make sure the paddle will always be in the window
if e.x <= 0:
self.paddle.x = 0
elif e.x > self.window.width - self.paddle.width / 2:
self.paddle.x = self.window.width - self.paddle.width
def check_wall(self):
if self.ball.x <= 0 or self.ball.x + self.ball.width > self.window.width:
self.__dx = -self.__dx
if self.ball.y <= 0:
self.__dy = -self.__dy
def check_pts(self):
p1 = self.window.get_object_at(self.ball.x, self.ball.y)
p2 = self.window.get_object_at(self.ball.x, self.ball.y + self.ball_radius * 2)
p3 = self.window.get_object_at(self.ball.x + self.ball_radius * 2, self.ball.y)
p4 = self.window.get_object_at(self.ball.x + self.ball_radius * 2,
self.ball.y + self.ball_radius * 2)
        # determine which item the ball hits, checking the probe points in the
        # original priority order (p2, p4, p1, p3)
        for p in (p2, p4, p1, p3):
            if p is None:
                continue
            if self.ball.y > self.window.height / 2:
                # lower half of the window: treat the hit as a paddle bounce
                if self.__dy > 0:
                    self.__dy = -self.__dy
                self.ball.fill_color = 'red'
            else:
                hud_items = (self.half, self.score, self.life,
                             self.life1, self.life2, self.life3)
                if p not in hud_items:
                    self.window.remove(p)
                    self.score_cnt += 1
                    self.score.text = ("SCORE: " + str(self.score_cnt))
                self.__dy = -self.__dy
                self.ball.fill_color = 'gold'
            break
def score_run(self):
if self.vx == 0 and self.vy == 0:
self.vx = 5
self.vy = 3
elif (self.score.x <= 0) or (self.score.x + self.score.width >= self.window.width):
self.vx = -self.vx
self.score.color = 'salmon'
elif (self.score.y - self.score.height <= 0) or (self.score.y >= self.window.height)and self.vy > 0:
self.vy = -self.vy
self.score.color = 'powderblue'
self.score.move(self.vx, self.vy)
def cheer(self):
# Cheering words
obj = self.window.get_object_at(x=(self.window.width - self.half.width) / 2,
y=(self.window.height - self.half.height) / 2)
if obj is None and self.score_cnt == 10:
self.window.add(self.half, x=(self.window.width - self.half.width) / 2,
y=(self.window.height - self.half.height) / 2)
if self.score_cnt == 15:
self.window.remove(self.half)
def get_dx(self):
return self.__dx
def get_dy(self):
return self.__dy
def set_dx(self, new_speed):
self.__dx = -self.__dx
def set_dy(self, new_speed):
self.__dy = -self.__dy
def set2_dx(self, new_speed2):
self.__dx = 0
def set2_dy(self, new_speed2):
self.__dy = 0
```
#### File: stancode projects/hangman/hangman.py
```python
import random
# This constant controls the number of guess the player has.
N_TURNS = 7
def main():
"""
TODO: Play hangman and count the lives.
"""
ans = random_word()
intro(ans)
live_cnt = N_TURNS
temp_ans = ''
for i in range(len(ans)):
temp_ans += '-'
while True:
if live_cnt == 0:
break
input_ch = input('Your guess: ')
input_ch = input_ch.upper() # case-insensitive
if not input_ch.isalpha():
print("illegal format.")
else:
if len(input_ch) != 1:
print("illegal format.")
else:
temp_ans = check(ans,temp_ans,input_ch)
if temp_ans == ans:
print("You are correct!\nYou win!!")
print("The word was: "+ans)
break
print("The word looks like " + temp_ans)
                if ans.find(input_ch) == -1:
                    live_cnt = live_cnt - 1
                    if live_cnt != 0:
                        print("There is no " + input_ch + "'s in the word.")
                        print("You have " + str(live_cnt) + " guesses left.")
                    else:
                        print("You are completely hung :(")
                        print("The word was: " + ans)
                else:
                    print("You have " + str(live_cnt) + " guesses left.")
def intro(ans):
"""
The introduction of the game.
:param ans: The word to guess. It shows the number of the letters.
"""
word = ''
for ch in ans:
word += '-'
print('The word looks like: '+word)
print('You have '+str(N_TURNS)+' guesses left.')
def random_word():
num = random.choice(range(9))
if num == 0:
return "NOTORIOUS"
elif num == 1:
return "GLAMOROUS"
elif num == 2:
return "CAUTIOUS"
elif num == 3:
return "DEMOCRACY"
elif num == 4:
return "BOYCOTT"
elif num == 5:
return "ENTHUSIASTIC"
elif num == 6:
return "HOSPITALITY"
elif num == 7:
return "BUNDLE"
elif num == 8:
return "REFUND"
def check(ans, temp_ans, input_ch):
"""
Check if the user guessed the right letter.
:param ans: The correct word string.
:param temp_ans:Every temporarily answer when the user guess a letter.
:param input_ch: The character the user input.
:return: return to the temporarily answer when the user do a new guess.
"""
for i in range(len(ans)):
if input_ch in ans[i]:
temp_ans = temp_ans[:i] + ans[i] + temp_ans[i+1:]
return temp_ans
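# Example: check('APPLE', '-----', 'P') returns '-PP--'.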
##### DO NOT EDIT THE CODE BELOW THIS LINE #####
if __name__ == '__main__':
main()
``` |
{
"source": "1052847690/3dpwn",
"score": 2
} |
#### File: 3dpwn/lib/chromium.py
```python
from opcodes import *
from struct import pack, unpack
from hgcm import *
SHCRGL_GUEST_FN_WRITE = 2
SHCRGL_GUEST_FN_READ = 3
SHCRGL_GUEST_FN_WRITE_READ = 4
SHCRGL_GUEST_FN_SET_VERSION = 6
SHCRGL_GUEST_FN_INJECT = 9
SHCRGL_GUEST_FN_SET_PID = 12
SHCRGL_GUEST_FN_WRITE_BUFFER = 13
SHCRGL_GUEST_FN_WRITE_READ_BUFFERED = 14
SHCRGL_GUEST_FN_GET_CAPS_LEGACY = 15
SHCRGL_GUEST_FN_GET_CAPS_NEW = 16
CR_MESSAGE_OPCODES = 0x77474c01
CR_MESSAGE_WRITEBACK = 0x77474c02
CR_MESSAGE_ERROR = 0x77474c0b
CR_MESSAGE_REDIR_PTR = 0x77474c0d
OFFSET_CONN_CLIENT = 0x248 # p &((CRConnection*)0)->pClient
OFFSET_CONN_HOSTBUF = 0x238 # p &((CRConnection*)0)->pHostBuffer
OFFSET_CONN_HOSTBUFSZ = 0x244 # p &((CRConnection*)0)->cbHostBuffer
OFFSET_CONN_FREE = 0xd8 # p &((CRConnection*)0)->Free
def set_version(client):
hgcm_call(client, SHCRGL_GUEST_FN_SET_VERSION, [9, 1])
def alloc_buf(client, sz, msg='a'):
buf,_,_,_ = hgcm_call(client, SHCRGL_GUEST_FN_WRITE_BUFFER, [0, sz, 0, msg])
return buf
def crmsg(client, msg, bufsz=0x1000):
''' Allocate a buffer, write a Chromium message to it, and dispatch it. '''
assert len(msg) <= bufsz
buf = alloc_buf(client, bufsz, msg)
# buf,_,_,_ = hgcm_call(client, SHCRGL_GUEST_FN_WRITE_BUFFER, [0, bufsz, 0, msg])
_, res, _ = hgcm_call(client, SHCRGL_GUEST_FN_WRITE_READ_BUFFERED, [buf, "A"*bufsz, 1337])
return res
def create_context(client):
'''
Initialize OpenGL state enough that we can use Chromium properly.
The call to GLXMakeCurrent is important for some of the PoCs to work.
'''
msg = (
pack("<III", 0x77474c01, 0x41414141, 1)
+ '\0\0\0' + chr(CR_EXTEND_OPCODE)
+ 'aaaa'
+ pack("<I", CR_CREATECONTEXT_EXTEND_OPCODE)
+ ':0'.ljust(256,'\0')
+ pack("<II", 0x25, 0)
)
res = crmsg(client, msg)
ctx, = unpack("<I", res[24:28])
msg = (
pack("<III", 0x77474c01, 0x41414141, 1)
+ '\0\0\0' + chr(CR_EXTEND_OPCODE)
+ 'aaaa'
+ pack("<I", CR_WINDOWCREATE_EXTEND_OPCODE)
+ ':0'.ljust(256,'\0')
+ pack("<I", 0x25)
)
res = crmsg(client, msg)
win, = unpack("<I", res[24:28])
msg = (
pack("<III", 0x77474c01, 0x41414141, 1)
+ '\0\0\0' + chr(CR_EXTEND_OPCODE)
+ 'aaaa'
+ pack("<I", CR_MAKECURRENT_EXTEND_OPCODE)
+ pack("<III", win, 0x400002, ctx)
)
crmsg(client, msg)
``` |
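A sketch of the typical call order for these helpers; `hgcm_connect` is assumed to be provided by the `hgcm` module imported above, returning a client handle for the shared OpenGL HGCM service:
```python
# Assumed usage; hgcm_connect comes from the hgcm module (imported with *).
client = hgcm_connect("VBoxSharedCrOpenGL")
set_version(client)              # negotiate the SHCRGL protocol version
create_context(client)           # create a GL context and window, make current
buf = alloc_buf(client, 0x1000)  # guest-side buffer for later crmsg calls
```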
{
"source": "1053254202/BDtranslate",
"score": 2
} |
#### File: 1053254202/BDtranslate/translate.py
```python
import threading
import time
import tkinter
import hashlib
import random
from tkinter.filedialog import *
import pyperclip
import requests
import json
import shutil
path = ''
def openFolder():
os.system('explorer.exe /n,%s' % path)
def changeFolder(pat):
pt.delete(0, tkinter.END)
    filepath = askdirectory(title="Choose the folder for the log file", initialdir=pat)
global path
path = filepath.replace('/', '\\')
pt.insert(10, path)
if (os.path.exists(pat + "\\translate.log")):
if (os.path.exists(filepath + "\\translate.log")):
os.remove(filepath + "\\translate.log")
shutil.move(pat + "\\translate.log", filepath + "\\translate.log")
def openFile():
os.system('cmd /c %s' % (path + "\\translate.log"))
def baiduInterface(q):
appid = "20190808000325167"
orilan = "auto"
to = ""
if (65 <= ord(q[0].upper()) <= 90):
to = "zh"
else:
to = "en"
salt = str(random.randint(1235467890, 9087654321))
secretKey = "<KEY>"
com = appid + q + salt + secretKey
sign = hashlib.md5(com.encode(encoding='UTF-8')).hexdigest()
url = "http://api.fanyi.baidu.com/api/trans/vip/translate?q=" + q + "&from=" + orilan + "&to=" + to + "&appid=" + appid + "&salt=" + salt + "&sign=" + sign
html = requests.get(url)
try:
src = json.loads(html.text)["trans_result"][0]["src"]
dst = json.loads(html.text)["trans_result"][0]["dst"]
result.delete(0, tkinter.END)
result.insert(10, dst)
if (path != ''):
with open(path + "/translate.log", "at+") as file:
file.write(src + "\t\t\t" + dst + '\n')
except Exception as msg:
result.delete(0, tkinter.END)
if ("error_code" in json.loads(html.text)):
if (path != ""):
with open(path + "/translate.log", "at+") as file:
file.write(json.loads(html.text)["error_code"] + "\t\t\t" + json.loads(html.text)["error_msg"])
if json.loads(html.text)["error_code"] == "54003":
                result.insert(10, "Too fast! Slow down!!")
else:
                result.insert(10, "The source text cannot contain a '+' sign")
def fanyi(*event):
q = txt.get()
if (q == ""):
        result.insert(10, "Please enter text...")
else:
baiduInterface(q)
def run():
curValue = ""
lastValue = ""
while True:
curValue = pyperclip.paste()
try:
if curValue != lastValue:
lastValue = curValue
q = curValue
baiduInterface(q)
time.sleep(0.1)
except KeyboardInterrupt:
break
window = tkinter.Tk()
window.title("Translate")
window.iconbitmap('C:\\Users\\rookie\\PycharmProjects\\python\\Reptile\\翻译\\favicon.ico')
width = 200
height = 120
screenHeight = window.winfo_screenheight()
screenWidth = window.winfo_screenwidth()
align = '%dx%d+%d+%d' % (width, height, screenWidth - 1.1 * width, screenHeight - 2.1 * height)
window.geometry(align)
window.resizable(width=False, height=False)
txt = tkinter.Entry(window)
txt.grid(row=0, column=0, sticky=tkinter.W + tkinter.E + tkinter.N + tkinter.S, padx=5, pady=5)
txt.bind("<Return>", fanyi)
result = tkinter.Entry(window)
result.grid(row=1, column=0, sticky=tkinter.W + tkinter.E + tkinter.N + tkinter.S, padx=5, pady=5)
var = tkinter.StringVar()
var.set(path)
pt = tkinter.Entry(window, textvariable=var)
pt.grid(row=2, column=0, sticky=tkinter.W + tkinter.E + tkinter.N + tkinter.S, padx=5, pady=5)
btn = tkinter.Button(window, text="Translate", command=fanyi)
btn.grid(row=0, column=1, sticky=tkinter.W + tkinter.E + tkinter.N + tkinter.S, padx=5, pady=5)
folder = tkinter.Button(window, text='Folder', command=openFolder)
folder.grid(row=1, column=1, sticky=tkinter.W, padx=5, pady=5)
change = tkinter.Button(window, text='Change', command=lambda: changeFolder(pat=path))
change.grid(row=2, column=1, sticky=tkinter.W, padx=5, pady=5)
threading.Thread(target=run).start()
window.mainloop()
``` |
{
"source": "1054518207/undergraduate",
"score": 3
} |
#### File: python crawl/12306ticket/query_train.py
```python
import requests
import urllib3
import time
urllib3.disable_warnings(urllib3.exceptions.InsecurePlatformWarning)
from station_code.stations import stations
class queryTrain(object):
def __init__(self, req, headers):
self.req = req
self.headers = headers
    def query_ticket(self, from_station, to_station, date):
        # map the input station names to their station codes
from_station = stations.get(from_station)
to_station = stations.get(to_station)
date = date
url = 'https://kyfw.12306.cn/otn/leftTicket/queryZ?leftTicketDTO.train_date={}&leftTicketDTO.from_station={}&leftTicketDTO.to_station={}&purpose_codes=ADULT'.format(date, from_station, to_station)
        # request the URL without SSL certificate verification
try:
response = self.req.get(url, headers = self.headers, verify=False)
response.encoding = 'utf-8'
print(response.text)
            # extract the data we need
availabel_trains = response.json()['data']['result']
            # the raw format is not directly usable, so reformat the rows
availabel_trains = [i.split('|') for i in availabel_trains]
return availabel_trains
except:
print("Error")
if __name__ == '__main__':
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0"
}
    # input departure, destination and start date
    print("Enter departure, destination and date, e.g.: 烟台 淄博 2018-02-19")
query_input_data = "淄博 烟台南 2018-03-04"
# query_input_data = input()
sp = query_input_data.split(" ")
print("输入结果: 出发地:{},目的地:{},出发日期:{}".format(sp[0], sp[1], sp[2]))
    # query tickets
    qt = queryTrain(requests.session(), headers)
    query_ticket_data = qt.query_ticket(sp[0], sp[1], sp[2])
print("输出车次数据")
print(query_input_data)
```
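A sketch of consuming the '|'-separated result rows; the field positions (3 = train number, 8 = departure time, 9 = arrival time) are assumptions taken from commonly seen 12306 client code, not verified against the current API:
```python
def print_trains(rows):
    # rows: list of lists as returned by query_ticket; indices are assumed
    for row in rows:
        print(row[3], row[8], '->', row[9])
# print_trains(query_ticket_data)
```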
#### File: python crawl/chen/PieTest.py
```python
from mysql import connector
import matplotlib.pyplot as plt
import numpy as np
dbuser = "root"
dbname = "orcid"
dbpassword = ""
sql = "SELECT COUNT(country),country FROM info GROUP BY country ORDER BY COUNT(country) DESC LIMIT 8"
def func(pct, allvals):
absolute = int(pct/100.*np.sum(allvals))
return "{:.1f}%\n({:d})".format(pct, absolute)
if __name__ == '__main__':
cnx = connector.connect(user=dbuser, database=dbname, password=<PASSWORD>)
cursor = cnx.cursor()
cursor.execute(sql)
data = cursor.fetchall()
cursor.close()
cnx.close()
cname = []
cdata = []
for item in data:
cdata.append(item[0])
cname.append(item[1])
fig, ax = plt.subplots(figsize=(6, 3), subplot_kw=dict(aspect="equal"))
wedges, texts, autotexts = ax.pie(cdata, autopct=lambda pct: func(pct, cdata),
textprops=dict(color="w"))
ax.legend(wedges, cname,
title="Country",
loc="center left",
bbox_to_anchor=(1, 0, 0.5, 1))
plt.setp(autotexts, size=8, weight="bold")
ax.set_title("Country statistic")
plt.show()
histlabel = []
for item in cname:
l = ""
for i in range(len(item)):
if str.isupper(item[i]):
l += item[i]
histlabel.append(l)
print(histlabel)
N = 8
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
cmap = plt.get_cmap('viridis')
colors = cmap(np.linspace(0, 1, len(histlabel)))
p1 = plt.bar(ind, cdata, width,color=colors)
plt.ylabel('Numbers')
plt.xlabel('Country')
plt.xticks(ind,histlabel)
plt.legend(p1,cname)
plt.show()
```
#### File: python crawl/chen/tfTest.py
```python
import os
import tensorflow as tf
from PIL import Image
import numpy as np
# directory holding the captcha images
IMAGE_PATH = "./pictest/"
# captcha image width
IMAGE_WIDTH = 60
# captcha image height
IMAGE_HEIGHT = 24
# validation set: filenames of the captcha images used for validation
VALIDATION_IMAGE_NAME = []
# directory holding the trained model
MODEL_SAVE_PATH = './models/'
CHAR_SET_LEN = 10
CAPTCHA_LEN = 4
def get_image_file_name(imgPath=IMAGE_PATH):
fileName = []
total = 0
for filePath in os.listdir(imgPath):
print(filePath)
captcha_name = filePath.split('/')[-1]
# captcha_name = captcha_name.split('.')[0]
print(captcha_name)
fileName.append(captcha_name)
total += 1
return fileName, total
# Convert the captcha string into the 40-dim label vector used in training.
# For example, if the captcha is '0296', the corresponding label is
# [1 0 0 0 0 0 0 0 0 0
# 0 0 1 0 0 0 0 0 0 0
# 0 0 0 0 0 0 0 0 0 1
# 0 0 0 0 0 0 1 0 0 0]
def name2label(name):
label = np.zeros(CAPTCHA_LEN * CHAR_SET_LEN)
for i, c in enumerate(name):
idx = i * CHAR_SET_LEN + ord(c) - ord('0')
label[idx] = 1
return label
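# e.g. name2label('0296') sets ones at indices 0, 12, 29 and 36:
# digit d at captcha position i maps to vector index i * 10 + d.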
# load a captcha image's pixel data and its label
def get_data_and_label(fileName, filePath=IMAGE_PATH):
pathName = os.path.join(filePath, fileName)
img = Image.open(pathName)
    # convert to grayscale
img = img.convert("L")
image_array = np.array(img)
image_data = image_array.flatten() / 255
image_label = name2label(fileName[0:CAPTCHA_LEN])
return image_data, image_label
# generate one batch of data
def get_next_batch(batchSize=32, step=0):
batch_data = np.zeros([batchSize, IMAGE_WIDTH * IMAGE_HEIGHT])
batch_label = np.zeros([batchSize, CAPTCHA_LEN * CHAR_SET_LEN])
fileNameList = VALIDATION_IMAGE_NAME
totalNumber = len(fileNameList)
indexStart = step * batchSize
for i in range(batchSize):
index = (i + indexStart) % totalNumber
name = fileNameList[index]
img_data, img_label = get_data_and_label(name)
batch_data[i, :] = img_data
batch_label[i, :] = img_label
return batch_data, batch_label
# build the convolutional network and validate the saved model
def validate_data_with_CNN():
    # initialize weights
def weight_variable(shape, name='weight'):
init = tf.truncated_normal(shape, stddev=0.1)
var = tf.Variable(initial_value=init, name=name)
return var
    # initialize biases
def bias_variable(shape, name='bias'):
init = tf.constant(0.1, shape=shape)
var = tf.Variable(init, name=name)
return var
    # convolution
def conv2d(x, W, name='conv2d'):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME', name=name)
    # max pooling
def max_pool_2X2(x, name='maxpool'):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
    # input layer
    # note the name of X; it is needed when the model is loaded for testing
X = tf.placeholder(tf.float32, [None, IMAGE_WIDTH * IMAGE_HEIGHT], name='data-input')
Y = tf.placeholder(tf.float32, [None, CAPTCHA_LEN * CHAR_SET_LEN], name='label-input')
x_input = tf.reshape(X, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1], name='x-input')
    # first convolutional layer
W_conv1 = weight_variable([5, 5, 1, 32], 'W_conv1')
B_conv1 = bias_variable([32], 'B_conv1')
conv1 = tf.nn.relu(conv2d(x_input, W_conv1, 'conv1') + B_conv1)
conv1 = max_pool_2X2(conv1, 'conv1-pool')
    # second convolutional layer
W_conv2 = weight_variable([5, 5, 32, 64], 'W_conv2')
B_conv2 = bias_variable([64], 'B_conv2')
conv2 = tf.nn.relu(conv2d(conv1, W_conv2, 'conv2') + B_conv2)
conv2 = max_pool_2X2(conv2, 'conv2-pool')
    # third convolutional layer
W_conv3 = weight_variable([5, 5, 64, 64], 'W_conv3')
B_conv3 = bias_variable([64], 'B_conv3')
conv3 = tf.nn.relu(conv2d(conv2, W_conv3, 'conv3') + B_conv3)
conv3 = max_pool_2X2(conv3, 'conv3-pool')
    # fully connected layer
    # each pooling halves the width and height, so three poolings shrink both by a factor of 8
W_fc1 = weight_variable([8 * 3 * 64, 1024], 'W_fc1')
B_fc1 = bias_variable([1024], 'B_fc1')
fc1 = tf.reshape(conv3, [-1, W_fc1.get_shape().as_list()[0]])
fc1 = tf.nn.relu(tf.add(tf.matmul(fc1, W_fc1), B_fc1))
    # output layer
W_fc2 = weight_variable([1024, CAPTCHA_LEN * CHAR_SET_LEN], 'W_fc2')
B_fc2 = bias_variable([CAPTCHA_LEN * CHAR_SET_LEN], 'B_fc2')
output = tf.add(tf.matmul(fc1, W_fc2), B_fc2, 'output')
predict = tf.reshape(output, [-1, CAPTCHA_LEN, CHAR_SET_LEN], name='predict')
labels = tf.reshape(Y, [-1, CAPTCHA_LEN, CHAR_SET_LEN], name='labels')
    # prediction
    # note the name of predict_max_idx; it is needed when the model is loaded for testing
predict_max_idx = tf.argmax(predict, axis=2, name='predict_max_idx')
labels_max_idx = tf.argmax(labels, axis=2, name='labels_max_idx')
predict_correct_vec = tf.equal(predict_max_idx, labels_max_idx)
accuracy = tf.reduce_mean(tf.cast(predict_correct_vec, tf.float32))
saver = tf.train.Saver()
config = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True)
config.gpu_options.per_process_gpu_memory_fraction = 0.6
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)  # fetch the checkpoint state
        if ckpt and ckpt.model_checkpoint_path:  # only restore when a checkpoint exists; otherwise start fresh
            print("Restoring parameters...")
            saver.restore(sess, ckpt.model_checkpoint_path)  # restore the saved network to resume from the checkpoint
            print("Parameters restored.")
steps = 0
test_data, test_label = get_next_batch(20, steps)
acc = sess.run(accuracy, feed_dict={X: test_data, Y: test_label})
predict_test = sess.run(predict_max_idx, feed_dict={X:test_data, Y: test_label})
data = predict_test.flatten().tolist()
prelab = []
pre = ""
for i in range(len(data)):
if i % 4 == 0 and i != 0:
prelab.append(pre)
pre = ""
pre = pre + str(data[i])
prelab.append(pre)
# print(prelab)
testlab = []
data = test_label.reshape((-1, 4, 10))
for item1 in data:
lab = ""
for item2 in item1:
ind = np.argmax(item2)
lab = lab + str(ind)
testlab.append(lab)
# print(testlab)
for item1, item2 in zip(prelab, testlab):
print("{}<->{}:{}".format(item1, item2, item1 == item2))
print("accuracy:{}".format(acc))
if __name__ == '__main__':
image_filename_list, total = get_image_file_name(IMAGE_PATH)
VALIDATION_IMAGE_NAME = image_filename_list
validate_data_with_CNN()
``` |
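The index-to-string conversion in the validation loop can be written more directly; a sketch, not part of the original file:
```python
def decode_predictions(pred):
    # pred: integer array of shape (batch, CAPTCHA_LEN) as returned by predict_max_idx
    return [''.join(str(d) for d in row) for row in pred]
```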
{
"source": "1054/a3cosmos-gas-evolution",
"score": 2
} |
#### File: a3cosmos_gas_evolution/Common_Python_Code/calc_cosmic_comoving_volume.py
```python
from __future__ import print_function
import os, sys, re, json, time, astropy
import numpy as np
#from astropy.table import Table, Column, hstack
from astropy import units as u
from copy import copy
if not (os.path.dirname(os.path.abspath(__file__)) in sys.path): sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import apply_cosmology
#cosmo = apply_cosmology.cosmo
cosmo = apply_cosmology.apply_cosmology(70, 0.3, 0.7)
print('cosmo', cosmo)
if sys.version_info.major >= 3:
long = int
else:
pass
def calc_cosmic_comoving_volume_1(z_edge_1, z_edge_2, obs_area_arcmin2):
if type(obs_area_arcmin2) is u.quantity.Quantity:
obs_area = obs_area_arcmin2
else:
obs_area = obs_area_arcmin2 * u.arcmin**2
#comoving_z_list = np.linspace(z_edges[i], z_edges[i+1], num=100, endpoint=True)
#comoving_volume = np.sum((cosmo.comoving_volume(comoving_z_list[1:]) - cosmo.comoving_volume(comoving_z_list[0:-1])) / (4.0*np.pi*u.steradian) * obs_area.to(u.steradian))
#print('comoving_volume = %e [%s]'%(comoving_volume.value, comoving_volume.unit))
comoving_volume = ((cosmo.comoving_volume(z_edge_2) - cosmo.comoving_volume(z_edge_1)) / (4.0*np.pi*u.steradian) * obs_area.to(u.steradian))
print('comoving_volume = %e [%s]'%(comoving_volume.value, comoving_volume.unit))
return comoving_volume.value
def calc_cosmic_comoving_volume_2(z_edge_1, z_edge_2, obs_area_arcmin2):
if type(obs_area_arcmin2) is u.quantity.Quantity:
obs_area = obs_area_arcmin2
else:
obs_area = obs_area_arcmin2 * u.arcmin**2
differntial_z_list = np.linspace(z_edge_1, z_edge_2, num=10, endpoint=True)
comoving_volume = np.sum((cosmo.differential_comoving_volume(differntial_z_list[1:]) * np.diff(differntial_z_list) * obs_area.to(u.steradian)))
print('comoving_volume = %e [%s]'%(comoving_volume.value, comoving_volume.unit))
##print(cosmo.de_density_scale(z)) # should be 1
##print(cosmo._Ogamma0, cosmo._Onu0)
##print(cosmo.efunc(z), np.sqrt(0.27*(1.+z)**3 + 0*(1.+z)**2 + 0.73) ) # checked consistent
##print(cosmo._hubble_distance, 2.997902458e5/70 ) # checked consistent
#sys_lumdist_output = subprocess.getoutput("/Users/dzliu/Cloud/Github/Crab.Toolkit.PdBI/bin/lumdist -h0 70 -verbose %s | grep 'lumdist d_L=' | sed -e 's/=/ /g'"%(z))
##print(cosmo.angular_diameter_distance(z), sys_lumdist_output.split()[8], '(z = %s)'%(z), sys_lumdist_output ) #
#dH_astropy = cosmo._hubble_distance
#Ez_astropy = cosmo.efunc(z)
#dA_astropy = cosmo.angular_diameter_distance(z)
#dH_dzliu = 2.997902458e5/70
#Ez_dzliu = np.sqrt(0.27*(1.+z)**3 + 0*(1.+z)**2 + 0.73)
#dA_dzliu = float(sys_lumdist_output.split()[8])
#print(dH_astropy/Ez_astropy*dA_astropy, dH_dzliu/Ez_dzliu*dA_dzliu) # chekced consistent
#zp1 = 1.0 + z
#print(cosmo.differential_comoving_volume(z), dH_astropy*((zp1*dA_astropy)**2)/Ez_astropy, dH_dzliu/Ez_dzliu*dA_dzliu**2*zp1**2) # chekced consistent
# z = 2.0 - 2.5, area = 0.0006092348395183178 steradian, dVc = 43627725623.05944,
# 43627725623.05944 * 0.25 / 10
return comoving_volume.value
if __name__ == '__main__':
obs_area = 1.5546582999901375*u.deg*u.deg
print('obs_area = %s [%s]'%(obs_area.to(u.arcmin*u.arcmin).value, obs_area.to(u.arcmin*u.arcmin).unit))
print('obs_area = %s [%s]'%(obs_area.to(u.steradian).value, obs_area.to(u.steradian).unit))
#print(type(obs_area))
z_edges = [0.02, 0.25, 0.50, 0.75, 1.00, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, 6.0]
#
# loop z bin
for i in range(len(z_edges)-1):
#
print('z %s - %s, cosmic age %.2f - %.2f, time interval %.2f'%(\
z_edges[i],
z_edges[i+1],
cosmo.age(z_edges[i]).to('Gyr').value,
cosmo.age(z_edges[i+1]).to('Gyr').value,
cosmo.age(z_edges[i]).to('Gyr').value - cosmo.age(z_edges[i+1]).to('Gyr').value
)
)
#
z = (z_edges[i]+z_edges[i+1])/2.0
#
calc_cosmic_comoving_volume_1(z_edges[i], z_edges[i+1], obs_area)
calc_cosmic_comoving_volume_2(z_edges[i], z_edges[i+1], obs_area)
#
#
#
```
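As a sanity check, method 1's integrated difference can be reproduced with astropy alone; a sketch assuming the same H0 = 70, Om0 = 0.3 cosmology:
```python
import numpy as np
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
cosmo_check = FlatLambdaCDM(H0=70, Om0=0.3)
area = (1.5546582999901375 * u.deg**2).to(u.steradian)
dV = ((cosmo_check.comoving_volume(2.5) - cosmo_check.comoving_volume(2.0))
      / (4.0 * np.pi * u.steradian) * area)
print('comoving volume for z = 2.0-2.5: %e %s' % (dV.value, dV.unit))
```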
#### File: a3cosmos_gas_evolution/Common_Python_Code/calc_gas_depletion_time.py
```python
from __future__ import print_function
import os, sys, re, json, time, astropy
import numpy as np
from astropy.table import Table, Column, hstack
from numpy import log10, power as pow
#from astropy.cosmology import FlatLambdaCDM
##cosmo = FlatLambdaCDM(H0=73, Om0=0.27, Tcmb0=2.725) # replaced since 2019-02-21
#cosmo = FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.725)
if not (os.path.dirname(os.path.abspath(__file__)) in sys.path): sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import apply_cosmology
cosmo = apply_cosmology.cosmo
if sys.version_info.major >= 3:
long = int
else:
pass
#
# def
#
def calc_gas_depletion_time_Scoville2017(z, lgMstar=10.5, DeltaMS=0.0):
# Scoville et al. 2017 (The Astrophysical Journal, 837:150 (20pp), 2017 March 10)
# Table 2
Ratio_M_molgas_SFR = 3.23 * (1+z)**(-1.04) * (10**DeltaMS)**(-0.70) * (10**(lgMstar-10.0))**(-0.01)
return Ratio_M_molgas_SFR
def calc_gas_depletion_time_Tacconi2018(z, lgMstar=10.5, DeltaMS=0.0):
# Tacconi et al. 2018 (The Astrophysical Journal, 853:179 (22pp), 2018 February 1 https://doi.org/10.3847/1538-4357/aaa4b4)
# Table 3
# Best (with bootstrap errors) with S14 (Speagle+2014) MS
A = +0.09
B = -0.62
C = -0.44
D = +0.09
E = +0.11
DeltaRe = 0.0 # log10 (R_e / R_e_0)
log10_Ratio_M_molgas_SFR = A + B * (np.log10(1+z)) + C * DeltaMS + D * (lgMstar-10.7) + E * (DeltaRe)
Ratio_M_molgas_SFR = 10**log10_Ratio_M_molgas_SFR
return Ratio_M_molgas_SFR
def calc_gas_depletion_time_A3COSMOS(z=None, cosmic_age=None, lgMstar=10.5, DeltaMS=0.0):
# A3COSMOS
# Sample 20181203
# fitting '/Users/dzliu/Cloud/GitLab/AlmaCosmos/Plot/Plot_z_tauDepl_data_fitting/20190716_with_gas_mass_calibration_H17/fitting_dzliu_function/best_fit_via_MCMC.json'
#popt = [-0.5820833042317259, -0.5346887707021821, -0.0038760530734522902, 0.041705991802229114, 0.1252905060544558, 0.0592184169424943]
popt = [-0.5816185208613138, -0.5338752260098323, -0.0038228411346192814, 0.040738011277865915, 0.12494331865991803, 0.05914760529893037]
a, b, c, d, ak, ck = popt
if z is None and cosmic_age is None:
print('Error! Please input either z or cosmic_age')
sys.exit()
else:
if cosmic_age is not None:
cosmoAge = cosmic_age
else:
cosmoAge = cosmo.age(z).value
log10_Ratio_M_molgas_SFR = (a+ak*(lgMstar-10.0))*DeltaMS + b*(lgMstar-10.0) + (c+ck*(lgMstar-10.0))*(cosmoAge) + d
Ratio_M_molgas_SFR = 10**log10_Ratio_M_molgas_SFR
return Ratio_M_molgas_SFR
#def calc_gas_depletion_time_A3COSMOS(z, lgMstar=10.5, DeltaMS=0.0):
# # A3COSMOS
# # Sample 20181203
# # fitting '/Users/dzliu/Cloud/GitLab/AlmaCosmos/Plots/Plot_z_tauDepl_data_fitting/best_fit_via_CurveFit.json'
# #popt = [0.1, -0.4979428049052056, -0.5172364530151924, -0.5, -0.12515630072829534] #<201901>#
# #popt = [-0.5422, -0.4824, +0.04271, -0.01896, +0.08644] #<20190205>#
# #popt = [-0.5817, -0.5640, 0.05553, -0.01192, 0.1375] #<20190207># fixed xCOLDGASS Z_PP04_O3N2
# #popt = [-0.5741, -0.5664, +0.05543, -0.004397, +0.1154] #<20190208># still fitting Bertemes and Lee's data
# popt = [-0.5702, -0.5741, 0.05489, 0.008601, -0.01334] #<20190301># Eq850 this work, RemyRuyer GDR, Genzel2015_Eq12a_with_dzliu_limit MZR, KMT09 fmol, fitting all available data '/Users/dzliu/Cloud/GitLab/AlmaCosmos/Plots/Plot_z_tauDepl_data_fitting/20190301_with_gas_mass_calibration_Eq850_dzliu/best_fit_via_MCMC.json'
# a, b, c, d, e = popt
# #log10_Ratio_M_molgas_SFR = b*DeltaMS + c*np.log10(1+z) + e #<201901>#
# cosmoAge = cosmo.age(z).value
# log10_Ratio_M_molgas_SFR = a*DeltaMS + (b+c*cosmoAge)*(lgMstar-10.0) + d*(cosmoAge) + e #<20190205>#
#
# #<20190714>#
# a = -0.5788835106242605
# b = -0.5376145163745489
# c = -0.004112269290743598
# d = 0.04527343126448624
# ak = 0.12203773985943833
# ck = 0.05930642823001797
# cosmoAge = cosmo.age(z).value
# log10_Ratio_M_molgas_SFR = (a+ak*(lgMstar-10.0))*DeltaMS + b*(lgMstar-10.0) + (c+ck*(lgMstar-10.0))*cosmoAge + d
#
#
# Ratio_M_molgas_SFR = 10**log10_Ratio_M_molgas_SFR
# return Ratio_M_molgas_SFR
#def calc_gas_depletion_time_A3COSMOS_with_dzliu_850_gas_mass_calibration(z, lgMstar=10.5, DeltaMS=0.0):
# # A3COSMOS
# # Sample 20181203
# # fitting '/Users/dzliu/Cloud/GitLab/AlmaCosmos/Plots/Plot_z_tauDepl_data_fitting/20190122_with_Leroy_GDR_with_Genzel_Eq12a_MZR_with_dzliu_gas_mass_calibration_with_KMT09_fmol/best_fit_via_CurveFit.json'
# #popt = [0.1, -0.5191087642422786, -1.5519298330096012, -0.5, 0.08064040034418785] #<20190122_with_Leroy_GDR_with_Genzel_Eq12a_MZR_with_KMT09_fmol>#
# popt = [-0.5702, -0.5741, 0.05489, 0.008601, -0.01334] #<20190301># Eq850 this work, RemyRuyer GDR, Genzel2015_Eq12a_with_dzliu_limit MZR, KMT09 fmol, fitting all available data '/Users/dzliu/Cloud/GitLab/AlmaCosmos/Plots/Plot_z_tauDepl_data_fitting/20190301_with_gas_mass_calibration_Eq850_dzliu/best_fit_via_MCMC.json'
# a, b, c, d, e = popt
# log10_Ratio_M_molgas_SFR = b*DeltaMS + c*np.log10(1+z) + e
# Ratio_M_molgas_SFR = 10**log10_Ratio_M_molgas_SFR
# return Ratio_M_molgas_SFR
#def calc_gas_depletion_time_A3COSMOS_with_Hughes2017_GasMassCalibration_Leslie2019_MS(z, lgMstar=10.5, DeltaMS=0.0):
# # A3COSMOS
# # Sample 20181203
# # MS: Leslie20190515
# # GasMassCalibration: Hughes2017
# # fitting '/Users/dzliu/Cloud/GitLab/AlmaCosmos/Plots/Plot_z_tauDepl_data_fitting/20190301_with_gas_mass_calibration_H17_with_MS_Leslie20190515/best_fit_via_MCMC.json'
# popt = [-0.4367323964697807, -0.31995589347193487, 0.0363679934437835, 0.011782986854565358, -0.19084213551539264]
# a, b, c, d, e = popt
# #log10_Ratio_M_molgas_SFR = b*DeltaMS + c*np.log10(1+z) + e #<201901>#
# cosmoAge = cosmo.age(z).value
# log10_Ratio_M_molgas_SFR = a*DeltaMS + (b+c*cosmoAge)*(lgMstar-10.0) + d*(cosmoAge) + e #<20190205>#
# Ratio_M_molgas_SFR = 10**log10_Ratio_M_molgas_SFR
# return Ratio_M_molgas_SFR
#
# def core functions (adapted from "a_dzliu_code_fit_z_tauDepl_v6.py")
#
def func_tauDepl_dzliu_log(pars, a, b, c, d, e):
deltaGas,DeltaMS,lgMstar,lgSFR,cosmoAge,z = pars
# 20181030c
#tauDepl_model_log = a*DeltaMS + b*(deltaGas) + c*(np.exp(-cosmoAge)) + d
#tauDepl_model_log = a*(np.log10(deltaGas))*0.0 + b*DeltaMS + c*10**lgSFR*0.0 + lgMstar*0.0 + d*(cosmoAge)*0.0 + e
tauDepl_model_log = a*(np.log10(deltaGas))*0.0 + b*DeltaMS + 10**lgSFR*0.0 + c*np.log10(1+z) + lgMstar*0.0 + d*(cosmoAge)*0.0 + e
#
return tauDepl_model_log
def func_tauDepl_dzliu(pars, a, b, c, d, e):
deltaGas,DeltaMS,lgMstar,lgSFR,cosmoAge,z = pars
tauDepl_model_log = func_tauDepl_dzliu_log((deltaGas,DeltaMS,lgMstar,lgSFR,cosmoAge,z), a, b, c, d, e)
tauDepl_model = np.power(10, tauDepl_model_log) # * np.power((1+z),c)
return tauDepl_model
def func_tauDepl_Tacconi2018_log(pars, a, b, c, d):
DeltaMS,lgMstar,z = pars
tauDepl_model_log = a*DeltaMS + b*(lgMstar-10.7) + c*(np.log10(1+z)) + d
return tauDepl_model_log
def func_tauDepl_Tacconi2018(pars):
DeltaMS,lgMstar,z = pars
tauDepl_model_log = func_tauDepl_Tacconi2018_log((DeltaMS,lgMstar,z), -0.44, 0.09, -0.62, 0.09)
tauDepl_model = np.power(10, tauDepl_model_log) # * np.power((1+z),c)
return tauDepl_model
def func_tauDepl_Scoville2017_log(pars, a, b, c, d):
DeltaMS,lgMstar,z = pars
tauDepl_model_log = a*DeltaMS + b*(lgMstar-10.0) + c*(np.log10(1+z)) + d
return tauDepl_model_log
def func_tauDepl_Scoville2017(pars):
DeltaMS,lgMstar,z = pars
tauDepl_model_log = func_tauDepl_Scoville2017_log((DeltaMS,lgMstar,z), -0.70, -0.01, -1.04, np.log10(3.23))
tauDepl_model = np.power(10, tauDepl_model_log) # * np.power((1+z),c)
return tauDepl_model
```
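For orientation, a small usage sketch comparing the three depletion-time calibrations at one point; the chosen z, lgMstar and DeltaMS are arbitrary example values:
```python
# Example: molecular gas depletion times (in Gyr) for a main-sequence galaxy
# at z = 2 with log(M*/Msun) = 10.5; values chosen only for illustration.
if __name__ == '__main__':
    for calib in (calc_gas_depletion_time_Scoville2017,
                  calc_gas_depletion_time_Tacconi2018,
                  calc_gas_depletion_time_A3COSMOS):
        print(calib.__name__, calib(z=2.0, lgMstar=10.5, DeltaMS=0.0))
```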
#### File: a3cosmos_gas_evolution/Common_Python_Code/calc_star_formation_law.py
```python
from __future__ import print_function
import os, sys, re, json, time, astropy
import numpy as np
from astropy.table import Table, Column, hstack
from copy import copy
from numpy import log, log10, power, sum, sqrt, pi, exp
pow = power
lg = log10
ln = log
from scipy.interpolate import InterpolatedUnivariateSpline, interp1d
if not (os.path.dirname(os.path.abspath(__file__)) in sys.path): sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import apply_cosmology
cosmo = apply_cosmology.cosmo
if sys.version_info.major >= 3:
long = int
else:
pass
#
# def
#
def calc_Mmolgas_from_SFR_using_SF_law_Sargent2014(SFR, DeltaMS = 0.0):
# Sargent et al. 2014, https://ui.adsabs.harvard.edu/abs/2014ApJ...793...19S/abstract
# bimodal SF law
arr_alpha = [9.22, 8.05]
arr_beta = [0.81, 0.81]
arr_DeltaMS = [0.0, 1.0]
argmin_DeltaMS = np.argmin(arr_DeltaMS)
argmax_DeltaMS = np.argmax(arr_DeltaMS)
min_DeltaMS = arr_DeltaMS[argmin_DeltaMS]
max_DeltaMS = arr_DeltaMS[argmax_DeltaMS]
#
if np.isscalar(DeltaMS):
if DeltaMS <= min_DeltaMS:
t_DeltaMS = min_DeltaMS
t_alpha = arr_alpha[argmin_DeltaMS]
t_beta = arr_beta[argmin_DeltaMS]
elif DeltaMS >= max_DeltaMS:
t_DeltaMS = max_DeltaMS
t_alpha = arr_alpha[argmax_DeltaMS]
t_beta = arr_beta[argmax_DeltaMS]
else:
t_DeltaMS = DeltaMS
t_alpha = np.interp(t_DeltaMS, arr_DeltaMS, arr_alpha)
t_beta = np.interp(t_DeltaMS, arr_DeltaMS, arr_beta)
else:
t_DeltaMS = np.array(DeltaMS)
t_alpha = t_DeltaMS * 0.0
t_beta = t_DeltaMS * 0.0
t_mask_1 = (t_DeltaMS <= min_DeltaMS)
if np.count_nonzero(t_mask_1) > 0:
t_DeltaMS[t_mask_1] = min_DeltaMS
t_alpha[t_mask_1] = arr_alpha[argmin_DeltaMS]
t_beta[t_mask_1] = arr_beta[argmin_DeltaMS]
t_mask_2 = (t_DeltaMS >= max_DeltaMS)
if np.count_nonzero(t_mask_2) > 0:
t_DeltaMS[t_mask_2] = max_DeltaMS
t_alpha[t_mask_2] = arr_alpha[argmax_DeltaMS]
t_beta[t_mask_2] = arr_beta[argmax_DeltaMS]
t_mask_3 = np.logical_and(t_DeltaMS > min_DeltaMS, t_DeltaMS < max_DeltaMS)
if np.count_nonzero(t_mask_3) > 0:
t_alpha[t_mask_3] = np.interp(t_DeltaMS[t_mask_3], arr_DeltaMS, arr_alpha)
t_beta[t_mask_3] = np.interp(t_DeltaMS[t_mask_3], arr_DeltaMS, arr_beta)
Mmol = 10**(t_alpha + t_beta * np.log10(SFR))
return Mmol
def calc_SFR_from_Mmolgas_using_SF_law_Sargent2014(Mmolgas, DeltaMS = 0.0):
# Sargent et al. 2014, https://ui.adsabs.harvard.edu/abs/2014ApJ...793...19S/abstract
# bimodal SF law
arr_alpha = [9.22, 8.05]
arr_beta = [0.81, 0.81]
arr_DeltaMS = [0.0, 1.0]
argmin_DeltaMS = np.argmin(arr_DeltaMS)
argmax_DeltaMS = np.argmax(arr_DeltaMS)
min_DeltaMS = arr_DeltaMS[argmin_DeltaMS]
max_DeltaMS = arr_DeltaMS[argmax_DeltaMS]
#
if np.isscalar(DeltaMS):
if DeltaMS <= min_DeltaMS:
t_DeltaMS = min_DeltaMS
t_alpha = arr_alpha[argmin_DeltaMS]
t_beta = arr_beta[argmin_DeltaMS]
elif DeltaMS >= max_DeltaMS:
t_DeltaMS = max_DeltaMS
t_alpha = arr_alpha[argmax_DeltaMS]
t_beta = arr_beta[argmax_DeltaMS]
else:
t_DeltaMS = DeltaMS
t_alpha = np.interp(t_DeltaMS, arr_DeltaMS, arr_alpha)
t_beta = np.interp(t_DeltaMS, arr_DeltaMS, arr_beta)
else:
t_DeltaMS = np.array(DeltaMS)
t_alpha = t_DeltaMS * 0.0
t_beta = t_DeltaMS * 0.0
t_mask_1 = (t_DeltaMS <= min_DeltaMS)
if np.count_nonzero(t_mask_1) > 0:
t_DeltaMS[t_mask_1] = min_DeltaMS
t_alpha[t_mask_1] = arr_alpha[argmin_DeltaMS]
t_beta[t_mask_1] = arr_beta[argmin_DeltaMS]
t_mask_2 = (t_DeltaMS >= max_DeltaMS)
if np.count_nonzero(t_mask_2) > 0:
t_DeltaMS[t_mask_2] = max_DeltaMS
t_alpha[t_mask_2] = arr_alpha[argmax_DeltaMS]
t_beta[t_mask_2] = arr_beta[argmax_DeltaMS]
t_mask_3 = np.logical_and(t_DeltaMS > min_DeltaMS, t_DeltaMS < max_DeltaMS)
if np.count_nonzero(t_mask_3) > 0:
t_alpha[t_mask_3] = np.interp(t_DeltaMS[t_mask_3], arr_DeltaMS, arr_alpha)
t_beta[t_mask_3] = np.interp(t_DeltaMS[t_mask_3], arr_DeltaMS, arr_beta)
SFR = 10**( (np.log10(Mmolgas) - t_alpha) / t_beta )
return SFR
```
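Since the two conversions are exact inverses at a fixed DeltaMS, a quick round-trip check; a sketch, not part of the module:
```python
# Round-trip consistency of the Sargent et al. (2014) conversions; a sketch.
sfr_in = 50.0  # Msun/yr, arbitrary example value
mmol = calc_Mmolgas_from_SFR_using_SF_law_Sargent2014(sfr_in, DeltaMS=0.3)
sfr_out = calc_SFR_from_Mmolgas_using_SF_law_Sargent2014(mmol, DeltaMS=0.3)
assert abs(sfr_out - sfr_in) < 1e-6 * sfr_in
```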
#### File: a3cosmos_gas_evolution/Common_Python_Code/setup_matplotlib.py
```python
import os, sys
from matplotlib import pyplot as plt
from matplotlib import ticker as ticker
from matplotlib.ticker import MultipleLocator, AutoMinorLocator
from matplotlib import font_manager
import matplotlib as mpl
import numpy as np
def setup_matplotlib():
if os.path.isdir(os.path.expanduser('~')+os.sep+'Library'+os.sep+'Fonts'):
font_dirs = [os.path.expanduser('~')+os.sep+'Library'+os.sep+'Fonts']
font_files = font_manager.findSystemFonts(fontpaths=font_dirs)
font_list = font_manager.createFontList(font_files)
font_manager.fontManager.ttflist.extend(font_list)
font_manager.findfont('NGC', rebuild_if_missing=True)
mpl.rcParams['font.family'] = 'NGC'
#mpl.rcParams['text.usetex'] = True
mpl_version = np.array(mpl.__version__.split('.')).astype(int)
if mpl_version[0] > 3 or (mpl_version[0] >= 3 and mpl_version[1] >= 1):
# since matplotlib version 3.1.0, mpl.rcParams['text.latex.preamble'] is a single str not a list
mpl.rcParams['text.latex.preamble'] = r'\usepackage{amsmath}'
mpl.rcParams['text.latex.preamble'] += '\n'
mpl.rcParams['text.latex.preamble'] += r'\makeatletter \newcommand*{\rom}[1]{\expandafter\@slowromancap\romannumeral #1@} \makeatother'
else:
# since matplotlib version 3.1.0, mpl.rcParams['text.latex.preamble'] is a single str not a list
mpl.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}'] #for \text command
mpl.rcParams['text.latex.preamble'].append(r'\makeatletter \newcommand*{\rom}[1]{\expandafter\@slowromancap\romannumeral #1@} \makeatother')
mpl.rcParams['axes.labelsize'] = '16' # https://matplotlib.org/users/customizing.html
mpl.rcParams['axes.grid'] = True
mpl.rcParams['axes.axisbelow'] = True
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.minor.visible'] = True
mpl.rcParams['ytick.minor.visible'] = True
mpl.rcParams['xtick.labelsize'] = '13'
mpl.rcParams['ytick.labelsize'] = '13'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
#mpl.rcParams['grid.color'] = 'b0b0b0'
mpl.rcParams['grid.linestyle'] = '--'
mpl.rcParams['grid.linewidth'] = 0.25
mpl.rcParams['grid.alpha'] = 0.8
mpl.rcParams['legend.fontsize'] = '12'
mpl.rcParams['legend.borderaxespad'] = 0.2 # space between legend border and axis
#mpl.rcParams['legend.borderpad'] = 0.2 # space between legend content and legend border
#mpl.rcParams['legend.handletextpad'] = 0.05
#mpl.rcParams['legend.labelspacing'] = None # in font-size units
#mpl.rcParams['legend.columnspacing'] = None # in font-size units
#mpl.rcParams['legend.ncol']
``` |
{
"source": "1064CBread/1064Chat",
"score": 3
} |
#### File: blueprints/rest/restutil.py
```python
from enum import Enum
from collections.abc import MutableMapping
from util import get_current_app
from flask import Response
from functools import wraps
import re
class ClientType(str, Enum):
BROWSER = "browser" # most useful in debug
CURL = "cURL" # also useful in debug
OTHER = "other" # usually production apps
browsers = re.compile("|".join(("chrome", "firefox", "safari", "opera")), re.IGNORECASE)
def get_implied_client_type(useragent: str) -> ClientType:
"""
Attempts to get the client type based on user-agent. This is by no means exaustive for browser checking,
and may be incorrect if the client lies.
:param useragent: The user-agent that the client provided
:return: The ClientType the user-agent implies
"""
if browsers.search(useragent):
return ClientType.BROWSER
if "curl/" in useragent:
return ClientType.CURL
return ClientType.OTHER
_shared_decorator_key = __name__ + "_shared_decorator"
def _shared_decorator_logic(**response_kwargs):
"""
Shared deco logic, merges decorators that are used together
"""
def make_wrapper(f):
merged_kwargs = response_kwargs.copy()
fn = f
if hasattr(f, _shared_decorator_key):
data = getattr(f, _shared_decorator_key)
kwtomerge = data['kwargs']
merge_dict = dict()
for k, v in kwtomerge.items():
if k in merged_kwargs and isinstance(merged_kwargs[k], MutableMapping):
merged_kwargs[k].update(v)
else:
merge_dict[k] = v
merged_kwargs.update(merge_dict)
fn = data['wrapped']
@wraps(fn)
def wrapper(*args, **kwargs):
ret = fn(*args, **kwargs)
if isinstance(ret, Response):
# ahhhhhh
raise ValueError("No support for returning response and merging")
return get_current_app().response_class(ret, **merged_kwargs)
setattr(wrapper, _shared_decorator_key, {'kwargs': merged_kwargs, 'wrapped': fn})
return wrapper
return make_wrapper
def content_type(ctype):
return _shared_decorator_logic(content_type=ctype)
def status_code(code):
return _shared_decorator_logic(status=code)
def headers(direct_dict=None, **kwargs):
funneled = direct_dict or dict()
funneled.update(kwargs)
funneled = {k.replace('_', '-').upper(): v for k, v in funneled.items()}
return _shared_decorator_logic(headers=funneled)
```
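A sketch of how these merged decorators might be stacked on a Flask view; the blueprint `bp` and the import path are assumptions, not from this file:
```python
from flask import Blueprint
# assumed import path; adjust to wherever restutil lives in the package
from blueprints.rest.restutil import content_type, status_code, headers
bp = Blueprint('demo', __name__)  # hypothetical blueprint for illustration
@bp.route('/ping')
@content_type('application/json')
@status_code(200)
@headers(cache_control='no-store')
def ping():
    # the three decorators above collapse into a single Response construction
    return '{"ok": true}'
```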
#### File: server/blueprints/webui.py
```python
from flask import Blueprint, Flask, render_template, url_for
from blueprints import security
blueprint = Blueprint(__name__, __name__, url_prefix='/webui')
@blueprint.route('/')
def index():
return render_template('index.jinja')
@blueprint.app_template_filter('index')
def filter_index(m, v):
return m[v]
@blueprint.app_context_processor
def add_jinja_vars():
import constants
from titlecase import titlecase
from util import get_rules
d = dict()
d['PageData'] = constants.PageData
rules = get_rules([security.blueprint.name, blueprint.name])
# sorts endpoints by last segment (e.x. f.o.o is sorted using o)
endpoints = list(sorted(((x.endpoint, x.endpoint.split('.')[-1]) for x in rules), key=lambda x: x[1]))
d['allpages'] = tuple((url_for(k), v, titlecase(v)) for k, v in endpoints)
d['navbar_should_show_page'] = lambda x: True
d['custom_css'] = [ "/static/webui.css" ]
return d
def registerself(app: Flask, prefix=''):
prefix += blueprint.url_prefix
app.register_blueprint(blueprint, url_prefix=prefix)
security.registerself(app, prefix=prefix)
``` |
{
"source": "1065672644894730302/Chromium",
"score": 2
} |
#### File: build/android/single_test_runner.py
```python
import logging
import os
import sys
from base_test_runner import BaseTestRunner
import debug_info
import run_tests_helper
from test_package_apk import TestPackageApk
from test_package_executable import TestPackageExecutable
from test_result import TestResults
class SingleTestRunner(BaseTestRunner):
"""Single test suite attached to a single device.
Args:
device: Device to run the tests.
test_suite: A specific test suite to run, empty to run all.
gtest_filter: A gtest_filter flag.
test_arguments: Additional arguments to pass to the test binary.
timeout: Timeout for each test.
rebaseline: Whether or not to run tests in isolation and update the filter.
performance_test: Whether or not performance test(s).
cleanup_test_files: Whether or not to cleanup test files on device.
tool: Name of the Valgrind tool.
shard_index: index number of the shard on which the test suite will run.
dump_debug_info: Whether or not to dump debug information.
"""
def __init__(self, device, test_suite, gtest_filter, test_arguments, timeout,
rebaseline, performance_test, cleanup_test_files, tool,
shard_index, dump_debug_info=False,
fast_and_loose=False):
BaseTestRunner.__init__(self, device, shard_index)
self._running_on_emulator = self.device.startswith('emulator')
self._gtest_filter = gtest_filter
self._test_arguments = test_arguments
self.test_results = TestResults()
if dump_debug_info:
self.dump_debug_info = debug_info.GTestDebugInfo(self.adb, device,
os.path.basename(test_suite), gtest_filter)
else:
self.dump_debug_info = None
self.fast_and_loose = fast_and_loose
if os.path.splitext(test_suite)[1] == '.apk':
self.test_package = TestPackageApk(
self.adb, device,
test_suite, timeout, rebaseline, performance_test, cleanup_test_files,
tool, self.dump_debug_info)
else:
self.test_package = TestPackageExecutable(
self.adb, device,
test_suite, timeout, rebaseline, performance_test, cleanup_test_files,
tool, self.dump_debug_info)
def _GetHttpServerDocumentRootForTestSuite(self):
"""Returns the document root needed by the test suite."""
if self.test_package.test_suite_basename == 'page_cycler_tests':
return os.path.join(run_tests_helper.CHROME_DIR, 'data', 'page_cycler')
return None
def _TestSuiteRequiresMockTestServer(self):
"""Returns True if the test suite requires mock test server."""
return False
# TODO(yfriedman): Disabled because of flakiness.
# (self.test_package.test_suite_basename == 'unit_tests' or
# self.test_package.test_suite_basename == 'net_unittests' or
# False)
def _GetFilterFileName(self):
"""Returns the filename of gtest filter."""
return os.path.join(sys.path[0], 'gtest_filter',
self.test_package.test_suite_basename + '_disabled')
def _GetAdditionalEmulatorFilterName(self):
"""Returns the filename of additional gtest filter for emulator."""
return os.path.join(sys.path[0], 'gtest_filter',
self.test_package.test_suite_basename +
'_emulator_additional_disabled')
def GetDisabledTests(self):
"""Returns a list of disabled tests.
Returns:
A list of disabled tests obtained from gtest_filter/test_suite_disabled.
"""
disabled_tests = run_tests_helper.GetExpectations(self._GetFilterFileName())
if self._running_on_emulator:
# Append emulator's filter file.
disabled_tests.extend(run_tests_helper.GetExpectations(
self._GetAdditionalEmulatorFilterName()))
return disabled_tests
def UpdateFilter(self, failed_tests):
"""Updates test_suite_disabled file with the new filter (deletes if empty).
If running in Emulator, only the failed tests which are not in the normal
filter returned by _GetFilterFileName() are written to emulator's
additional filter file.
Args:
failed_tests: A sorted list of failed tests.
"""
disabled_tests = []
if not self._running_on_emulator:
filter_file_name = self._GetFilterFileName()
else:
filter_file_name = self._GetAdditionalEmulatorFilterName()
disabled_tests.extend(
run_tests_helper.GetExpectations(self._GetFilterFileName()))
logging.info('About to update emulator\'s additional filter (%s).'
% filter_file_name)
new_failed_tests = []
if failed_tests:
for test in failed_tests:
if test.name not in disabled_tests:
new_failed_tests.append(test.name)
if not new_failed_tests:
if os.path.exists(filter_file_name):
os.unlink(filter_file_name)
return
filter_file = file(filter_file_name, 'w')
if self._running_on_emulator:
      filter_file.write('# Additional list of suppressions from emulator\n')
else:
filter_file.write('# List of suppressions\n')
filter_file.write('# This file was automatically generated by %s\n'
% sys.argv[0])
filter_file.write('\n'.join(sorted(new_failed_tests)))
filter_file.write('\n')
filter_file.close()
def GetDataFilesForTestSuite(self):
"""Returns a list of data files/dirs needed by the test suite."""
# Ideally, we'd just push all test data. However, it has >100MB, and a lot
# of the files are not relevant (some are used for browser_tests, others for
# features not supported, etc..).
if self.test_package.test_suite_basename in ['base_unittests',
'sql_unittests',
'unit_tests']:
return [
'base/data/json/bom_feff.json',
'net/data/cache_tests/insert_load1',
'net/data/cache_tests/dirty_entry5',
'ui/base/test/data/data_pack_unittest',
'chrome/test/data/bookmarks/History_with_empty_starred',
'chrome/test/data/bookmarks/History_with_starred',
'chrome/test/data/extensions/json_schema_test.js',
'chrome/test/data/History/',
'chrome/test/data/json_schema_validator/',
'chrome/test/data/serializer_nested_test.js',
'chrome/test/data/serializer_test.js',
'chrome/test/data/serializer_test_nowhitespace.js',
'chrome/test/data/top_sites/',
'chrome/test/data/web_database',
'chrome/test/data/zip',
]
elif self.test_package.test_suite_basename == 'net_unittests':
return [
'net/data/cache_tests',
'net/data/filter_unittests',
'net/data/ftp',
'net/data/proxy_resolver_v8_unittest',
'net/data/ssl/certificates',
]
elif self.test_package.test_suite_basename == 'ui_tests':
return [
'chrome/test/data/dromaeo',
'chrome/test/data/json2.js',
'chrome/test/data/sunspider',
'chrome/test/data/v8_benchmark',
'chrome/test/ui/sunspider_uitest.js',
'chrome/test/ui/v8_benchmark_uitest.js',
]
elif self.test_package.test_suite_basename == 'page_cycler_tests':
data = [
'tools/page_cycler',
'data/page_cycler',
]
for d in data:
if not os.path.exists(d):
raise Exception('Page cycler data not found.')
return data
elif self.test_package.test_suite_basename == 'webkit_unit_tests':
return [
'third_party/WebKit/Source/WebKit/chromium/tests/data',
          # We need the chrome/ directory to convince webkit_support::
# GetWebKitRootDirFilePath() we're in a chrome working dir.
'chrome/VERSION',
]
return []
def LaunchHelperToolsForTestSuite(self):
"""Launches helper tools for the test suite.
Sometimes one test may need to run some helper tools first in order to
successfully complete the test.
"""
document_root = self._GetHttpServerDocumentRootForTestSuite()
if document_root:
self.LaunchTestHttpServer(document_root)
if self._TestSuiteRequiresMockTestServer():
self.LaunchChromeTestServerSpawner()
def StripAndCopyFiles(self):
"""Strips and copies the required data files for the test suite."""
self.test_package.StripAndCopyExecutable()
self.test_package.tool.CopyFiles()
test_data = self.GetDataFilesForTestSuite()
if test_data and not self.fast_and_loose:
if self.test_package.test_suite_basename == 'page_cycler_tests':
# Since the test data for page cycler are huge (around 200M), we use
        # sdcard to store the data and create symbolic links to map them to
# data/local/tmp/ later.
self.CopyTestData(test_data, '/sdcard/')
for p in [os.path.dirname(d) for d in test_data if os.path.isdir(d)]:
mapped_device_path = '/data/local/tmp/' + p
# Unlink the mapped_device_path at first in case it was mapped to
          # a wrong path. Add option '-r' because the old path could be a dir.
self.adb.RunShellCommand('rm -r %s' % mapped_device_path)
self.adb.RunShellCommand(
'ln -s /sdcard/%s %s' % (p, mapped_device_path))
else:
self.CopyTestData(test_data, '/data/local/tmp/')
def RunTestsWithFilter(self):
"""Runs a tests via a small, temporary shell script."""
self.test_package.CreateTestRunnerScript(self._gtest_filter,
self._test_arguments)
self.test_results = self.test_package.RunTestsAndListResults()
def RebaselineTests(self):
"""Runs all available tests, restarting in case of failures."""
if self._gtest_filter:
all_tests = set(self._gtest_filter.split(':'))
else:
all_tests = set(self.test_package.GetAllTests())
failed_results = set()
executed_results = set()
while True:
executed_names = set([f.name for f in executed_results])
self._gtest_filter = ':'.join(all_tests - executed_names)
self.RunTestsWithFilter()
failed_results.update(self.test_results.crashed,
self.test_results.failed)
executed_results.update(self.test_results.crashed,
self.test_results.failed,
self.test_results.ok)
executed_names = set([f.name for f in executed_results])
logging.info('*' * 80)
logging.info(self.device)
logging.info('Executed: ' + str(len(executed_names)) + ' of ' +
str(len(all_tests)))
logging.info('Failed so far: ' + str(len(failed_results)) + ' ' +
str([f.name for f in failed_results]))
logging.info('Remaining: ' + str(len(all_tests - executed_names)) + ' ' +
str(all_tests - executed_names))
logging.info('*' * 80)
if executed_names == all_tests:
break
self.test_results = TestResults.FromRun(
ok=list(executed_results - failed_results),
failed=list(failed_results))
def RunTests(self):
"""Runs all tests (in rebaseline mode, runs each test in isolation).
Returns:
A TestResults object.
"""
if self.test_package.rebaseline:
self.RebaselineTests()
else:
if not self._gtest_filter:
self._gtest_filter = ('-' + ':'.join(self.GetDisabledTests()) + ':' +
':'.join(['*.' + x + '*' for x in
self.test_package.GetDisabledPrefixes()]))
self.RunTestsWithFilter()
return self.test_results
def SetUp(self):
"""Sets up necessary test enviroment for the test suite."""
super(SingleTestRunner, self).SetUp()
if self.test_package.performance_test:
if run_tests_helper.IsRunningAsBuildbot():
self.adb.SetJavaAssertsEnabled(enable=False)
self.adb.Reboot(full_reboot=False)
self.adb.SetupPerformanceTest()
if self.dump_debug_info:
self.dump_debug_info.StartRecordingLog(True)
self.StripAndCopyFiles()
self.LaunchHelperToolsForTestSuite()
self.test_package.tool.SetupEnvironment()
def TearDown(self):
"""Cleans up the test enviroment for the test suite."""
self.test_package.tool.CleanUpEnvironment()
if self.test_package.cleanup_test_files:
self.adb.RemovePushedFiles()
if self.dump_debug_info:
self.dump_debug_info.StopRecordingLog()
if self.test_package.performance_test:
self.adb.TearDownPerformanceTest()
super(SingleTestRunner, self).TearDown()
```
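For reference, a minimal sketch of how the disabled tests and prefixes above fold into the negative gtest filter that `RunTests()` builds (the test names here are illustrative):
```python
# Illustrative sketch mirroring the filter construction in RunTests() above.
disabled_tests = ['FooTest.CrashesOnDevice', 'BarTest.TimesOut']
disabled_prefixes = ['FLAKY', 'FAILS']
gtest_filter = ('-' + ':'.join(disabled_tests) + ':' +
                ':'.join('*.' + p + '*' for p in disabled_prefixes))
# -> '-FooTest.CrashesOnDevice:BarTest.TimesOut:*.FLAKY*:*.FAILS*'
```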
#### File: docs/server2/branch_utility_test.py
```python
import unittest
import test_urlfetch
from branch_utility import BranchUtility
class BranchUtilityTest(unittest.TestCase):
def testGetChannelNameFromPath(self):
b_util = BranchUtility(test_urlfetch)
self.assertEquals('dev', b_util.GetChannelNameFromPath(
'dev/hello/stuff.html'))
self.assertEquals('beta', b_util.GetChannelNameFromPath(
'beta/hello/stuff.html'))
self.assertEquals('trunk', b_util.GetChannelNameFromPath(
'trunk/hello/stuff.html'))
self.assertEquals('stable', b_util.GetChannelNameFromPath(
'hello/stuff.html'))
self.assertEquals('stable', b_util.GetChannelNameFromPath(
'hello/dev/stuff.html'))
def testGetBranchNumberForChannelName(self):
b_util = BranchUtility(test_urlfetch)
b_util.SetURL('branch_utility/first.json')
self.assertEquals('1132',
b_util.GetBranchNumberForChannelName('dev'))
self.assertEquals('1084',
b_util.GetBranchNumberForChannelName('beta'))
self.assertEquals('1234',
b_util.GetBranchNumberForChannelName('stable'))
self.assertEquals('trunk',
b_util.GetBranchNumberForChannelName('trunk'))
if __name__ == '__main__':
unittest.main()
```
#### File: docs/server2/resource_fetcher.py
```python
SUBVERSION_URL = 'http://src.chromium.org/viewvc/chrome/'
TRUNK_URL = SUBVERSION_URL + 'trunk/'
BRANCH_URL = SUBVERSION_URL + 'branches/'
class SubversionFetcher(object):
"""Class to fetch code from src.chromium.org.
"""
def __init__(self, urlfetch):
self.urlfetch = urlfetch
def _GetURLFromBranch(self, branch):
if branch == 'trunk':
return TRUNK_URL
return BRANCH_URL + branch + '/'
def FetchResource(self, branch, path):
url = self._GetURLFromBranch(branch) + path
result = self.urlfetch.fetch(url)
return result
```
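A short sketch of the URLs `SubversionFetcher` builds; the `urlfetch` argument can be any object exposing `fetch(url)`, such as the mock in `test_urlfetch.py` below (import paths assumed):
```python
# A minimal usage sketch, assuming these modules are importable as-is.
from resource_fetcher import SubversionFetcher
import test_urlfetch

fetcher = SubversionFetcher(test_urlfetch)
# branch 'trunk' -> http://src.chromium.org/viewvc/chrome/trunk/src/DEPS
# branch '1084'  -> http://src.chromium.org/viewvc/chrome/branches/1084/src/DEPS
result = fetcher.FetchResource('1084', 'src/DEPS')
```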
#### File: docs/server2/test_urlfetch.py
```python
def _ReadFile(filename):
with open(filename, 'r') as f:
return f.read()
class _MockResponse(object):
def __init__(self):
self.content = ''
def fetch(url):
result = _MockResponse()
result.content = _ReadFile('test_data/' + url)
return result
```
#### File: docs/server2/urlfetch.py
```python
import logging
from google.appengine.api import urlfetch
from google.appengine.api import memcache
DEFAULT_CACHE_TIME = 300
class _FetchException(Exception):
"""Thrown when status code is not 200.
"""
def __init__(self, url):
Exception.__init__(self, 'Fetch exception from ' + url)
def fetch(url):
result = memcache.get(url, namespace=__name__)
if result is not None:
return result
logging.info('Fetch cache miss: ' + url)
result = urlfetch.fetch(url)
if result.status_code != 200:
raise _FetchException(url)
memcache.add(url, result, DEFAULT_CACHE_TIME, namespace=__name__)
return result
```
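The `fetch()` above memoizes successful responses in memcache for `DEFAULT_CACHE_TIME` (300) seconds and raises `_FetchException` on any non-200 status. A behavior sketch (App Engine runtime assumed; the import path is illustrative):
```python
# Behavior sketch for the caching fetch() above.
from server2 import urlfetch as cached  # illustrative import path

url = 'http://example.com/branches.json'
first = cached.fetch(url)   # cache miss: real fetch, then memcache.add()
second = cached.fetch(url)  # within 300s: served straight from memcache
assert first.content == second.content
```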
#### File: test/functional/chromeos_device_policy.py
```python
import logging
import pyauto_functional # Must come before pyauto (and thus, policy_base).
import policy_base
import pyauto_errors
class ChromeosDevicePolicy(policy_base.PolicyTestBase):
"""Tests various ChromeOS device policies."""
def _SetDevicePolicyAndOwner(self, policy):
self.SetDevicePolicy(device_policy=policy, owner=self._usernames[0])
def LoginAsGuest(self):
self.assertFalse(self.GetLoginInfo()['is_logged_in'],
msg='Expected to be logged out.')
policy_base.PolicyTestBase.LoginAsGuest(self)
self.assertTrue(self.GetLoginInfo()['is_logged_in'],
msg='Expected to be logged in.')
def Login(self, user_index, expect_success):
self.assertFalse(self.GetLoginInfo()['is_logged_in'],
msg='Expected to be logged out.')
if expect_success:
policy_base.PolicyTestBase.Login(self,
self._usernames[user_index],
self._passwords[user_index])
self.assertTrue(self.GetLoginInfo()['is_logged_in'],
msg='Expected to be logged in.')
else:
self.assertRaises(
pyauto_errors.LoginError,
lambda: policy_base.PolicyTestBase.Login(self,
self._usernames[user_index],
self._passwords[user_index]))
self.assertFalse(self.GetLoginInfo()['is_logged_in'],
msg='Expected to not be logged in.')
# TODO(bartfab): Remove this after crosbug.com/20709 is fixed.
def TryToDisableLocalStateAutoClearing(self):
# Try to disable automatic clearing of the local state.
self.TryToDisableLocalStateAutoClearingOnChromeOS()
self._local_state_auto_clearing = \
self.IsLocalStateAutoClearingEnabledOnChromeOS()
if not self._local_state_auto_clearing:
# Prevent the inherited Logout() method from cleaning up /home/chronos
# as this also clears the local state.
self.set_clear_profile(False)
def ExtraChromeFlags(self):
"""Sets up Chrome to skip OOBE.
TODO(bartfab): Ensure OOBE is still skipped when crosbug.com/20709 is fixed.
Disabling automatic clearing of the local state has the curious side effect
of removing a flag that disables OOBE. This method adds back the flag.
"""
flags = policy_base.PolicyTestBase.ExtraChromeFlags(self)
flags.append('--login-screen=login')
return flags
def setUp(self):
policy_base.PolicyTestBase.setUp(self)
# TODO(bartfab): Remove this after crosbug.com/20709 is fixed.
self._local_state_auto_clearing = \
self.IsLocalStateAutoClearingEnabledOnChromeOS()
# Cache user credentials for easy lookup. The first user will become the
# owner.
credentials = (self.GetPrivateInfo()['prod_enterprise_test_user'],
self.GetPrivateInfo()['prod_enterprise_executive_user'],
self.GetPrivateInfo()['prod_enterprise_sales_user'])
self._usernames = [credential['username'] for credential in credentials]
self._passwords = [credential['password'] for credential in credentials]
def tearDown(self):
# TODO(bartfab): Remove this after crosbug.com/20709 is fixed.
# Try to re-enable automatic clearing of the local state and /home/chronos.
if not self._local_state_auto_clearing:
self.TryToEnableLocalStateAutoClearingOnChromeOS()
self.set_clear_profile(True)
policy_base.PolicyTestBase.tearDown(self)
def _CheckGuestModeAvailableInLoginWindow(self):
return self.ExecuteJavascriptInOOBEWebUI(
"""window.domAutomationController.send(
!document.getElementById('guestSignin').hidden);""")
def _CheckGuestModeAvailableInAccountPicker(self):
return self.ExecuteJavascriptInOOBEWebUI(
"""window.domAutomationController.send(
!!document.getElementById('pod-row').getPodWithUsername_(''));
""")
def _GetCurrentLoginScreenId(self):
return self.ExecuteJavascriptInOOBEWebUI(
"""window.domAutomationController.send(
String(cr.ui.Oobe.getInstance().currentScreen.id));""")
def testGuestModeEnabled(self):
"""Checks that guest mode login can be enabled/disabled."""
self._SetDevicePolicyAndOwner({'guest_mode_enabled': True})
self.assertTrue(self._CheckGuestModeAvailableInLoginWindow(),
msg='Expected guest mode to be available.')
self.LoginAsGuest()
self.Logout()
self._SetDevicePolicyAndOwner({'guest_mode_enabled': False})
self.assertFalse(self._CheckGuestModeAvailableInLoginWindow(),
msg='Expected guest mode to not be available.')
# TODO(bartfab): Remove this after crosbug.com/20709 is fixed.
self.TryToDisableLocalStateAutoClearing()
if self._local_state_auto_clearing:
logging.warn("""Unable to disable local state clearing. Skipping remainder
of test.""")
return
    # Log in as a regular user so that the pod row contains at least one pod
    # and the account picker is shown.
self.Login(user_index=0, expect_success=True)
self.Logout()
self._SetDevicePolicyAndOwner({'guest_mode_enabled': True})
self.assertTrue(self._CheckGuestModeAvailableInAccountPicker(),
msg='Expected guest mode to be available.')
self.LoginAsGuest()
self.Logout()
self._SetDevicePolicyAndOwner({'guest_mode_enabled': False})
self.assertFalse(self._CheckGuestModeAvailableInAccountPicker(),
msg='Expected guest mode to not be available.')
def testShowUserNamesOnSignin(self):
"""Checks that the account picker can be enabled/disabled."""
# TODO(bartfab): Remove this after crosbug.com/20709 is fixed.
self.TryToDisableLocalStateAutoClearing()
if self._local_state_auto_clearing:
logging.warn('Unable to disable local state clearing. Skipping test.')
return
# Log in as a regular user so that the pod row contains at least one pod and
# the account picker can be shown.
self.Login(user_index=0, expect_success=True)
self.Logout()
self._SetDevicePolicyAndOwner({'show_user_names': True})
self.assertEquals('account-picker',
self._GetCurrentLoginScreenId(),
msg='Expected the account picker to be visible.')
self._SetDevicePolicyAndOwner({'show_user_names': False})
self.assertEquals('gaia-signin',
self._GetCurrentLoginScreenId(),
msg='Expected the account picker to not be visible.')
def testUserWhitelistAndAllowNewUsers(self):
"""Checks that login can be (dis)allowed by whitelist and allow-new-users.
The test verifies that these two interrelated policies behave as documented
in the chrome/browser/policy/proto/chrome_device_policy.proto file. Cases
for which the current behavior is marked as "broken" are intentionally
    omitted since the broken behavior should be fixed rather than protected by
tests.
"""
# No whitelist
self._SetDevicePolicyAndOwner({'allow_new_users': True})
self.Login(user_index=0, expect_success=True)
self.Logout()
# Empty whitelist
self._SetDevicePolicyAndOwner({'user_whitelist': []})
self.Login(user_index=0, expect_success=True)
self.Logout()
self._SetDevicePolicyAndOwner({'allow_new_users': True,
'user_whitelist': []})
self.Login(user_index=0, expect_success=True)
self.Logout()
# Populated whitelist
self._SetDevicePolicyAndOwner({'user_whitelist': [self._usernames[0]]})
self.Login(user_index=0, expect_success=True)
self.Logout()
self.Login(user_index=1, expect_success=False)
self._SetDevicePolicyAndOwner({'allow_new_users': True,
'user_whitelist': [self._usernames[0]]})
self.Login(user_index=0, expect_success=True)
self.Logout()
self.Login(user_index=1, expect_success=True)
# New users not allowed, populated whitelist
self._SetDevicePolicyAndOwner({'allow_new_users': False,
'user_whitelist': [self._usernames[0]]})
self.Login(user_index=0, expect_success=True)
self.Logout()
self.Login(user_index=1, expect_success=False)
if __name__ == '__main__':
pyauto_functional.Main()
```
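The whitelist/allow-new-users cases exercised in `testUserWhitelistAndAllowNewUsers` reduce to roughly the following decision logic (a condensed sketch for reference, not Chrome's implementation):
```python
def login_allowed(username, allow_new_users, user_whitelist):
  # No whitelist, or an empty one: any user may log in.
  if not user_whitelist:
    return True
  # Whitelisted users may always log in.
  if username in user_whitelist:
    return True
  # Everyone else may log in only if new users are explicitly allowed.
  return bool(allow_new_users)
```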
#### File: test/functional/plugins.py
```python
import logging
import os
import re
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
class PluginsTest(pyauto.PyUITest):
"""TestCase for Plugins."""
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
while True:
raw_input('Interact with the browser and hit <enter> to list plugins...')
self.pprint(self.GetPluginsInfo().Plugins())
def setUp(self):
pyauto.PyUITest.setUp(self)
self._flash_plugin_type = 'Plug-in'
if (self.IsLinux() and
self.GetBrowserInfo()['properties']['branding'] == 'Google Chrome'):
self._flash_plugin_type = 'Pepper Plugin'
def _ObtainPluginsList(self):
"""Obtain a list of plugins for each platform.
Produces warnings for plugins which are not installed on the machine.
Returns:
a list of 2-tuple, corresponding to the html file used for test and the
name of the plugin
"""
plugins = [('flash-clicktoplay.html', 'Shockwave Flash'),
('java_new.html', 'Java'),] # common to all platforms
if self.IsWin() or self.IsMac():
plugins = plugins + [
('silverlight_new.html', 'Silverlight'),
('quicktime.html', 'QuickTime'),
('wmp_new.html', 'Windows Media'),
('real.html', 'RealPlayer'),
]
out = []
# Emit warnings for plugins that are not installed on the machine and
# therefore cannot be tested.
plugins_info = self.GetPluginsInfo()
for fname, name in plugins:
for a_plugin in plugins_info.Plugins():
is_installed = False
if re.search(name, a_plugin['name']):
is_installed = True
break
if not is_installed:
logging.warn('%s plugin is not installed and cannot be tested' % name)
else:
out.append((fname, name))
return out
def _GetPluginPID(self, plugin_name):
"""Fetch the pid of the plugin process with name |plugin_name|."""
child_processes = self.GetBrowserInfo()['child_processes']
plugin_type = 'Plug-in'
if plugin_name == 'Shockwave Flash':
plugin_type = self._flash_plugin_type
for x in child_processes:
if x['type'] == plugin_type and re.search(plugin_name, x['name']):
return x['pid']
return None
def _TogglePlugin(self, plugin_name):
"""Toggle a plugin's status.
If enabled, disable it.
If disabled, enable it.
"""
for plugin in self.GetPluginsInfo().Plugins():
if re.search(plugin_name, plugin['name']):
if plugin['enabled']:
self.DisablePlugin(plugin['path'])
else:
self.EnablePlugin(plugin['path'])
def _IsEnabled(self, plugin_name):
"""Checks if plugin is enabled."""
for plugin in self.GetPluginsInfo().Plugins():
if re.search(plugin_name, plugin['name']):
return plugin['enabled']
def _PluginNeedsAuthorization(self, plugin_name):
# These plug-ins seek permission to run
return plugin_name in ['Java', 'QuickTime', 'Windows Media', 'RealPlayer']
def testKillAndReloadAllPlugins(self):
"""Verify plugin processes and check if they can reload after killing."""
for fname, plugin_name in self._ObtainPluginsList():
if plugin_name == 'Shockwave Flash':
continue # cannot reload file:// flash URL - crbug.com/47249
url = self.GetFileURLForPath(
os.path.join(self.DataDir(), 'plugin', fname))
self.NavigateToURL(url)
if self._PluginNeedsAuthorization(plugin_name):
self.assertTrue(self.WaitForInfobarCount(1))
self.PerformActionOnInfobar('accept', 0)
self.WaitUntil(
lambda: self._GetPluginPID(plugin_name) is not None )
pid = self._GetPluginPID(plugin_name)
self.assertTrue(pid, 'No plugin process for %s' % plugin_name)
self.Kill(pid)
self.assertTrue(self.WaitUntil(
lambda: self._GetPluginPID(plugin_name) is None),
msg='Expected %s plugin to die after killing' % plugin_name)
self.GetBrowserWindow(0).GetTab(0).Reload()
self.assertTrue(self.WaitUntil(
lambda: self._GetPluginPID(plugin_name)),
msg='No plugin process for %s after reloading' % plugin_name)
# Verify that it's in fact a new process.
self.assertNotEqual(pid, self._GetPluginPID(plugin_name),
'Did not get new pid for %s after reloading' %
plugin_name)
def testDisableEnableAllPlugins(self):
"""Verify if all the plugins can be disabled and enabled.
This is equivalent to testing the enable/disable functionality in
chrome://plugins
"""
    # Flash files loaded too quickly after firing up the browser end up getting
# downloaded, which seems to indicate that the plugin hasn't been
# registered yet.
# Hack to register Flash plugin on all platforms. crbug.com/94123
self.GetPluginsInfo()
for fname, plugin_name in self._ObtainPluginsList():
# Verify initial state
self.assertTrue(self._IsEnabled(plugin_name),
'%s not enabled initially.' % plugin_name)
# Disable
self._TogglePlugin(plugin_name)
self.assertFalse(self._IsEnabled(plugin_name))
# Attempt to load a page that triggers the plugin and verify that it
# indeed could not be loaded.
url = self.GetFileURLForPath(
os.path.join(self.DataDir(), 'plugin', fname))
self.NavigateToURL(url)
self.assertTrue(self.WaitUntil(
lambda: self._GetPluginPID(plugin_name) is None ))
self.assertFalse(self._GetPluginPID(plugin_name=plugin_name))
if plugin_name == 'Shockwave Flash':
continue # cannot reload file:// flash URL - crbug.com/47249
if plugin_name == 'Java':
continue # crbug.com/71223
# Enable
self._TogglePlugin(plugin_name)
self.GetBrowserWindow(0).GetTab(0).Reload()
if self._PluginNeedsAuthorization(plugin_name):
self.assertTrue(self.WaitForInfobarCount(1))
self.PerformActionOnInfobar('accept', 0)
self.assertTrue(self.WaitUntil(
lambda: self._GetPluginPID(plugin_name=plugin_name)))
self.assertTrue(self._IsEnabled(plugin_name), plugin_name)
def testBlockAllPlugins(self):
"""Verify that all the plugins can be blocked.
    Verified by checking that the flash plugin is blocked.
"""
flash_url = self.GetFileURLForPath(os.path.join(
self.DataDir(), 'plugin', 'flash-clicktoplay.html'))
self.NavigateToURL(flash_url)
flash_pid = self._GetPluginPID('Shockwave Flash')
self.assertTrue(flash_pid, msg='No plugin process for Shockwave Flash')
# Killing the flash process as it takes a while before the plugin
# process is terminated even though there are no tabs using it.
self.Kill(flash_pid)
self.assertTrue(self.WaitUntil(
lambda: self._GetPluginPID('Shockwave Flash') is None),
msg='Expected Shockwave Flash plugin to die after killing')
# Set the preference to block all plugins.
self.SetPrefs(pyauto.kDefaultContentSettings, {'plugins': 2})
self.GetBrowserWindow(0).GetTab(0).Reload()
self.assertFalse(self._GetPluginPID('Shockwave Flash'),
msg='Plug-in not blocked.')
def testAllowPluginException(self):
"""Verify that plugins can be allowed on a domain by adding
an exception(s)."""
# Set the preference to block all plugins.
self.SetPrefs(pyauto.kDefaultContentSettings, {'plugins': 2})
flash_url = self.GetFileURLForPath(os.path.join(
self.DataDir(), 'plugin', 'flash-clicktoplay.html'))
self.NavigateToURL(flash_url)
# Check that plugins are blocked.
self.assertFalse(self._GetPluginPID('Shockwave Flash'),
msg='Plug-in not blocked.')
# Add an exception to allow plugins on hulu.com.
self.SetPrefs(pyauto.kContentSettingsPatternPairs,
{'[*.]hulu.com,*': {'plugins': 1}})
self.AppendTab(pyauto.GURL('http://www.hulu.com'))
self.assertTrue(self._GetPluginPID('Shockwave Flash'),
msg='No plugin process for Shockwave Flash')
def testBlockPluginException(self):
"""Verify that plugins can be blocked on a domain by adding
an exception(s)."""
url = 'http://www.hulu.com'
self.NavigateToURL(url)
# Wait until Shockwave Flash plugin process loads.
self.assertTrue(self.WaitUntil(
lambda: self._GetPluginPID('Shockwave Flash') is not None),
msg='No plugin process for Shockwave Flash')
self.Kill(self._GetPluginPID('Shockwave Flash'))
self.assertTrue(self.WaitUntil(
lambda: self._GetPluginPID('Shockwave Flash') is None),
msg='Expected Shockwave Flash plugin to die after killing')
    # Add an exception to block plugins on hulu.com.
self.SetPrefs(pyauto.kContentSettingsPatternPairs,
{'[*.]hulu.com,*': {'plugins': 2}})
self.GetBrowserWindow(0).GetTab(0).Reload()
self.assertFalse(self._GetPluginPID('Shockwave Flash'),
msg='Shockwave Flash Plug-in not blocked.')
if __name__ == '__main__':
pyauto_functional.Main()
```
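The tests above lean heavily on polling with `WaitUntil` and a lambda. A self-contained sketch of that pattern (illustrative, not pyauto's implementation):
```python
import time

def wait_until(predicate, timeout=30, interval=0.5):
  """Polls predicate() until it returns a truthy value or the timeout expires."""
  deadline = time.time() + timeout
  while time.time() < deadline:
    if predicate():
      return True
    time.sleep(interval)
  return False

# e.g. wait_until(lambda: get_plugin_pid('Shockwave Flash') is None),
# where get_plugin_pid is a stand-in for _GetPluginPID above.
```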
#### File: src/build_tools/generate_make.py
```python
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ARCHITECTURES = ['32', '64']
def WriteMakefile(srcpath, dstpath, replacements):
  print('opening: %s\n' % srcpath)
text = open(srcpath, 'rb').read()
for key in replacements:
text = text.replace(key, replacements[key])
open(dstpath, 'wb').write(text)
def GetExtType(desc):
if desc['TYPE'] in ['main', 'nexe']:
ext = '.nexe'
else:
ext = '.so'
return ext
def GenPatsubst(arch, macro, ext, EXT):
return '$(patsubst %%.%s,%%_%s.o,$(%s_%s))' % (ext, arch, macro, EXT)
def SetVar(varname, values):
  if not values:
    return varname + ':=\n'
  line = varname + ':='
  out = ''
  for value in values:
    # Wrap near 78 columns, continuing the assignment with '+=' and keeping
    # the value that triggered the wrap instead of silently dropping it.
    if len(line) + len(value) > 78:
      out += line.rstrip() + '\n'
      line = varname + '+=' + value + ' '
    else:
      line += value + ' '
  if line:
    out += line.rstrip() + '\n'
  return out
def GenerateReplacements(desc):
# Generate target settings
tools = desc['TOOLS']
settings = SetVar('VALID_TOOLCHAINS', tools)
  settings += 'TOOLCHAIN?=%s\n\n' % tools[0]
targets = []
rules = ''
for name in desc['TARGETS']:
target = desc['TARGETS'][name]
macro = name.upper()
ext = GetExtType(target)
sources = target['SOURCES']
cc_sources = [fname for fname in sources if fname.endswith('.c')]
cxx_sources = [fname for fname in sources if fname.endswith('.cc')]
if cc_sources:
flags = target.get('CCFLAGS', '')
settings += SetVar(macro + '_CC', cc_sources)
settings += SetVar(macro + '_CCFLAGS', flags)
if cxx_sources:
flags = target.get('CXXFLAGS', '')
settings += SetVar(macro + '_CXX', cxx_sources)
      settings += SetVar(macro + '_CXXFLAGS', flags)
flags = target.get('LDFLAGS')
settings += SetVar(macro + '_LDFLAGS', flags)
for arch in ARCHITECTURES:
object_sets = []
if cc_sources:
objs = '%s_%s_CC_O' % (macro, arch)
rules += '%s:=%s\n' % (objs, GenPatsubst(arch, macro, 'c', 'CC'))
rules += '$(%s) : %%_%s.o : %%.c $(THIS_MAKEFILE)\n' % (objs, arch)
rules += '\t$(NACL_CC) -o $@ $< -m%s $(%s_CCFLAGS)\n\n' % (arch, macro)
object_sets.append('$(%s)' % objs)
if cxx_sources:
objs = '%s_%s_CXX_O' % (macro, arch)
rules += '%s:=%s\n' % (objs, GenPatsubst(arch, macro, 'cc', 'CXX'))
rules += '$(%s) : %%_%s.o : %%.cc $(THIS_MAKEFILE)\n' % (objs, arch)
rules += '\t$(NACL_CXX) -o $@ $< -m%s $(%s_CXXFLAGS)\n\n' % (arch,
macro)
object_sets.append('$(%s)' % objs)
target_name = '%s_x86_%s%s' % (name, arch, ext)
targets.append(target_name)
rules += '%s : %s\n' % (target_name, ' '.join(object_sets))
rules += '\t$(NACL_LINK) -o $@ $^ -m%s $(%s_LDFLAGS)\n\n' % (arch, macro)
  targets = 'all : ' + ' '.join(targets)
return {
'__PROJECT_SETTINGS__' : settings,
'__PROJECT_TARGETS__' : targets,
'__PROJECT_RULES__' : rules
}
# 'KEY' : ( <TYPE>, [Accepted Values], <Required?>)
DSC_FORMAT = {
'TOOLS' : (list, ['host', 'newlib', 'glibc', 'pnacl'], True),
'TARGETS' : (list, {
'NAME': (str, '', True),
'TYPE': (str, ['main', 'nexe', 'so'], True),
'SOURCES': (list, '', True),
'CCFLAGS': (list, '', False),
'CXXFLAGS': (list, '', False),
'LDFLAGS': (list, '', False)
}, True),
'PAGE': (str, '', False),
'NAME': (str, '', False),
'DESC': (str, '', False),
'INFO': (str, '', False)
}
def ErrorMsgFunc(text):
sys.stderr.write(text + '\n')
def ValidateFormat(src, format, ErrorMsg=ErrorMsgFunc):
failed = False
# Verify all required keys are there
for key in format:
(exp_type, exp_value, required) = format[key]
if required and key not in src:
ErrorMsg('Missing required key %s.' % key)
failed = True
# For each provided key, verify it's valid
for key in src:
# Verify the key is known
if key not in format:
ErrorMsg('Unexpected key %s.' % key)
failed = True
continue
exp_type, exp_value, required = format[key]
value = src[key]
# Verify the key is of the expected type
if exp_type != type(value):
ErrorMsg('Key %s expects %s not %s.' % (
key, exp_type.__name__.upper(), type(value).__name__.upper()))
failed = True
continue
# Verify the value is non-empty if required
if required and not value:
ErrorMsg('Expected non-empty value for %s.' % key)
failed = True
continue
# If it's a string and there are expected values, make sure it matches
if exp_type is str:
if type(exp_value) is list and exp_value:
if value not in exp_value:
ErrorMsg('Value %s not expected for %s.' % (value, key))
failed = True
continue
# if it's a list, then we need to validate the values
if exp_type is list:
# If we expect a dictionary, then call this recursively
if type(exp_value) is dict:
for val in value:
if not ValidateFormat(val, exp_value, ErrorMsg):
failed = True
continue
# If we expect a list of strings
if type(exp_value) is str:
for val in value:
if type(val) is not str:
ErrorMsg('Value %s in %s is not a string.' % (val, key))
failed = True
continue
# if we expect a particular string
if type(exp_value) is list:
for val in value:
if val not in exp_value:
ErrorMsg('Value %s not expected in %s.' % (val, key))
failed = True
continue
# If we got this far, it's an unexpected type
ErrorMsg('Unexpected type %s for key %s.' % (str(type(src[key])), key))
continue
return not failed
# TODO(noelallen) : Remove before turning on
testdesc = {
'TOOLS': ['newlib', 'glibc'],
'TARGETS': {
'hello_world' : {
'TYPE' : 'main',
'SOURCES' : ['hello_world.c'],
'CCFLAGS' : '',
'CXXFLAGS' : '',
'LDFLAGS' : '',
},
},
'PAGE': 'hello_world.html',
'NAME': '<NAME>',
'DESC': """
The Hello World In C example demonstrates the basic structure of all
Native Client applications. This example loads a Native Client module. The
page tracks the status of the module as it loads. On a successful load, the
module will post a message containing the string "Hello World" back to
JavaScript which will display it as an alert.""",
'INFO': 'Basic HTML, JavaScript, and module architecture.'
}
def main(argv):
srcpath = os.path.join(SCRIPT_DIR, 'template.mk')
repdict = GenerateReplacements(testdesc)
WriteMakefile(srcpath, 'out.make', repdict)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
```
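For illustration, `SetVar` emits make variable assignments wrapped near 78 columns; a short usage sketch (assuming generate_make.py is importable):
```python
from generate_make import SetVar

print(SetVar('SOURCES', ['hello_world.c']))  # SOURCES:=hello_world.c
print(SetVar('EMPTY', []))                   # EMPTY:=
```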
#### File: third_party/handlebar/handlebar.py
```python
import json
import re
""" Handlebar templates are logicless templates inspired by ctemplate (or more
specifically mustache templates) then taken in their own direction because I
found those to be inadequate.
from handlebar import Handlebar
template = Handlebar('hello {{#foo}}{{bar}}{{/}} world')
input = {
'foo': [
{ 'bar': 1 },
{ 'bar': 2 },
{ 'bar': 3 }
]
}
print(template.render(input).text)
"""
class ParseException(Exception):
""" Exception thrown while parsing the template.
"""
def __init__(self, message):
Exception.__init__(self, message)
class RenderResult(object):
""" Result of a render operation.
"""
def __init__(self, text, errors):
    self.text = text
self.errors = errors
class StringBuilder(object):
""" Mimics Java's StringBuilder for easy porting from the Java version of
this file to Python.
"""
def __init__(self):
self._buf = []
def append(self, obj):
self._buf.append(str(obj))
def toString(self):
return ''.join(self._buf)
class PathIdentifier(object):
""" An identifier of the form "foo.bar.baz".
"""
def __init__(self, name):
if name == '':
raise ParseException("Cannot have empty identifiers")
if not re.match('^[a-zA-Z0-9._]*$', name):
raise ParseException(name + " is not a valid identifier")
self.path = name.split(".")
def resolve(self, contexts, errors):
resolved = None
for context in contexts:
if not context:
continue
resolved = self._resolveFrom(context)
if resolved:
return resolved
_RenderError(errors, "Couldn't resolve identifier ", self.path, " in ", contexts)
return None
def _resolveFrom(self, context):
result = context
for next in self.path:
if not result or type(result) != dict:
return None
result = result.get(next)
return result
def __str__(self):
return '.'.join(self.path)
class ThisIdentifier(object):
""" An identifier of the form "@".
"""
def resolve(self, contexts, errors):
return contexts[0]
def __str__(self):
return '@'
class SelfClosingNode(object):
""" Nodes which are "self closing", e.g. {{foo}}, {{*foo}}.
"""
def init(self, id):
self.id = id
class HasChildrenNode(object):
""" Nodes which are not self closing, and have 0..n children.
"""
def init(self, id, children):
self.id = id
self.children = children
class StringNode(object):
""" Just a string.
"""
def __init__(self, string):
self.string = string
def render(self, buf, contexts, errors):
buf.append(self.string)
class EscapedVariableNode(SelfClosingNode):
""" {{foo}}
"""
def render(self, buf, contexts, errors):
value = self.id.resolve(contexts, errors)
if value:
buf.append(self._htmlEscape(str(value)))
def _htmlEscape(self, unescaped):
escaped = StringBuilder()
for c in unescaped:
      if c == '<':
        escaped.append("&lt;")
      elif c == '>':
        escaped.append("&gt;")
      elif c == '&':
        escaped.append("&amp;")
else:
escaped.append(c)
return escaped.toString()
class UnescapedVariableNode(SelfClosingNode):
""" {{{foo}}}
"""
def render(self, buf, contexts, errors):
value = self.id.resolve(contexts, errors)
if value:
buf.append(value)
class SectionNode(HasChildrenNode):
""" {{#foo}} ... {{/}}
"""
def render(self, buf, contexts, errors):
value = self.id.resolve(contexts, errors)
if not value:
return
type_ = type(value)
if value == None:
pass
elif type_ == list:
for item in value:
contexts.insert(0, item)
_RenderNodes(buf, self.children, contexts, errors)
contexts.pop(0)
elif type_ == dict:
contexts.insert(0, value)
_RenderNodes(buf, self.children, contexts, errors)
contexts.pop(0)
else:
_RenderError(errors, "{{#", self.id, "}} cannot be rendered with a ", type_)
class VertedSectionNode(HasChildrenNode):
""" {{?foo}} ... {{/}}
"""
def render(self, buf, contexts, errors):
value = self.id.resolve(contexts, errors)
if value and _VertedSectionNodeShouldRender(value):
contexts.insert(0, value)
_RenderNodes(buf, self.children, contexts, errors)
contexts.pop(0)
def _VertedSectionNodeShouldRender(value):
type_ = type(value)
if value == None:
return False
elif type_ == bool:
return value
elif type_ == int or type_ == float:
return value > 0
elif type_ == str or type_ == unicode:
return value != ''
elif type_ == list or type_ == dict:
return len(value) > 0
raise TypeError("Unhandled type: " + str(type_))
class InvertedSectionNode(HasChildrenNode):
""" {{^foo}} ... {{/}}
"""
def render(self, buf, contexts, errors):
value = self.id.resolve(contexts, errors)
if not value or not _VertedSectionNodeShouldRender(value):
_RenderNodes(buf, self.children, contexts, errors)
class JsonNode(SelfClosingNode):
""" {{*foo}}
"""
def render(self, buf, contexts, errors):
value = self.id.resolve(contexts, errors)
if value:
buf.append(json.dumps(value, separators=(',',':')))
class PartialNode(SelfClosingNode):
""" {{+foo}}
"""
def render(self, buf, contexts, errors):
value = self.id.resolve(contexts, errors)
if not isinstance(value, Handlebar):
_RenderError(errors, id, " didn't resolve to a Handlebar")
return
_RenderNodes(buf, value.nodes, contexts, errors)
class SwitchNode(object):
""" {{:foo}}
"""
def __init__(self, id):
self.id = id
self._cases = {}
def addCase(self, caseValue, caseNode):
self._cases[caseValue] = caseNode
def render(self, buf, contexts, errors):
value = self.id.resolve(contexts, errors)
if not value:
_RenderError(errors, id, " didn't resolve to any value")
return
if not (type(value) == str or type(value) == unicode):
_RenderError(errors, id, " didn't resolve to a String")
return
caseNode = self._cases.get(value)
if caseNode:
caseNode.render(buf, contexts, errors)
class CaseNode(object):
""" {{=foo}}
"""
def __init__(self, children):
self.children = children
def render(self, buf, contexts, errors):
for child in self.children:
child.render(buf, contexts, errors)
class Token(object):
""" The tokens that can appear in a template.
"""
class Data(object):
def __init__(self, name, text, clazz):
self.name = name
self.text = text
self.clazz = clazz
OPEN_START_SECTION = Data("OPEN_START_SECTION" , "{{#", SectionNode)
OPEN_START_VERTED_SECTION = Data("OPEN_START_VERTED_SECTION" , "{{?", VertedSectionNode)
OPEN_START_INVERTED_SECTION = Data("OPEN_START_INVERTED_SECTION", "{{^", InvertedSectionNode)
OPEN_START_JSON = Data("OPEN_START_JSON" , "{{*", JsonNode)
OPEN_START_PARTIAL = Data("OPEN_START_PARTIAL" , "{{+", PartialNode)
OPEN_START_SWITCH = Data("OPEN_START_SWITCH" , "{{:", SwitchNode)
OPEN_CASE = Data("OPEN_CASE" , "{{=", CaseNode)
OPEN_END_SECTION = Data("OPEN_END_SECTION" , "{{/", None)
OPEN_UNESCAPED_VARIABLE = Data("OPEN_UNESCAPED_VARIABLE" , "{{{", UnescapedVariableNode)
CLOSE_MUSTACHE3 = Data("CLOSE_MUSTACHE3" , "}}}", None)
OPEN_VARIABLE = Data("OPEN_VARIABLE" , "{{" , EscapedVariableNode)
CLOSE_MUSTACHE = Data("CLOSE_MUSTACHE" , "}}" , None)
CHARACTER = Data("CHARACTER" , "." , StringNode)
# List of tokens in order of longest to shortest, to avoid any prefix matching
# issues.
_tokenList = [
Token.OPEN_START_SECTION,
Token.OPEN_START_VERTED_SECTION,
Token.OPEN_START_INVERTED_SECTION,
Token.OPEN_START_JSON,
Token.OPEN_START_PARTIAL,
Token.OPEN_START_SWITCH,
Token.OPEN_CASE,
Token.OPEN_END_SECTION,
Token.OPEN_UNESCAPED_VARIABLE,
Token.CLOSE_MUSTACHE3,
Token.OPEN_VARIABLE,
Token.CLOSE_MUSTACHE,
Token.CHARACTER
]
class TokenStream(object):
""" Tokeniser for template parsing.
"""
def __init__(self, string):
self.nextToken = None
self._remainder = string
self._nextContents = None
self.advance()
def hasNext(self):
return self.nextToken != None
def advanceOver(self, token):
if self.nextToken != token:
raise ParseException(
"Expecting token " + token.name + " but got " + self.nextToken.name)
return self.advance()
def advance(self):
self.nextToken = None
self._nextContents = None
if self._remainder == '':
return None
for token in _tokenList:
if self._remainder.startswith(token.text):
self.nextToken = token
break
if self.nextToken == None:
self.nextToken = Token.CHARACTER
self._nextContents = self._remainder[0:len(self.nextToken.text)]
self._remainder = self._remainder[len(self.nextToken.text):]
return self.nextToken
def nextString(self):
buf = StringBuilder()
while self.nextToken == Token.CHARACTER:
buf.append(self._nextContents)
self.advance()
return buf.toString()
def _CreateIdentifier(path):
if path == '@':
return ThisIdentifier()
else:
return PathIdentifier(path)
class Handlebar(object):
""" A handlebar template.
"""
def __init__(self, template):
self.nodes = []
self._parseTemplate(template, self.nodes)
def _parseTemplate(self, template, nodes):
tokens = TokenStream(template)
self._parseSection(tokens, nodes)
if tokens.hasNext():
raise ParseException("There are still tokens remaining, "
"was there an end-section without a start-section:")
def _parseSection(self, tokens, nodes):
sectionEnded = False
while tokens.hasNext() and not sectionEnded:
next = tokens.nextToken
if next == Token.CHARACTER:
nodes.append(StringNode(tokens.nextString()))
elif next == Token.OPEN_VARIABLE or \
next == Token.OPEN_UNESCAPED_VARIABLE or \
next == Token.OPEN_START_JSON or \
next == Token.OPEN_START_PARTIAL:
token = tokens.nextToken
id = self._openSection(tokens)
node = token.clazz()
node.init(id)
nodes.append(node)
elif next == Token.OPEN_START_SECTION or \
next == Token.OPEN_START_VERTED_SECTION or \
next == Token.OPEN_START_INVERTED_SECTION:
token = tokens.nextToken
id = self._openSection(tokens)
children = []
self._parseSection(tokens, children)
self._closeSection(tokens, id)
node = token.clazz()
node.init(id, children)
nodes.append(node)
elif next == Token.OPEN_START_SWITCH:
id = self._openSection(tokens)
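        # Skip literal text (typically whitespace) between the switch open
        # and its first {{=case}}.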
while tokens.nextToken == Token.CHARACTER:
tokens.advanceOver(Token.CHARACTER)
switchNode = SwitchNode(id)
nodes.append(switchNode)
while tokens.hasNext() and tokens.nextToken == Token.OPEN_CASE:
tokens.advanceOver(Token.OPEN_CASE)
caseValue = tokens.nextString()
tokens.advanceOver(Token.CLOSE_MUSTACHE)
caseChildren = []
self._parseSection(tokens, caseChildren)
switchNode.addCase(caseValue, CaseNode(caseChildren))
self._closeSection(tokens, id)
elif next == Token.OPEN_CASE:
# See below.
sectionEnded = True
elif next == Token.OPEN_END_SECTION:
# Handled after running parseSection within the SECTION cases, so this is a
# terminating condition. If there *is* an orphaned OPEN_END_SECTION, it will be caught
# by noticing that there are leftover tokens after termination.
sectionEnded = True
      else:
raise ParseException("Orphaned " + tokens.nextToken.name)
def _openSection(self, tokens):
openToken = tokens.nextToken
tokens.advance()
id = _CreateIdentifier(tokens.nextString())
if openToken == Token.OPEN_UNESCAPED_VARIABLE:
tokens.advanceOver(Token.CLOSE_MUSTACHE3)
else:
tokens.advanceOver(Token.CLOSE_MUSTACHE)
return id
def _closeSection(self, tokens, id):
tokens.advanceOver(Token.OPEN_END_SECTION)
nextString = tokens.nextString()
if nextString != '' and str(id) != nextString:
raise ParseException(
"Start section " + str(id) + " doesn't match end section " + nextString)
tokens.advanceOver(Token.CLOSE_MUSTACHE)
def render(self, *contexts):
""" Renders this template given a variable number of "contexts" to read
out values from (such as those appearing in {{foo}}).
"""
contextDeque = []
for context in contexts:
contextDeque.append(context)
buf = StringBuilder()
errors = []
_RenderNodes(buf, self.nodes, contextDeque, errors)
return RenderResult(buf.toString(), errors)
def _RenderNodes(buf, nodes, contexts, errors):
for node in nodes:
node.render(buf, contexts, errors)
def _RenderError(errors, *messages):
  if errors is None:
    return
buf = StringBuilder()
for message in messages:
buf.append(message)
errors.append(buf.toString())
```
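A minimal end-to-end sketch exercising the classes above:
```python
from handlebar import Handlebar

template = Handlebar('<ul>{{#items}}<li>{{name}}</li>{{/items}}</ul>')
result = template.render({'items': [{'name': 'a'}, {'name': 'b'}]})
print(result.text)    # <ul><li>a</li><li>b</li></ul>
print(result.errors)  # []
```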
#### File: tools/isolate/trace_inputs.py
```python
import codecs
import csv
import glob
import json
import logging
import optparse
import os
import posixpath
import re
import subprocess
import sys
import weakref
## OS-specific imports
if sys.platform == 'win32':
from ctypes.wintypes import byref, create_unicode_buffer, c_int, c_wchar_p
from ctypes.wintypes import windll, FormatError # pylint: disable=E0611
from ctypes.wintypes import GetLastError # pylint: disable=E0611
elif sys.platform == 'darwin':
import Carbon.File # pylint: disable=F0401
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
KEY_TRACKED = 'isolate_dependency_tracked'
KEY_UNTRACKED = 'isolate_dependency_untracked'
## OS-specific functions
if sys.platform == 'win32':
def QueryDosDevice(drive_letter):
"""Returns the Windows 'native' path for a DOS drive letter."""
assert re.match(r'^[a-zA-Z]:$', drive_letter), drive_letter
# Guesswork. QueryDosDeviceW never returns the required number of bytes.
chars = 1024
drive_letter = unicode(drive_letter)
p = create_unicode_buffer(chars)
if 0 == windll.kernel32.QueryDosDeviceW(drive_letter, p, chars):
err = GetLastError()
if err:
# pylint: disable=E0602
raise WindowsError(
err,
'QueryDosDevice(%s): %s (%d)' % (
str(drive_letter), FormatError(err), err))
return p.value
def GetShortPathName(long_path):
"""Returns the Windows short path equivalent for a 'long' path."""
long_path = unicode(long_path)
# Adds '\\\\?\\' when given an absolute path so the MAX_PATH (260) limit is
# not enforced.
if os.path.isabs(long_path) and not long_path.startswith('\\\\?\\'):
long_path = '\\\\?\\' + long_path
chars = windll.kernel32.GetShortPathNameW(long_path, None, 0)
if chars:
p = create_unicode_buffer(chars)
if windll.kernel32.GetShortPathNameW(long_path, p, chars):
return p.value
err = GetLastError()
if err:
# pylint: disable=E0602
raise WindowsError(
err,
'GetShortPathName(%s): %s (%d)' % (
str(long_path), FormatError(err), err))
def GetLongPathName(short_path):
"""Returns the Windows long path equivalent for a 'short' path."""
short_path = unicode(short_path)
# Adds '\\\\?\\' when given an absolute path so the MAX_PATH (260) limit is
# not enforced.
if os.path.isabs(short_path) and not short_path.startswith('\\\\?\\'):
short_path = '\\\\?\\' + short_path
chars = windll.kernel32.GetLongPathNameW(short_path, None, 0)
if chars:
p = create_unicode_buffer(chars)
if windll.kernel32.GetLongPathNameW(short_path, p, chars):
return p.value
err = GetLastError()
if err:
# pylint: disable=E0602
raise WindowsError(
err,
'GetLongPathName(%s): %s (%d)' % (
str(short_path), FormatError(err), err))
def get_current_encoding():
"""Returns the 'ANSI' code page associated to the process."""
return 'cp%d' % int(windll.kernel32.GetACP())
class DosDriveMap(object):
"""Maps \Device\HarddiskVolumeN to N: on Windows."""
# Keep one global cache.
_MAPPING = {}
def __init__(self):
if not self._MAPPING:
# This is related to UNC resolver on windows. Ignore that.
self._MAPPING['\\Device\\Mup'] = None
for letter in (chr(l) for l in xrange(ord('C'), ord('Z')+1)):
try:
letter = '%s:' % letter
mapped = QueryDosDevice(letter)
            # It can happen. Assert until we see it happen in the wild. In
# practice, prefer the lower drive letter.
assert mapped not in self._MAPPING
if mapped not in self._MAPPING:
self._MAPPING[mapped] = letter
except WindowsError: # pylint: disable=E0602
pass
def to_dos(self, path):
"""Converts a native NT path to DOS path."""
match = re.match(r'(^\\Device\\[a-zA-Z0-9]+)(\\.*)?$', path)
assert match, path
if not match.group(1) in self._MAPPING:
# Unmapped partitions may be accessed by windows for the
# fun of it while the test is running. Discard these.
return None
drive = self._MAPPING[match.group(1)]
if not drive or not match.group(2):
return drive
return drive + match.group(2)
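      # Illustrative: to_dos('\\Device\\HarddiskVolume1\\Windows\\notepad.exe')
      # returns 'C:\\Windows\\notepad.exe' when HarddiskVolume1 maps to C:.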
def isabs(path):
"""Accepts X: as an absolute path, unlike python's os.path.isabs()."""
return os.path.isabs(path) or len(path) == 2 and path[1] == ':'
def get_native_path_case(path):
"""Returns the native path case for an existing file.
On Windows, removes any leading '\\?\'.
"""
assert isabs(path), path
# Windows used to have an option to turn on case sensitivity on non Win32
# subsystem but that's out of scope here and isn't supported anymore.
# Go figure why GetShortPathName() is needed.
path = GetLongPathName(GetShortPathName(path))
if path.startswith('\\\\?\\'):
return path[4:]
return path
def CommandLineToArgvW(command_line):
"""Splits a commandline into argv using CommandLineToArgvW()."""
# http://msdn.microsoft.com/library/windows/desktop/bb776391.aspx
size = c_int()
ptr = windll.shell32.CommandLineToArgvW(unicode(command_line), byref(size))
try:
return [arg for arg in (c_wchar_p * size.value).from_address(ptr)]
finally:
windll.kernel32.LocalFree(ptr)
elif sys.platform == 'darwin':
# On non-windows, keep the stdlib behavior.
isabs = os.path.isabs
def get_native_path_case(path):
"""Returns the native path case for an existing file."""
assert isabs(path), path
# Technically, it's only HFS+ on OSX that is case insensitive. It's the
# default setting on HFS+ but can be changed.
rel_ref, _ = Carbon.File.FSPathMakeRef(path)
return rel_ref.FSRefMakePath()
else: # OSes other than Windows and OSX.
# On non-windows, keep the stdlib behavior.
isabs = os.path.isabs
def get_native_path_case(path):
"""Returns the native path case for an existing file.
On OSes other than OSX and Windows, assume the file system is
case-sensitive.
TODO(maruel): This is not strictly true. Implement if necessary.
"""
assert isabs(path), path
# Give up on cygwin, as GetLongPathName() can't be called.
return path
def get_flavor():
"""Returns the system default flavor. Copied from gyp/pylib/gyp/common.py."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
'sunos5': 'solaris',
'freebsd7': 'freebsd',
'freebsd8': 'freebsd',
}
return flavors.get(sys.platform, 'linux')
def isEnabledFor(level):
return logging.getLogger().isEnabledFor(level)
def fix_python_path(cmd):
"""Returns the fixed command line to call the right python executable."""
out = cmd[:]
if out[0] == 'python':
out[0] = sys.executable
elif out[0].endswith('.py'):
out.insert(0, sys.executable)
return out
def posix_relpath(path, root):
"""posix.relpath() that keeps trailing slash."""
out = posixpath.relpath(path, root)
if path.endswith('/'):
out += '/'
return out
def cleanup_path(x):
"""Cleans up a relative path. Converts any os.path.sep to '/' on Windows."""
if x:
x = x.rstrip(os.path.sep).replace(os.path.sep, '/')
if x == '.':
x = ''
if x:
x += '/'
return x
def process_quoted_arguments(text):
"""Extracts quoted arguments on a string and return the arguments as a list.
Implemented as an automaton. Supports incomplete strings in the form
'"foo"...'.
Example:
With text = '"foo", "bar"', the function will return ['foo', 'bar']
TODO(maruel): Implement escaping.
"""
# All the possible states of the DFA.
  ( NEED_QUOTE, # Beginning of a new argument.
INSIDE_STRING, # Inside an argument.
NEED_COMMA_OR_DOT, # Right after the closing quote of an argument. Could be
                       # a series of 3 dots or a comma.
NEED_SPACE, # Right after a comma
NEED_DOT_2, # Found a dot, need a second one.
NEED_DOT_3, # Found second dot, need a third one.
NEED_COMMA, # Found third dot, need a comma.
) = range(7)
state = NEED_QUOTE
current_argument = ''
out = []
for i in text:
if i == '"':
if state == NEED_QUOTE:
state = INSIDE_STRING
elif state == INSIDE_STRING:
# The argument is now closed.
out.append(current_argument)
current_argument = ''
state = NEED_COMMA_OR_DOT
else:
assert False, text
elif i == ',':
if state in (NEED_COMMA_OR_DOT, NEED_COMMA):
state = NEED_SPACE
else:
assert False, text
elif i == ' ':
if state == NEED_SPACE:
state = NEED_QUOTE
if state == INSIDE_STRING:
current_argument += i
elif i == '.':
if state == NEED_COMMA_OR_DOT:
        # The string is incomplete; this means the strace -s flag should be
# increased.
state = NEED_DOT_2
elif state == NEED_DOT_2:
state = NEED_DOT_3
elif state == NEED_DOT_3:
state = NEED_COMMA
elif state == INSIDE_STRING:
current_argument += i
else:
assert False, text
else:
if state == INSIDE_STRING:
current_argument += i
else:
assert False, text
assert state in (NEED_COMMA, NEED_COMMA_OR_DOT)
return out
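# Illustrative: a truncated strace string such as '"/long/pa"...' is parsed by
# process_quoted_arguments() into ['/long/pa']; the trailing dots signal that
# the strace -s string-size limit should be raised.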
class ApiBase(object):
"""OS-agnostic API to trace a process and its children."""
class Context(object):
"""Processes one log line at a time and keeps the list of traced processes.
"""
class Process(object):
"""Keeps context for one traced child process.
Logs all the files this process touched. Ignores directories.
"""
def __init__(self, root, pid, initial_cwd, parentid):
"""root is a reference to the Context."""
assert isinstance(root, ApiBase.Context)
assert isinstance(pid, int), repr(pid)
self.root = weakref.ref(root)
self.pid = pid
# Children are pids.
self.children = []
self.parentid = parentid
self.initial_cwd = initial_cwd
self.cwd = None
self.files = set()
self.executable = None
self.command = None
if parentid:
self.root().processes[parentid].children.append(pid)
def to_results_process(self):
"""Resolves file case sensitivity and or late-bound strings."""
children = [
self.root().processes[c].to_results_process() for c in self.children
]
        # When resolving files, it's normal to get dupes because a file could
        # be opened multiple times with different case. Resolve the
        # deduplication here.
def render_to_string_and_fix_case(x):
"""Returns the native file path case if the file exists.
Converts late-bound strings.
"""
if not x:
return x
# TODO(maruel): Do not upconvert to unicode here, on linux we don't
# know the file path encoding so they must be treated as bytes.
x = unicode(x)
if not os.path.exists(x):
return x
return get_native_path_case(x)
return Results.Process(
self.pid,
set(map(render_to_string_and_fix_case, self.files)),
render_to_string_and_fix_case(self.executable),
self.command,
render_to_string_and_fix_case(self.initial_cwd),
children)
def add_file(self, filepath):
if self.root().blacklist(unicode(filepath)):
return
logging.debug('add_file(%d, %s)' % (self.pid, filepath))
self.files.add(filepath)
def __init__(self, blacklist):
self.blacklist = blacklist
self.processes = {}
@staticmethod
def clean_trace(logname):
"""Deletes the old log."""
raise NotImplementedError()
@classmethod
def gen_trace(cls, cmd, cwd, logname, output):
"""Runs the OS-specific trace program on an executable.
Since the logs are per pid, we need to log the list of the initial pid.
"""
    raise NotImplementedError(cls.__name__)
@classmethod
def parse_log(cls, filename, blacklist):
"""Processes a trace log and returns the files opened and the files that do
not exist.
It does not track directories.
Most of the time, files that do not exist are temporary test files that
should be put in /tmp instead. See http://crbug.com/116251.
Returns a tuple (existing files, non existing files, nb_processes_created)
"""
    raise NotImplementedError(cls.__name__)
class Results(object):
"""Results of a trace session."""
class File(object):
"""A file that was accessed."""
def __init__(self, root, path):
"""Represents a file accessed. May not be present anymore."""
logging.debug('%s(%s, %s)' % (self.__class__.__name__, root, path))
self.root = root
self.path = path
self._size = None
# For compatibility with Directory object interface.
# Shouldn't be used normally, only exists to simplify algorithms.
self.nb_files = 1
assert path, path
assert bool(root) != bool(isabs(path)), (root, path)
assert (
not os.path.exists(self.full_path) or
self.full_path == get_native_path_case(self.full_path))
@property
def existent(self):
return self.size != -1
@property
def size(self):
"""File's size. -1 is not existent."""
if self._size is None:
try:
self._size = os.stat(self.full_path).st_size
except OSError:
self._size = -1
return self._size
@property
def full_path(self):
if self.root:
return os.path.join(self.root, self.path)
return self.path
def flatten(self):
return {
'path': self.path,
'size': self.size,
}
def strip_root(self, root):
"""Returns a clone of itself with 'root' stripped off."""
assert isabs(root) and root.endswith(os.path.sep), root
if not self.full_path.startswith(root):
return None
out = self.__class__(root, self.full_path[len(root):])
# Keep size cache.
out._size = self._size
return out
class Directory(File):
"""A directory of files. Must exist."""
def __init__(self, root, path, size, nb_files):
"""path='.' is a valid value and must be handled appropriately."""
super(Results.Directory, self).__init__(root, path)
assert not self.path.endswith(os.path.sep)
self.path = self.path + os.path.sep
self.nb_files = nb_files
self._size = size
def flatten(self):
out = super(Results.Directory, self).flatten()
out['nb_files'] = self.nb_files
return out
class Process(object):
"""A process that was traced.
Contains references to the files accessed by this process and its children.
"""
def __init__(
self, pid, files, executable, command, initial_cwd, children):
logging.debug('Process(%s, %d, ...)' % (pid, len(files)))
self.pid = pid
self.files = sorted(
(Results.File(None, f) for f in files), key=lambda x: x.path)
assert len(set(f.path for f in self.files)) == len(self.files), [
f.path for f in self.files]
assert isinstance(children, list)
assert isinstance(self.files, list)
self.children = children
self.executable = executable
self.command = command
self.initial_cwd = initial_cwd
@property
def all(self):
for child in self.children:
for i in child.all:
yield i
yield self
def flatten(self):
return {
'children': [c.flatten() for c in self.children],
'command': self.command,
'executable': self.executable,
'files': [f.flatten() for f in self.files],
'initial_cwd': self.initial_cwd,
'pid': self.pid,
}
def strip_root(self, root):
assert isabs(root) and root.endswith(os.path.sep), root
out = self.__class__(
self.pid,
[],
self.executable,
self.command,
self.initial_cwd,
[c.strip_root(root) for c in self.children])
# Override the files property.
out.files = filter(None, (f.strip_root(root) for f in self.files))
logging.debug(
'strip_root(%s) %d -> %d' % (root, len(self.files), len(out.files)))
return out
def __init__(self, process):
self.process = process
# Cache.
self._files = None
def flatten(self):
return {
'root': self.process.flatten(),
}
@property
def files(self):
if self._files is None:
self._files = sorted(
sum((p.files for p in self.process.all), []),
key=lambda x: x.path)
return self._files
@property
def existent(self):
return [f for f in self.files if f.existent]
@property
def non_existent(self):
return [f for f in self.files if not f.existent]
def strip_root(self, root):
"""Returns a clone with all the files outside the directory |root| removed
    and converts all the paths to relative paths.
"""
root = get_native_path_case(root).rstrip(os.path.sep) + os.path.sep
logging.debug('strip_root(%s)' % root)
return Results(self.process.strip_root(root))
def extract_directories(files):
"""Detects if all the files in a directory are in |files| and if so, replace
the individual files by a Results.Directory instance.
Takes an array of Results.File instances and returns an array of
Results.File and Results.Directory instances.
"""
assert not any(isinstance(f, Results.Directory) for f in files)
# Remove non existent files.
files = [f for f in files if f.existent]
if not files:
return files
# All files must share the same root, which can be None.
assert len(set(f.root for f in files)) == 1, set(f.root for f in files)
def blacklist(f):
return f in ('.git', '.svn') or f.endswith('.pyc')
# Creates a {directory: {filename: File}} mapping, up to root.
root = files[0].root
assert root.endswith(os.path.sep)
buckets = {}
if root:
buckets[root.rstrip(os.path.sep)] = {}
for fileobj in files:
path = fileobj.full_path
directory = os.path.dirname(path)
# Do not use os.path.basename() so trailing os.path.sep is kept.
basename = path[len(directory)+1:]
files_in_directory = buckets.setdefault(directory, {})
files_in_directory[basename] = fileobj
# Add all the directories recursively up to root.
while True:
old_d = directory
directory = os.path.dirname(directory)
if directory + os.path.sep == root or directory == old_d:
break
buckets.setdefault(directory, {})
for directory in sorted(buckets, reverse=True):
actual = set(f for f in os.listdir(directory) if not blacklist(f))
expected = set(buckets[directory])
if not (actual - expected):
parent = os.path.dirname(directory)
buckets[parent][os.path.basename(directory)] = Results.Directory(
root,
directory[len(root):],
sum(f.size for f in buckets[directory].itervalues()),
sum(f.nb_files for f in buckets[directory].itervalues()))
# Remove the whole bucket.
del buckets[directory]
# Reverse the mapping with what remains. The original instances are returned,
# so the cached meta data is kept.
return sorted(
sum((x.values() for x in buckets.itervalues()), []),
key=lambda x: x.path)
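# Example (hypothetical): if |files| holds 'out/a.o' and 'out/b.o' and
# os.listdir('out') returns exactly those two names (after blacklist()),
# both File entries collapse into a single Results.Directory for 'out'.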
class Strace(ApiBase):
"""strace implies linux."""
IGNORED = (
'/bin',
'/dev',
'/etc',
'/lib',
'/proc',
'/sys',
'/tmp',
'/usr',
'/var',
)
class Context(ApiBase.Context):
"""Processes a strace log line and keeps the list of existent and non
existent files accessed.
Ignores directories.
Uses late binding to process the cwd of each process. The problem is that
strace generates one log file per process it traced but doesn't give any
information about which process was started when and by whom, so we don't
even know which process is the initial one. We therefore process the logs
out of order and use late binding with RelativePath to deduce the initial
directory of each process once all the logs are parsed.
"""
class Process(ApiBase.Context.Process):
"""Represents the state of a process.
Contains all the information retrieved from the pid-specific log.
"""
# Function names are using ([a-z_0-9]+)
# This is the most common format. function(args) = result
RE_HEADER = re.compile(r'^([a-z_0-9]+)\((.+?)\)\s+= (.+)$')
# An interrupted function call, only grab the minimal header.
RE_UNFINISHED = re.compile(r'^([^\(]+)(.*) \<unfinished \.\.\.\>$')
# A resumed function call.
RE_RESUMED = re.compile(r'^<\.\.\. ([^ ]+) resumed> (.+)$')
# A process received a signal.
RE_SIGNAL = re.compile(r'^--- SIG[A-Z]+ .+ ---')
# A process didn't handle a signal.
RE_KILLED = re.compile(r'^\+\+\+ killed by ([A-Z]+) \+\+\+$')
# A call was canceled.
RE_UNAVAILABLE = re.compile(r'\)\s+= \? <unavailable>$')
# Happens when strace fails to even get the function name.
UNNAMED_FUNCTION = '????'
# Arguments parsing.
RE_CHDIR = re.compile(r'^\"(.+?)\"$')
RE_EXECVE = re.compile(r'^\"(.+?)\", \[(.+)\], \[\/\* \d\d vars \*\/\]$')
RE_OPEN2 = re.compile(r'^\"(.*?)\", ([A-Z\_\|]+)$')
RE_OPEN3 = re.compile(r'^\"(.*?)\", ([A-Z\_\|]+), (\d+)$')
RE_RENAME = re.compile(r'^\"(.+?)\", \"(.+?)\"$')
class RelativePath(object):
"""A late-bound relative path."""
def __init__(self, parent, value):
self.parent = parent
self.value = value
def render(self):
"""Returns the current directory this instance is representing.
This function is used to return the late-bound value.
"""
if self.value and self.value.startswith(u'/'):
# An absolute path.
return self.value
parent = self.parent.render() if self.parent else u'<None>'
if self.value:
return os.path.normpath(os.path.join(parent, self.value))
return parent
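# Example (hypothetical): with a parent that renders to u'/src', a value of
# u'obj' renders to u'/src/obj'; an absolute value like u'/tmp/x' is
# returned as-is, ignoring the parent.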
def __unicode__(self):
"""Acts as a string whenever needed."""
return unicode(self.render())
def __str__(self):
"""Acts as a string whenever needed."""
return str(self.render())
def __init__(self, root, pid):
super(Strace.Context.Process, self).__init__(root, pid, None, None)
# The dict key is the function name of the pending call, like 'open'
# or 'execve'.
self._pending_calls = {}
self._line_number = 0
# Current directory when the process started.
self.initial_cwd = self.RelativePath(self.root(), None)
def get_cwd(self):
"""Returns the best known value of cwd."""
return self.cwd or self.initial_cwd
def render(self):
"""Returns the string value of the RelativePath() object.
Used by RelativePath. Returns the initial directory and not the
current one since the current directory 'cwd' validity is time-limited.
The validity is only guaranteed once all the logs are processed.
"""
return self.initial_cwd.render()
def on_line(self, line):
self._line_number += 1
if self.RE_SIGNAL.match(line):
# Ignore signals.
return
match = self.RE_KILLED.match(line)
if match:
self.handle_exit_group(match.group(1), None, None)
return
match = self.RE_UNFINISHED.match(line)
if match:
assert match.group(1) not in self._pending_calls
self._pending_calls[match.group(1)] = match.group(1) + match.group(2)
return
match = self.RE_UNAVAILABLE.match(line)
if match:
# This usually means a process was killed and a pending call was
# canceled.
# TODO(maruel): Look up the last exit_group() trace just above and
# make sure any self._pending_calls[anything] is properly flushed.
return
match = self.RE_RESUMED.match(line)
if match:
assert match.group(1) in self._pending_calls, self._pending_calls
pending = self._pending_calls.pop(match.group(1))
# Reconstruct the line.
line = pending + match.group(2)
match = self.RE_HEADER.match(line)
assert match, (self.pid, self._line_number, line)
if match.group(1) == self.UNNAMED_FUNCTION:
return
handler = getattr(self, 'handle_%s' % match.group(1), None)
assert handler, (self.pid, self._line_number, line)
try:
return handler(
match.group(1),
match.group(2),
match.group(3))
except Exception:
print >> sys.stderr, (self.pid, self._line_number, line)
raise
def handle_chdir(self, _function, args, result):
"""Updates cwd."""
assert result.startswith('0'), 'Unexpected fail: %s' % result
cwd = self.RE_CHDIR.match(args).group(1)
self.cwd = self.RelativePath(self, cwd)
logging.debug('handle_chdir(%d, %s)' % (self.pid, self.cwd))
def handle_clone(self, _function, _args, result):
"""Transfers cwd."""
if result == '? ERESTARTNOINTR (To be restarted)':
return
# Update the other process right away.
childpid = int(result)
child = self.root().get_or_set_proc(childpid)
# Copy the cwd object.
child.initial_cwd = self.get_cwd()
assert child.parentid is None
child.parentid = self.pid
# It is necessary because the logs are processed out of order.
assert childpid not in self.children
self.children.append(childpid)
def handle_close(self, _function, _args, _result):
pass
def handle_execve(self, _function, args, result):
if result != '0':
return
m = self.RE_EXECVE.match(args)
filepath = m.group(1)
self._handle_file(filepath, result)
self.executable = self.RelativePath(self.get_cwd(), filepath)
self.command = process_quoted_arguments(m.group(2))
def handle_exit_group(self, _function, _args, _result):
"""Removes cwd."""
self.cwd = None
@staticmethod
def handle_fork(_function, args, result):
assert False, (args, result)
def handle_open(self, _function, args, result):
args = (self.RE_OPEN3.match(args) or self.RE_OPEN2.match(args)).groups()
if 'O_DIRECTORY' in args[1]:
return
self._handle_file(args[0], result)
def handle_rename(self, _function, args, result):
args = self.RE_RENAME.match(args).groups()
self._handle_file(args[0], result)
self._handle_file(args[1], result)
@staticmethod
def handle_stat64(_function, args, result):
assert False, (args, result)
@staticmethod
def handle_vfork(_function, args, result):
assert False, (args, result)
def _handle_file(self, filepath, result):
if result.startswith('-1'):
return
filepath = self.RelativePath(self.get_cwd(), filepath)
self.add_file(filepath)
def __init__(self, blacklist, initial_cwd):
super(Strace.Context, self).__init__(blacklist)
self.initial_cwd = initial_cwd
def render(self):
"""Returns the string value of the initial cwd of the root process.
Used by RelativePath.
"""
return self.initial_cwd
def on_line(self, pid, line):
self.get_or_set_proc(pid).on_line(line.strip())
def to_results(self):
"""Finds back the root process and verify consistency."""
# TODO(maruel): Absolutely unnecessary, fix me.
root = [p for p in self.processes.itervalues() if not p.parentid]
assert len(root) == 1
process = root[0].to_results_process()
assert sorted(self.processes) == sorted(p.pid for p in process.all)
return Results(process)
def get_or_set_proc(self, pid):
"""Returns the Context.Process instance for this pid or creates a new one.
"""
assert isinstance(pid, int) and pid
return self.processes.setdefault(pid, self.Process(self, pid))
@classmethod
def traces(cls):
prefix = 'handle_'
return [i[len(prefix):] for i in dir(cls.Process) if i.startswith(prefix)]
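# Example: given the handle_* methods above, traces() yields names such as
# ['chdir', 'clone', 'close', 'execve', ...], which gen_trace() joins into
# strace's '-e trace=...' argument.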
@staticmethod
def clean_trace(logname):
if os.path.isfile(logname):
os.remove(logname)
# Also delete any pid specific file from previous traces.
for i in glob.iglob(logname + '.*'):
if i.rsplit('.', 1)[1].isdigit():
os.remove(i)
@classmethod
def gen_trace(cls, cmd, cwd, logname, output):
"""Runs strace on an executable.
Since the logs are per pid, we need to record the initial pid.
"""
logging.info('gen_trace(%s, %s, %s, %s)' % (cmd, cwd, logname, output))
stdout = stderr = None
if output:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
traces = ','.join(cls.Context.traces())
trace_cmd = [
'strace',
'-ff',
'-s', '256',
'-e', 'trace=%s' % traces,
'-o', logname,
]
child = subprocess.Popen(
trace_cmd + cmd,
cwd=cwd,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr)
out = child.communicate()[0]
# Once it's done, write metadata into the log file to be able to follow the
# pid files.
assert not os.path.isfile(logname)
with open(logname, 'wb') as f:
json.dump(
{
'cwd': cwd,
# The pid of strace process, not very useful.
'pid': child.pid,
},
f)
return child.returncode, out
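# Note on the resulting layout: |logname| holds the JSON metadata written
# above, while strace's '-ff -o logname' writes one 'logname.<pid>' file per
# traced process; parse_log() below globs for those pid files.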
@classmethod
def parse_log(cls, filename, blacklist):
logging.info('parse_log(%s, %s)' % (filename, blacklist))
with open(filename, 'r') as f:
data = json.load(f)
context = cls.Context(blacklist, data['cwd'])
for pidfile in glob.iglob(filename + '.*'):
pid = pidfile.rsplit('.', 1)[1]
if pid.isdigit():
pid = int(pid)
# TODO(maruel): Load as utf-8
for line in open(pidfile, 'rb'):
context.on_line(pid, line)
return context.to_results()
class Dtrace(ApiBase):
"""Uses DTrace framework through dtrace. Requires root access.
Implies Mac OSX.
dtruss can't be used because it has compatibility issues with python.
Also, the pid->cwd handling needs to be done manually since OSX has no way to
get the absolute path of the 'cwd' dtrace variable from the probe.
Also, OSX doesn't populate curpsinfo->pr_psargs properly, see
https://discussions.apple.com/thread/1980539.
"""
IGNORED = (
'/.vol',
'/Library',
'/System',
'/dev',
'/etc',
'/private/var',
'/tmp',
'/usr',
'/var',
)
# pylint: disable=C0301
# To understand the following code, you'll want to take a look at:
# http://developers.sun.com/solaris/articles/dtrace_quickref/dtrace_quickref.html
# https://wikis.oracle.com/display/DTrace/Variables
# http://docs.oracle.com/cd/E19205-01/820-4221/
#
# The list of valid probes can be retrieved with:
# sudo dtrace -l -P syscall | less
D_CODE = """
proc:::start /trackedpid[ppid]/ {
trackedpid[pid] = 1;
current_processes += 1;
printf("%d %d:%d %s_%s(\\"%s\\", %d) = 0\\n",
logindex, ppid, pid, probeprov, probename, execname,
current_processes);
logindex++;
}
proc:::exit /trackedpid[pid] && current_processes == 1/ {
trackedpid[pid] = 0;
current_processes -= 1;
printf("%d %d:%d %s_%s(\\"%s\\", %d) = 0\\n",
logindex, ppid, pid, probeprov, probename, execname,
current_processes);
logindex++;
exit(0);
}
proc:::exit /trackedpid[pid]/ {
trackedpid[pid] = 0;
current_processes -= 1;
printf("%d %d:%d %s_%s(\\"%s\\", %d) = 0\\n",
logindex, ppid, pid, probeprov, probename, execname,
current_processes);
logindex++;
}
/* Finally what we care about! */
syscall::exec*:entry /trackedpid[pid]/ {
self->e_arg0 = copyinstr(arg0);
/* Incrementally probe for a NULL in the argv parameter of execve() to
* figure out argc. */
self->argc = 0;
self->argv = (user_addr_t*)copyin(
arg1, sizeof(user_addr_t) * (self->argc + 1));
self->argc = self->argv[self->argc] ? (self->argc + 1) : self->argc;
self->argv = (user_addr_t*)copyin(
arg1, sizeof(user_addr_t) * (self->argc + 1));
self->argc = self->argv[self->argc] ? (self->argc + 1) : self->argc;
self->argv = (user_addr_t*)copyin(
arg1, sizeof(user_addr_t) * (self->argc + 1));
self->argc = self->argv[self->argc] ? (self->argc + 1) : self->argc;
self->argv = (user_addr_t*)copyin(
arg1, sizeof(user_addr_t) * (self->argc + 1));
self->argc = self->argv[self->argc] ? (self->argc + 1) : self->argc;
self->argv = (user_addr_t*)copyin(
arg1, sizeof(user_addr_t) * (self->argc + 1));
self->argc = self->argv[self->argc] ? (self->argc + 1) : self->argc;
self->argv = (user_addr_t*)copyin(
arg1, sizeof(user_addr_t) * (self->argc + 1));
self->argc = self->argv[self->argc] ? (self->argc + 1) : self->argc;
self->argv = (user_addr_t*)copyin(
arg1, sizeof(user_addr_t) * (self->argc + 1));
/* Copy the inputs strings since there is no guarantee they'll be
* present after the call completed. */
self->args[0] = (self->argc > 0) ? copyinstr(self->argv[0]) : "";
self->args[1] = (self->argc > 1) ? copyinstr(self->argv[1]) : "";
self->args[2] = (self->argc > 2) ? copyinstr(self->argv[2]) : "";
self->args[3] = (self->argc > 3) ? copyinstr(self->argv[3]) : "";
self->args[4] = (self->argc > 4) ? copyinstr(self->argv[4]) : "";
self->args[5] = (self->argc > 5) ? copyinstr(self->argv[5]) : "";
self->args[6] = (self->argc > 6) ? copyinstr(self->argv[6]) : "";
self->args[7] = (self->argc > 7) ? copyinstr(self->argv[7]) : "";
self->args[8] = (self->argc > 8) ? copyinstr(self->argv[8]) : "";
self->args[9] = (self->argc > 9) ? copyinstr(self->argv[9]) : "";
}
syscall::exec*: /trackedpid[pid] && errno == 0/ {
/* We need to join strings here, as using multiple printf() would cause
* tearing when multiple threads/processes are traced. */
this->args = "";
this->args = strjoin(this->args, (self->argc > 0) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 0) ? self->args[0] : "");
this->args = strjoin(this->args, (self->argc > 0) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 1) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 1) ? self->args[1] : "");
this->args = strjoin(this->args, (self->argc > 1) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 2) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 2) ? self->args[2] : "");
this->args = strjoin(this->args, (self->argc > 2) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 3) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 3) ? self->args[3] : "");
this->args = strjoin(this->args, (self->argc > 3) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 4) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 4) ? self->args[4] : "");
this->args = strjoin(this->args, (self->argc > 4) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 5) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 5) ? self->args[5] : "");
this->args = strjoin(this->args, (self->argc > 5) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 6) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 6) ? self->args[6] : "");
this->args = strjoin(this->args, (self->argc > 6) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 7) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 7) ? self->args[7] : "");
this->args = strjoin(this->args, (self->argc > 7) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 8) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 8) ? self->args[8] : "");
this->args = strjoin(this->args, (self->argc > 8) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 9) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 9) ? self->args[9]: "");
this->args = strjoin(this->args, (self->argc > 9) ? "\\"" : "");
/* Prints self->argc to permit verifying the internal consistency since
* this code is quite fishy. */
printf("%d %d:%d %s(\\"%s\\", [%d%s]) = %d\\n",
logindex, ppid, pid, probefunc,
self->e_arg0,
self->argc,
this->args,
errno);
logindex++;
/* TODO(maruel): Clean up memory
self->e_arg0 = 0;
self->argc = 0;
self->args[0] = 0;
self->args[1] = 0;
self->args[2] = 0;
self->args[3] = 0;
self->args[4] = 0;
self->args[5] = 0;
self->args[6] = 0;
self->args[7] = 0;
self->args[8] = 0;
self->args[9] = 0;
*/
}
syscall::open*:entry /trackedpid[pid]/ {
self->arg0 = arg0;
self->arg1 = arg1;
self->arg2 = arg2;
}
syscall::open*:return /trackedpid[pid] && errno == 0/ {
printf("%d %d:%d %s(\\"%s\\", %d, %d) = %d\\n",
logindex, ppid, pid, probefunc, copyinstr(self->arg0),
self->arg1, self->arg2, errno);
logindex++;
self->arg0 = 0;
self->arg1 = 0;
self->arg2 = 0;
}
syscall::rename:entry /trackedpid[pid]/ {
self->arg0 = arg0;
self->arg1 = arg1;
}
syscall::rename:return /trackedpid[pid]/ {
printf("%d %d:%d %s(\\"%s\\", \\"%s\\") = %d\\n",
logindex, ppid, pid, probefunc, copyinstr(self->arg0),
copyinstr(self->arg1), errno);
logindex++;
self->arg0 = 0;
self->arg1 = 0;
}
/* Track chdir, it's painful because it is only receiving relative path */
syscall::chdir:entry /trackedpid[pid]/ {
self->arg0 = arg0;
}
syscall::chdir:return /trackedpid[pid] && errno == 0/ {
printf("%d %d:%d %s(\\"%s\\") = %d\\n",
logindex, ppid, pid, probefunc, copyinstr(self->arg0), errno);
logindex++;
self->arg0 = 0;
}
/* TODO(maruel): *stat* functions and friends
syscall::access:return,
syscall::chdir:return,
syscall::chflags:return,
syscall::chown:return,
syscall::chroot:return,
syscall::getattrlist:return,
syscall::getxattr:return,
syscall::lchown:return,
syscall::lstat64:return,
syscall::lstat:return,
syscall::mkdir:return,
syscall::pathconf:return,
syscall::readlink:return,
syscall::removexattr:return,
syscall::setxattr:return,
syscall::stat64:return,
syscall::stat:return,
syscall::truncate:return,
syscall::unlink:return,
syscall::utimes:return,
*/
"""
@classmethod
def code(cls, pid, cwd):
"""Setups the D code to implement child process tracking.
Injects the pid and the initial cwd into the trace header for context.
The reason is that the child process is already running at that point so:
- no proc_start() is logged for it.
- there is no way to figure out the absolute path of cwd in kernel on OSX
Since the child process is already started, initialize current_processes to
1.
"""
pid = str(pid)
cwd = os.path.realpath(cwd).replace('\\', '\\\\').replace('%', '%%')
return (
'dtrace:::BEGIN {\n'
' current_processes = 1;\n'
' logindex = 0;\n'
' trackedpid[' + pid + '] = 1;\n'
' printf("%d %d:%d %s_%s(\\"' + cwd + '\\") = 0\\n",\n'
' logindex, ppid, ' + pid + ', probeprov, probename);\n'
' logindex++;\n'
'}\n') + cls.D_CODE
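# Illustration (values are hypothetical): for pid 1234 and cwd '/src', the
# returned script starts with a dtrace:::BEGIN clause that sets
# trackedpid[1234], initializes current_processes and logindex, and logs
# '/src', followed by D_CODE above.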
class Context(ApiBase.Context):
# This is the most common format. index pid function(args) = result
RE_HEADER = re.compile(r'^\d+ (\d+):(\d+) ([a-zA-Z_\-]+)\((.*?)\) = (.+)$')
# Arguments parsing.
RE_DTRACE_BEGIN = re.compile(r'^\"(.+?)\"$')
RE_CHDIR = re.compile(r'^\"(.+?)\"$')
RE_EXECVE = re.compile(r'^\"(.+?)\", \[(\d+), (.+)\]$')
RE_OPEN = re.compile(r'^\"(.+?)\", (\d+), (-?\d+)$')
RE_RENAME = re.compile(r'^\"(.+?)\", \"(.+?)\"$')
O_DIRECTORY = 0x100000
class Process(ApiBase.Context.Process):
pass
def __init__(self, blacklist):
super(Dtrace.Context, self).__init__(blacklist)
# Process ID of the trace_child_process.py wrapper script instance.
self._tracer_pid = None
# First process to be started by self._tracer_pid.
self._initial_pid = None
self._initial_cwd = None
def on_line(self, line):
match = self.RE_HEADER.match(line)
assert match, line
fn = getattr(
self,
'handle_%s' % match.group(3).replace('-', '_'),
self._handle_ignored)
return fn(
int(match.group(1)),
int(match.group(2)),
match.group(3),
match.group(4),
match.group(5))
def to_results(self):
"""Uses self._initial_pid to determine the initial process."""
process = self.processes[self._initial_pid].to_results_process()
assert sorted(self.processes) == sorted(p.pid for p in process.all), (
sorted(self.processes), sorted(p.pid for p in process.all))
return Results(process)
def handle_dtrace_BEGIN(self, _ppid, pid, _function, args, _result):
assert not self._tracer_pid and not self._initial_pid
self._tracer_pid = pid
self._initial_cwd = self.RE_DTRACE_BEGIN.match(args).group(1)
def handle_proc_start(self, ppid, pid, _function, _args, result):
"""Transfers cwd.
The dtrace script already takes care of only tracing the processes that
are children of the traced processes, so there is no need to verify the
process hierarchy.
"""
assert result == '0'
assert pid not in self.processes
assert (ppid == self._tracer_pid) != bool(self._initial_pid)
if not self._initial_pid:
self._initial_pid = pid
ppid = None
cwd = self._initial_cwd
else:
parent = self.processes[ppid]
cwd = parent.cwd
proc = self.processes[pid] = self.Process(self, pid, cwd, ppid)
proc.cwd = cwd
logging.debug('New child: %s -> %d' % (ppid, pid))
def handle_proc_exit(self, _ppid, pid, _function, _args, _result):
"""Removes cwd."""
if pid != self._tracer_pid:
# self._tracer_pid is not traced itself.
self.processes[pid].cwd = None
def handle_execve(self, _ppid, pid, _function, args, _result):
"""Sets the process' executable.
TODO(maruel): Read command line arguments. See
https://discussions.apple.com/thread/1980539 for an example.
https://gist.github.com/1242279
Will have to put the answer at http://stackoverflow.com/questions/7556249.
:)
"""
match = self.RE_EXECVE.match(args)
assert match, args
proc = self.processes[pid]
proc.executable = match.group(1)
proc.command = process_quoted_arguments(match.group(3))
assert int(match.group(2)) == len(proc.command), args
def handle_chdir(self, _ppid, pid, _function, args, result):
"""Updates cwd."""
assert self._tracer_pid
assert pid in self.processes
if result.startswith('0'):
cwd = self.RE_CHDIR.match(args).group(1)
if not cwd.startswith('/'):
cwd2 = os.path.join(self.processes[pid].cwd, cwd)
logging.debug('handle_chdir(%d, %s) -> %s' % (pid, cwd, cwd2))
else:
logging.debug('handle_chdir(%d, %s)' % (pid, cwd))
cwd2 = cwd
self.processes[pid].cwd = cwd2
else:
assert False, 'Unexpected fail: %s' % result
def handle_open_nocancel(self, ppid, pid, function, args, result):
return self.handle_open(ppid, pid, function, args, result)
def handle_open(self, _ppid, pid, function, args, result):
match = self.RE_OPEN.match(args)
assert match, (pid, function, args, result)
args = match.groups()
flag = int(args[1])
if self.O_DIRECTORY & flag == self.O_DIRECTORY:
# Ignore directories.
return
self._handle_file(pid, args[0], result)
def handle_rename(self, _ppid, pid, _function, args, result):
args = self.RE_RENAME.match(args).groups()
self._handle_file(pid, args[0], result)
self._handle_file(pid, args[1], result)
def _handle_file(self, pid, filepath, result):
if result.startswith(('-1', '2')):
return
if not filepath.startswith('/'):
filepath = os.path.join(self.processes[pid].cwd, filepath)
# We can get '..' in the path.
filepath = os.path.normpath(filepath)
# Sadly, still need to filter out directories here;
# saw open_nocancel(".", 0, 0) = 0 lines.
if os.path.isdir(filepath):
return
self.processes[pid].add_file(filepath)
@staticmethod
def _handle_ignored(_ppid, pid, function, args, result):
logging.debug('%d %s(%s) = %s' % (pid, function, args, result))
@staticmethod
def clean_trace(logname):
if os.path.isfile(logname):
os.remove(logname)
@classmethod
def gen_trace(cls, cmd, cwd, logname, output):
"""Runs dtrace on an executable.
dtruss is broken when it starts the process itself or when tracing
child processes, so this code starts a wrapper process,
trace_child_process.py, which waits for dtrace to start and then launches
the executable to trace.
"""
logging.info('gen_trace(%s, %s, %s, %s)' % (cmd, cwd, logname, output))
logging.info('Running: %s' % cmd)
signal = 'Go!'
logging.debug('Our pid: %d' % os.getpid())
# Part 1: start the child process.
stdout = stderr = None
if output:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
child_cmd = [
sys.executable, os.path.join(BASE_DIR, 'trace_child_process.py'),
]
child = subprocess.Popen(
child_cmd + cmd,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
cwd=cwd)
logging.debug('Started child pid: %d' % child.pid)
# Part 2: start dtrace process.
# Note: do not use the -p flag. It's useless if the initial process quits
# too fast, resulting in missing traces from the grand-children. The D code
# manages the dtrace lifetime itself.
trace_cmd = [
'sudo',
'dtrace',
'-x', 'dynvarsize=4m',
'-x', 'evaltime=exec',
'-n', cls.code(child.pid, cwd),
'-o', '/dev/stderr',
'-q',
]
with open(logname, 'w') as logfile:
dtrace = subprocess.Popen(
trace_cmd, stdout=logfile, stderr=subprocess.STDOUT)
logging.debug('Started dtrace pid: %d' % dtrace.pid)
# Part 3: Read until one line is printed, which signifies dtrace is up and
# ready.
with open(logname, 'r') as logfile:
while 'dtrace_BEGIN' not in logfile.readline():
if dtrace.poll() is not None:
break
try:
# Part 4: We can now tell our child to go.
# TODO(maruel): Another pipe than stdin could be used instead. This would
# be more consistent with the other tracing methods.
out = child.communicate(signal)[0]
dtrace.wait()
if dtrace.returncode != 0:
print 'dtrace failure: %d' % dtrace.returncode
with open(logname) as logfile:
print ''.join(logfile.readlines()[-100:])
# Find a better way.
os.remove(logname)
else:
# Sort the log right away to simplify our life. There isn't much
# advantage in keeping it out of order.
cls._sort_log(logname)
except KeyboardInterrupt:
# Still sort when testing.
cls._sort_log(logname)
raise
return dtrace.returncode or child.returncode, out
@classmethod
def parse_log(cls, filename, blacklist):
logging.info('parse_log(%s, %s)' % (filename, blacklist))
context = cls.Context(blacklist)
for line in open(filename, 'rb'):
context.on_line(line)
return context.to_results()
@staticmethod
def _sort_log(logname):
"""Sorts the log back in order when each call occured.
dtrace doesn't save the buffer in strict order since it keeps one buffer per
CPU.
"""
with open(logname, 'rb') as logfile:
lines = [l for l in logfile if l.strip()]
errors = [l for l in lines if l.startswith('dtrace:')]
if errors:
print >> sys.stderr, 'Failed to load: %s' % logname
print >> sys.stderr, '\n'.join(errors)
assert not errors, errors
try:
lines = sorted(lines, key=lambda l: int(l.split(' ', 1)[0]))
except ValueError:
print >> sys.stderr, 'Failed to load: %s' % logname
print >> sys.stderr, '\n'.join(lines)
raise
with open(logname, 'wb') as logfile:
logfile.write(''.join(lines))
class LogmanTrace(ApiBase):
"""Uses the native Windows ETW based tracing functionality to trace a child
process.
Caveat: this implementation doesn't track cwd or initial_cwd.
"""
class Context(ApiBase.Context):
"""Processes a ETW log line and keeps the list of existent and non
existent files accessed.
Ignores directories.
"""
# Only the useful headers common to all entries are listed there. Any column
# at 19 or higher is dependent on the specific event.
EVENT_NAME = 0
TYPE = 1
PID = 9
TID = 10
PROCESSOR_ID = 11
TIMESTAMP = 16
class Process(ApiBase.Context.Process):
def __init__(self, *args):
super(LogmanTrace.Context.Process, self).__init__(*args)
# Handle file objects that succeeded.
self.file_objects = {}
def __init__(self, blacklist):
super(LogmanTrace.Context, self).__init__(blacklist)
self._drive_map = DosDriveMap()
# Threads mapping to the corresponding process id.
self._threads_active = {}
# Process ID of the tracer, e.g. tracer_inputs.py
self._tracer_pid = None
# First process to be started by self._tracer_pid.
self._initial_pid = None
self._line_number = 0
def on_csv_line(self, line):
"""Processes a CSV Event line."""
# So much white space!
line = [i.strip() for i in line]
self._line_number += 1
if self._line_number == 1:
assert line == [
u'Event Name',
u'Type',
u'Event ID',
u'Version',
u'Channel',
u'Level', # 5
u'Opcode',
u'Task',
u'Keyword',
u'PID',
u'TID', # 10
u'Processor Number',
u'Instance ID',
u'Parent Instance ID',
u'Activity ID',
u'Related Activity ID', # 15
u'Clock-Time',
u'Kernel(ms)', # Both have a resolution of ~15ms which makes them
u'User(ms)', # pretty much useless.
u'User Data',
]
return
# As you can see, the CSV is full of useful non-redundant information:
# Event ID
assert line[2] == '0'
# Version
assert line[3] in ('2', '3'), line[3]
# Channel
assert line[4] == '0'
# Level
assert line[5] == '0'
# Task
assert line[7] == '0'
# Keyword
assert line[8] == '0x0000000000000000'
# Instance ID
assert line[12] == ''
# Parent Instance ID
assert line[13] == ''
# Activity ID
assert line[14] == '{00000000-0000-0000-0000-000000000000}'
# Related Activity ID
assert line[15] == ''
if line[0].startswith('{'):
# Skip GUIDs.
return
# Convert the PID in-place from hex.
line[self.PID] = int(line[self.PID], 16)
# By Opcode
handler = getattr(
self,
'handle_%s_%s' % (line[self.EVENT_NAME], line[self.TYPE]),
None)
if not handler:
# Try to get a universal fallback
handler = getattr(self, 'handle_%s_Any' % line[self.EVENT_NAME], None)
if handler:
handler(line)
else:
assert False, '%s_%s' % (line[self.EVENT_NAME], line[self.TYPE])
def to_results(self):
"""Uses self._initial_pid to determine the initial process."""
process = self.processes[self._initial_pid].to_results_process()
assert sorted(self.processes) == sorted(p.pid for p in process.all), (
sorted(self.processes), sorted(p.pid for p in process.all))
return Results(process)
def _thread_to_process(self, tid):
"""Finds the process from the thread id."""
tid = int(tid, 16)
return self.processes.get(self._threads_active.get(tid))
@staticmethod
def handle_EventTrace_Header(line):
"""Verifies no event was dropped, e.g. no buffer overrun occured."""
#BUFFER_SIZE = 19
#VERSION = 20
#PROVIDER_VERSION = 21
#NUMBER_OF_PROCESSORS = 22
#END_TIME = 23
#TIMER_RESOLUTION = 24
#MAX_FILE_SIZE = 25
#LOG_FILE_MODE = 26
#BUFFERS_WRITTEN = 27
#START_BUFFERS = 28
#POINTER_SIZE = 29
EVENTS_LOST = 30
#CPU_SPEED = 31
#LOGGER_NAME = 32
#LOG_FILE_NAME = 33
#BOOT_TIME = 34
#PERF_FREQ = 35
#START_TIME = 36
#RESERVED_FLAGS = 37
#BUFFERS_LOST = 38
#SESSION_NAME_STRING = 39
#LOG_FILE_NAME_STRING = 40
assert line[EVENTS_LOST] == '0'
def handle_EventTrace_Any(self, line):
pass
def handle_FileIo_Cleanup(self, line):
"""General wisdom: if a file is closed, it's because it was opened.
Note that FileIo_Close is not used since if a file was opened properly but
not closed before the process exits, only Cleanup will be logged.
"""
#IRP = 19
TTID = 20 # Thread ID, that's what we want.
FILE_OBJECT = 21
#FILE_KEY = 22
proc = self._thread_to_process(line[TTID])
if not proc:
# Not a process we care about.
return
file_object = line[FILE_OBJECT]
if file_object in proc.file_objects:
proc.add_file(proc.file_objects.pop(file_object))
def handle_FileIo_Create(self, line):
"""Handles a file open.
All FileIo events are described at
http://msdn.microsoft.com/library/windows/desktop/aa363884.aspx
for some value of 'description'.
" (..) process and thread id values of the IO events (..) are not valid "
http://msdn.microsoft.com/magazine/ee358703.aspx
The FileIo.Create event doesn't return if the CreateFile() call
succeeded, so keep track of the file_object and check that it is
eventually closed with FileIo_Cleanup.
"""
#IRP = 19
TTID = 20 # Thread ID, that's what we want.
FILE_OBJECT = 21
#CREATE_OPTIONS = 22
#FILE_ATTRIBUTES = 23
#SHARE_ACCESS = 24
OPEN_PATH = 25
proc = self._thread_to_process(line[TTID])
if not proc:
# Not a process we care about.
return
match = re.match(r'^\"(.+)\"$', line[OPEN_PATH])
raw_path = match.group(1)
# Ignore directories and bare drive right away.
if raw_path.endswith(os.path.sep):
return
filename = self._drive_map.to_dos(raw_path)
# Ignore bare drive right away. Some may still fall through with format
# like '\\?\X:'
if len(filename) == 2:
return
file_object = line[FILE_OBJECT]
# Override any stale file object
proc.file_objects[file_object] = filename
def handle_FileIo_Rename(self, line):
# TODO(maruel): Handle?
pass
def handle_FileIo_Any(self, line):
pass
def handle_Process_Any(self, line):
pass
def handle_Process_DCStart(self, line):
"""Gives historic information about the process tree.
Use it to extract the pid of the trace_inputs.py parent process that
started logman.exe.
"""
#UNIQUE_PROCESS_KEY = 19
#PROCESS_ID = 20
PARENT_PID = 21
#SESSION_ID = 22
#EXIT_STATUS = 23
#DIRECTORY_TABLE_BASE = 24
#USER_SID = 25
IMAGE_FILE_NAME = 26
#COMMAND_LINE = 27
ppid = int(line[PARENT_PID], 16)
if line[IMAGE_FILE_NAME] == '"logman.exe"':
# logman's parent is trace_inputs.py or whatever tool is using it as a
# library. Trace any other children started by it.
assert not self._tracer_pid
self._tracer_pid = ppid
logging.info('Found logman\'s parent at %d' % ppid)
def handle_Process_End(self, line):
# Look if it is logman terminating, if so, grab the parent's process pid
# and inject cwd.
pid = line[self.PID]
if pid in self.processes:
logging.info('Terminated: %d' % pid)
self.processes[pid].cwd = None
def handle_Process_Start(self, line):
"""Handles a new child process started by PID."""
#UNIQUE_PROCESS_KEY = 19
PROCESS_ID = 20
#PARENT_PID = 21
#SESSION_ID = 22
#EXIT_STATUS = 23
#DIRECTORY_TABLE_BASE = 24
#USER_SID = 25
IMAGE_FILE_NAME = 26
COMMAND_LINE = 27
ppid = line[self.PID]
pid = int(line[PROCESS_ID], 16)
if ppid == self._tracer_pid:
# Need to ignore processes we don't know about because the log is
# system-wide.
if line[IMAGE_FILE_NAME] == '"logman.exe"':
# Skip the shutdown call when "logman.exe stop" is executed.
return
self._initial_pid = self._initial_pid or pid
ppid = None
elif ppid not in self.processes:
# Ignore
return
assert pid not in self.processes
proc = self.processes[pid] = self.Process(self, pid, None, ppid)
# TODO(maruel): Process escapes.
assert (
line[COMMAND_LINE].startswith('"') and
line[COMMAND_LINE].endswith('"'))
proc.command = CommandLineToArgvW(line[COMMAND_LINE][1:-1])
assert (
line[IMAGE_FILE_NAME].startswith('"') and
line[IMAGE_FILE_NAME].endswith('"'))
proc.executable = line[IMAGE_FILE_NAME][1:-1]
# proc.command[0] may be the absolute path of 'executable' but it may be
# anything else too. If it happens that command[0] ends with executable,
# use it, otherwise defaults to the base name.
cmd0 = proc.command[0].lower()
if not cmd0.endswith('.exe'):
# TODO(maruel): That's not strictly true either.
cmd0 += '.exe'
if cmd0.endswith(proc.executable) and os.path.isfile(cmd0):
proc.executable = get_native_path_case(cmd0)
logging.info(
'New child: %s -> %d %s' % (ppid, pid, proc.executable))
def handle_Thread_End(self, line):
"""Has the same parameters as Thread_Start."""
tid = int(line[self.TID], 16)
self._threads_active.pop(tid, None)
def handle_Thread_Start(self, line):
"""Handles a new thread created.
Do not use self.PID here since a process' initial thread is created by
the parent process.
"""
PROCESS_ID = 19
TTHREAD_ID = 20
#STACK_BASE = 21
#STACK_LIMIT = 22
#USER_STACK_BASE = 23
#USER_STACK_LIMIT = 24
#AFFINITY = 25
#WIN32_START_ADDR = 26
#TEB_BASE = 27
#SUB_PROCESS_TAG = 28
#BASE_PRIORITY = 29
#PAGE_PRIORITY = 30
#IO_PRIORITY = 31
#THREAD_FLAGS = 32
# Do not use self.PID here since a process' initial thread is created by
# the parent process.
pid = int(line[PROCESS_ID], 16)
tid = int(line[TTHREAD_ID], 16)
self._threads_active[tid] = pid
def handle_Thread_Any(self, line):
pass
def handle_SystemConfig_Any(self, line):
"""If you have too many of these, check your hardware."""
pass
def __init__(self):
super(LogmanTrace, self).__init__()
# Most ignores need to be determined at runtime.
self.IGNORED = set([os.path.dirname(sys.executable)])
# Add many directories from environment variables.
vars_to_ignore = (
'APPDATA',
'LOCALAPPDATA',
'ProgramData',
'ProgramFiles',
'ProgramFiles(x86)',
'ProgramW6432',
'SystemRoot',
'TEMP',
'TMP',
)
for i in vars_to_ignore:
if os.environ.get(i):
self.IGNORED.add(os.environ[i])
# Also add their short path name equivalents.
for i in list(self.IGNORED):
self.IGNORED.add(GetShortPathName(i.replace('/', os.path.sep)))
# Add these last since they have no short path name equivalent.
self.IGNORED.add('\\SystemRoot')
self.IGNORED = tuple(sorted(self.IGNORED))
@staticmethod
def clean_trace(logname):
if os.path.isfile(logname):
os.remove(logname)
if os.path.isfile(logname + '.etl'):
os.remove(logname + '.etl')
@classmethod
def _start_log(cls, etl):
"""Starts the log collection.
Requires administrative access. logman.exe is synchronous so no need for a
"warmup" call. 'Windows Kernel Trace' is *localized* so use its GUID
instead. The GUID constant name is SystemTraceControlGuid. Lovely.
One can get the list of potentially interesting providers with:
"logman query providers | findstr /i file"
"""
cmd_start = [
'logman.exe',
'start',
'NT Kernel Logger',
'-p', '{9e814aad-3204-11d2-9a82-006008a86939}',
# splitio,fileiocompletion,syscall,file,cswitch,img
'(process,fileio,thread)',
'-o', etl,
'-ets', # Send directly to kernel
# Values extracted out of thin air.
'-bs', '1024',
'-nb', '200', '512',
]
logging.debug('Running: %s' % cmd_start)
try:
subprocess.check_call(
cmd_start,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, e:
if e.returncode == -2147024891:
print >> sys.stderr, 'Please restart with an elevated admin prompt'
elif e.returncode == -2144337737:
print >> sys.stderr, (
'A kernel trace was already running, stop it and try again')
raise
@staticmethod
def _stop_log():
"""Stops the kernel log collection."""
cmd_stop = [
'logman.exe',
'stop',
'NT Kernel Logger',
'-ets', # Sends the command directly to the kernel.
]
logging.debug('Running: %s' % cmd_stop)
subprocess.check_call(
cmd_stop,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
@classmethod
def gen_trace(cls, cmd, cwd, logname, output):
"""Uses logman.exe to start and stop the NT Kernel Logger while the
executable to be traced is run.
"""
logging.info('gen_trace(%s, %s, %s, %s)' % (cmd, cwd, logname, output))
# Use "logman -?" for help.
etl = logname + '.etl'
stdout = stderr = None
if output:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
# 1. Start the log collection.
cls._start_log(etl)
# 2. Run the child process.
logging.debug('Running: %s' % cmd)
try:
child = subprocess.Popen(
cmd, cwd=cwd, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr)
out = child.communicate()[0]
finally:
# 3. Stop the log collection.
cls._stop_log()
# 4. Convert the traces to text representation.
# Use "tracerpt -?" for help.
LOCALE_INVARIANT = 0x7F
windll.kernel32.SetThreadLocale(LOCALE_INVARIANT)
cmd_convert = [
'tracerpt.exe',
'-l', etl,
'-o', logname,
'-gmt', # Use UTC
'-y', # No prompt
# Use -of XML to get the header of each items after column 19, e.g. all
# the actual headers of 'User Data'.
]
# Normally, 'csv' is sufficient. If complex scripts are used (like eastern
# languages), use 'csv_utf16'. If localization gets in the way, use 'xml'.
logformat = 'csv'
if logformat == 'csv':
# tracerpt localizes the 'Type' column, for major brainfuck
# entertainment. I can't imagine any sane reason to do that.
cmd_convert.extend(['-of', 'CSV'])
elif logformat == 'csv_utf16':
# This causes it to use UTF-16, which doubles the log size but ensures the
# log is readable for non-ASCII characters.
cmd_convert.extend(['-of', 'CSV', '-en', 'Unicode'])
elif logformat == 'xml':
cmd_convert.extend(['-of', 'XML'])
else:
assert False, logformat
logging.debug('Running: %s' % cmd_convert)
subprocess.check_call(
cmd_convert, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr)
return child.returncode, out
@classmethod
def parse_log(cls, filename, blacklist):
logging.info('parse_log(%s, %s)' % (filename, blacklist))
def blacklist_more(filepath):
# All the NTFS metadata is in the form x:\$EXTEND or stuff like that.
return blacklist(filepath) or re.match(r'[A-Z]\:\\\$EXTEND', filepath)
# Auto-detect the log format.
with open(filename, 'rb') as f:
hdr = f.read(2)
assert len(hdr) == 2
if hdr == '<E':
# It starts with <Events>.
logformat = 'xml'
elif hdr == '\xFF\xFE':
# UTF-16 little-endian BOM.
logformat = 'csv_utf16'
else:
logformat = 'csv'
context = cls.Context(blacklist_more)
if logformat == 'csv_utf16':
def utf_8_encoder(unicode_csv_data):
"""Encodes the unicode object as utf-8 encoded str instance"""
for line in unicode_csv_data:
yield line.encode('utf-8')
def unicode_csv_reader(unicode_csv_data, **kwargs):
"""Encodes temporarily as UTF-8 since csv module doesn't do unicode."""
csv_reader = csv.reader(utf_8_encoder(unicode_csv_data), **kwargs)
for row in csv_reader:
# Decode str utf-8 instances back to unicode instances, cell by cell:
yield [cell.decode('utf-8') for cell in row]
# The CSV file is UTF-16 so use codecs.open() to load the file into the
# python internal unicode format (utf-8). Then explicitly re-encode as
# utf8 as str instances so csv can parse it fine. Then decode the utf-8
# str back into python unicode instances. This sounds about right.
for line in unicode_csv_reader(codecs.open(filename, 'r', 'utf-16')):
# line is a list of unicode objects
context.on_csv_line(line)
elif logformat == 'csv':
def ansi_csv_reader(ansi_csv_data, **kwargs):
"""Loads an 'ANSI' code page and returns unicode() objects."""
assert sys.getfilesystemencoding() == 'mbcs'
encoding = get_current_encoding()
for row in csv.reader(ansi_csv_data, **kwargs):
# Decode str 'ansi' instances to unicode instances, cell by cell:
yield [cell.decode(encoding) for cell in row]
# The fastest and smallest format but only supports 'ANSI' file paths.
# E.g. the filenames are encoded in the 'current' encoding.
for line in ansi_csv_reader(open(filename)):
# line is a list of unicode objects.
context.on_csv_line(line)
else:
raise NotImplementedError('Implement %s' % logformat)
return context.to_results()
def pretty_print(variables, stdout):
"""Outputs a gyp compatible list from the decoded variables.
Similar to pprint.print() but with NIH syndrome.
"""
# Order the dictionary keys by these keys in priority.
ORDER = (
'variables', 'condition', 'command', 'relative_cwd', 'read_only',
KEY_TRACKED, KEY_UNTRACKED)
def sorting_key(x):
"""Gives priority to 'most important' keys before the others."""
if x in ORDER:
return str(ORDER.index(x))
return x
def loop_list(indent, items):
for item in items:
if isinstance(item, basestring):
stdout.write('%s\'%s\',\n' % (indent, item))
elif isinstance(item, dict):
stdout.write('%s{\n' % indent)
loop_dict(indent + ' ', item)
stdout.write('%s},\n' % indent)
elif isinstance(item, list):
# A list inside a list will write the first item embedded.
stdout.write('%s[' % indent)
for index, i in enumerate(item):
if isinstance(i, basestring):
stdout.write(
'\'%s\', ' % i.replace('\\', '\\\\').replace('\'', '\\\''))
elif isinstance(i, dict):
stdout.write('{\n')
loop_dict(indent + ' ', i)
if index != len(item) - 1:
x = ', '
else:
x = ''
stdout.write('%s}%s' % (indent, x))
else:
assert False
stdout.write('],\n')
else:
assert False
def loop_dict(indent, items):
for key in sorted(items, key=sorting_key):
item = items[key]
stdout.write("%s'%s': " % (indent, key))
if isinstance(item, dict):
stdout.write('{\n')
loop_dict(indent + ' ', item)
stdout.write(indent + '},\n')
elif isinstance(item, list):
stdout.write('[\n')
loop_list(indent + ' ', item)
stdout.write(indent + '],\n')
elif isinstance(item, basestring):
stdout.write(
'\'%s\',\n' % item.replace('\\', '\\\\').replace('\'', '\\\''))
elif item in (True, False, None):
stdout.write('%s\n' % item)
else:
assert False, item
stdout.write('{\n')
loop_dict(' ', variables)
stdout.write('}\n')
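# Example (illustrative): pretty_print({'command': ['./a']}, sys.stdout)
# writes a gyp-style Python literal, one extra space of indent per level,
# with dictionary keys ordered by the ORDER tuple above.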
def get_api():
flavor = get_flavor()
if flavor == 'linux':
return Strace()
elif flavor == 'mac':
return Dtrace()
elif sys.platform == 'win32':
return LogmanTrace()
else:
print >> sys.stderr, 'Unsupported platform %s' % sys.platform
sys.exit(1)
def get_blacklist(api):
"""Returns a function to filter unimportant files normally ignored."""
git_path = os.path.sep + '.git' + os.path.sep
svn_path = os.path.sep + '.svn' + os.path.sep
return lambda f: (
f.startswith(api.IGNORED) or
f.endswith('.pyc') or
git_path in f or
svn_path in f)
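# Example (hypothetical paths): on Linux the returned filter rejects
# '/usr/lib/libc.so' (an api.IGNORED prefix), any '*.pyc' file, and anything
# under a '.git' or '.svn' directory.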
def generate_dict(files, cwd_dir, product_dir):
"""Converts the list of files into a .isolate dictionary.
Arguments:
- files: list of files to generate a dictionary out of.
- cwd_dir: directory to base all the files from, relative to root_dir.
- product_dir: directory to replace with <(PRODUCT_DIR), relative to root_dir.
"""
cwd_dir = cleanup_path(cwd_dir)
product_dir = cleanup_path(product_dir)
def fix(f):
"""Bases the file on the most restrictive variable."""
logging.debug('fix(%s)' % f)
# Important, GYP stores the files with / and not \.
f = f.replace(os.path.sep, '/')
if product_dir and f.startswith(product_dir):
return '<(PRODUCT_DIR)/%s' % f[len(product_dir):]
else:
# cwd_dir is usually the directory containing the gyp file. It may be
# empty if the whole directory containing the gyp file is needed.
return posix_relpath(f, cwd_dir) or './'
corrected = [fix(f) for f in files]
tracked = [f for f in corrected if not f.endswith('/') and ' ' not in f]
untracked = [f for f in corrected if f.endswith('/') or ' ' in f]
variables = {}
if tracked:
variables[KEY_TRACKED] = tracked
if untracked:
variables[KEY_UNTRACKED] = untracked
return variables
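# Example (hypothetical): with product_dir 'out/Release' and cwd_dir
# 'chrome', 'out/Release/gen/a.h' maps to '<(PRODUCT_DIR)/gen/a.h' and
# 'chrome/a.cc' to 'a.cc'; entries ending in '/' or containing a space go
# under KEY_UNTRACKED, the rest under KEY_TRACKED.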
def trace(logfile, cmd, cwd, api, output):
"""Traces an executable. Returns (returncode, output) from api.
Arguments:
- logfile: file to write to.
- cmd: command to run.
- cwd: current directory to start the process in.
- api: a tracing api instance.
- output: if True, returns output, otherwise prints it at the console.
"""
cmd = fix_python_path(cmd)
assert os.path.isabs(cmd[0]), cmd[0]
api.clean_trace(logfile)
return api.gen_trace(cmd, cwd, logfile, output)
def load_trace(logfile, root_dir, api):
"""Loads a trace file and returns the processed file lists.
Arguments:
- logfile: file to load.
- root_dir: root directory to use to determine if a file is relevant to the
trace or not.
- api: a tracing api instance.
"""
results = api.parse_log(logfile, get_blacklist(api))
results = results.strip_root(root_dir)
simplified = extract_directories(results.files)
return results, simplified
def trace_inputs(logfile, cmd, root_dir, cwd_dir, product_dir, force_trace):
"""Tries to load the logs if available. If not, trace the test.
Symlinks are not processed at all.
Arguments:
- logfile: Absolute path to the OS-specific trace.
- cmd: Command list to run.
- root_dir: Base directory where the files we care about live.
- cwd_dir: Cwd to use to start the process, relative to the root_dir
directory.
- product_dir: Directory containing the executables built by the build
process, relative to the root_dir directory. It is used to
properly replace paths with <(PRODUCT_DIR) for gyp output.
- force_trace: Forces tracing unconditionally even if a trace already
exists.
"""
logging.debug(
'trace_inputs(%s, %s, %s, %s, %s, %s)' % (
logfile, cmd, root_dir, cwd_dir, product_dir, force_trace))
def print_if(txt):
if cwd_dir is None:
print txt
# It is important to have an unambiguous path.
assert os.path.isabs(root_dir), root_dir
assert os.path.isabs(logfile), logfile
assert not cwd_dir or not os.path.isabs(cwd_dir), cwd_dir
assert not product_dir or not os.path.isabs(product_dir), product_dir
api = get_api()
# Resolve any symlink
root_dir = os.path.realpath(root_dir)
if not os.path.isfile(logfile) or force_trace:
print_if('Tracing... %s' % cmd)
# Use the proper relative directory.
cwd = root_dir if not cwd_dir else os.path.join(root_dir, cwd_dir)
silent = not isEnabledFor(logging.WARNING)
returncode, _ = trace(logfile, cmd, cwd, api, silent)
if returncode and not force_trace:
return returncode
print_if('Loading traces... %s' % logfile)
results, simplified = load_trace(logfile, root_dir, api)
print_if('Total: %d' % len(results.files))
print_if('Non existent: %d' % len(results.non_existent))
for f in results.non_existent:
print_if(' %s' % f.path)
print_if(
'Interesting: %d reduced to %d' % (
len(results.existent), len(simplified)))
for f in simplified:
print_if(' %s' % f.path)
if cwd_dir is not None:
value = {
'conditions': [
['OS=="%s"' % get_flavor(), {
'variables': generate_dict(
[f.path for f in simplified], cwd_dir, product_dir),
}],
],
}
pretty_print(value, sys.stdout)
return 0
def main():
parser = optparse.OptionParser(
usage='%prog <options> [cmd line...]')
parser.allow_interspersed_args = False
parser.add_option(
'-v', '--verbose', action='count', default=0, help='Use multiple times')
parser.add_option('-l', '--log', help='Log file')
parser.add_option(
'-c', '--cwd',
help='Signal to start the process from this relative directory. When '
'specified, outputs the input files in a way compatible with '
'gyp processing. Should be set to the relative path containing the '
'gyp file, e.g. \'chrome\' or \'net\'')
parser.add_option(
'-p', '--product-dir', default='out/Release',
help='Directory for PRODUCT_DIR. Default: %default')
parser.add_option(
'--root-dir', default=ROOT_DIR,
help='Root directory to base everything off. Default: %default')
parser.add_option(
'-f', '--force',
action='store_true',
default=False,
help='Force to retrace the file')
options, args = parser.parse_args()
level = [logging.ERROR, logging.INFO, logging.DEBUG][min(2, options.verbose)]
logging.basicConfig(
level=level,
format='%(levelname)5s %(module)15s(%(lineno)3d):%(message)s')
if not options.log:
parser.error('Must supply a log file with -l')
if not args:
if not os.path.isfile(options.log) or options.force:
parser.error('Must supply a command to run')
else:
args[0] = os.path.abspath(args[0])
if options.root_dir:
options.root_dir = os.path.abspath(options.root_dir)
return trace_inputs(
os.path.abspath(options.log),
args,
options.root_dir,
options.cwd,
options.product_dir,
options.force)
if __name__ == '__main__':
sys.exit(main())
```
#### File: tools/json_schema_compiler/model_test.py
```python
from json_schema import CachedLoad
import model
import unittest
class ModelTest(unittest.TestCase):
def setUp(self):
self.model = model.Model()
self.permissions_json = CachedLoad('test/permissions.json')
self.model.AddNamespace(self.permissions_json[0],
'path/to/permissions.json')
self.permissions = self.model.namespaces.get('permissions')
self.windows_json = CachedLoad('test/windows.json')
self.model.AddNamespace(self.windows_json[0],
'path/to/window.json')
self.windows = self.model.namespaces.get('windows')
self.tabs_json = CachedLoad('test/tabs.json')
self.model.AddNamespace(self.tabs_json[0],
'path/to/tabs.json')
self.tabs = self.model.namespaces.get('tabs')
def testNamespaces(self):
self.assertEquals(3, len(self.model.namespaces))
self.assertTrue(self.permissions)
def testHasFunctions(self):
self.assertEquals(["contains", "getAll", "remove", "request"],
sorted(self.permissions.functions.keys()))
def testHasTypes(self):
self.assertEquals(['tabs.Tab'], self.tabs.types.keys())
self.assertEquals(['permissions.Permissions'],
self.permissions.types.keys())
self.assertEquals(['windows.Window'], self.windows.types.keys())
def testHasProperties(self):
self.assertEquals(["active", "favIconUrl", "highlighted", "id",
"incognito", "index", "pinned", "selected", "status", "title", "url",
"windowId"],
sorted(self.tabs.types['tabs.Tab'].properties.keys()))
def testProperties(self):
string_prop = self.tabs.types['tabs.Tab'].properties['status']
self.assertEquals(model.PropertyType.STRING, string_prop.type_)
integer_prop = self.tabs.types['tabs.Tab'].properties['id']
self.assertEquals(model.PropertyType.INTEGER, integer_prop.type_)
array_prop = self.windows.types['windows.Window'].properties['tabs']
self.assertEquals(model.PropertyType.ARRAY, array_prop.type_)
self.assertEquals(model.PropertyType.REF, array_prop.item_type.type_)
self.assertEquals('tabs.Tab', array_prop.item_type.ref_type)
object_prop = self.tabs.functions['query'].params[0]
self.assertEquals(model.PropertyType.OBJECT, object_prop.type_)
self.assertEquals(
["active", "highlighted", "pinned", "status", "title", "url",
"windowId", "windowType"],
sorted(object_prop.properties.keys()))
def testChoices(self):
self.assertEquals(model.PropertyType.CHOICES,
self.tabs.functions['move'].params[0].type_)
def testPropertyNotImplemented(self):
(self.permissions_json[0]['types'][0]
['properties']['permissions']['type']) = 'something'
self.assertRaises(model.ParseException, self.model.AddNamespace,
self.permissions_json[0], 'path/to/something.json')
def testDescription(self):
self.assertFalse(
self.permissions.functions['contains'].params[0].description)
self.assertEquals('True if the extension has the specified permissions.',
self.permissions.functions['contains'].callback.params[0].description)
def testPropertyUnixName(self):
param = self.tabs.functions['move'].params[0]
self.assertEquals('tab_ids', param.unix_name)
self.assertRaises(AttributeError,
param.choices[model.PropertyType.INTEGER].GetUnixName)
param.choices[model.PropertyType.INTEGER].unix_name = 'asdf'
param.choices[model.PropertyType.INTEGER].unix_name = 'tab_ids_integer'
self.assertEquals('tab_ids_integer',
param.choices[model.PropertyType.INTEGER].unix_name)
self.assertRaises(AttributeError,
param.choices[model.PropertyType.INTEGER].SetUnixName, 'breakage')
def testUnixName(self):
expectations = {
'foo': 'foo',
'fooBar': 'foo_bar',
'fooBarBaz': 'foo_bar_baz',
'fooBARBaz': 'foo_bar_baz',
'fooBAR': 'foo_bar',
'FOO': 'foo',
'FOOBar': 'foo_bar',
'foo.bar': 'foo_bar',
'foo.BAR': 'foo_bar',
'foo.barBAZ': 'foo_bar_baz'
}
for name in expectations:
self.assertEquals(expectations[name], model._UnixName(name))
if __name__ == '__main__':
unittest.main()
```
#### File: tools/json_schema_compiler/schema_util.py
```python
def StripSchemaNamespace(s):
last_dot = s.rfind('.')
if not last_dot == -1:
return s[last_dot + 1:]
return s
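# Example: StripSchemaNamespace('foo.Bar') returns 'Bar', while
# StripSchemaNamespace('Baz') returns 'Baz' unchanged.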
def PrefixSchemasWithNamespace(schemas):
for s in schemas:
_PrefixWithNamespace(s.get("namespace"), s)
def _MaybePrefixFieldWithNamespace(namespace, schema, key):
if type(schema) == dict and key in schema:
old_value = schema[key]
if not "." in old_value:
schema[key] = namespace + "." + old_value
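# Example: with namespace 'tabs', {'$ref': 'Tab'} becomes
# {'$ref': 'tabs.Tab'}; an already-qualified value such as
# 'fully.qualified.T' contains a '.' and is left untouched.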
def _PrefixTypesWithNamespace(namespace, types):
for t in types:
_MaybePrefixFieldWithNamespace(namespace, t, "id")
_MaybePrefixFieldWithNamespace(namespace, t, "customBindings")
def _PrefixWithNamespace(namespace, schema):
if type(schema) == dict:
if "types" in schema:
_PrefixTypesWithNamespace(namespace, schema.get("types"))
_MaybePrefixFieldWithNamespace(namespace, schema, "$ref")
for s in schema:
_PrefixWithNamespace(namespace, schema[s])
elif type(schema) == list:
for s in schema:
_PrefixWithNamespace(namespace, s)
```
#### File: tools/json_schema_compiler/schema_util_test.py
```python
import schema_util
import unittest
class SchemaUtilTest(unittest.TestCase):
def testStripSchemaNamespace(self):
self.assertEquals('Bar', schema_util.StripSchemaNamespace('foo.Bar'))
self.assertEquals('Baz', schema_util.StripSchemaNamespace('Baz'))
def testPrefixSchemasWithNamespace(self):
schemas = [
{ 'namespace': 'n1',
'types': [
{
'id': 'T1',
'customBindings': 'T1',
'properties': {
'p1': {'$ref': 'T1'},
'p2': {'$ref': 'fully.qualified.T'},
}
}
],
'functions': [
{
'parameters': [
{ '$ref': 'T1' },
{ '$ref': 'fully.qualified.T' },
],
'returns': { '$ref': 'T1' }
},
],
'events': [
{
'parameters': [
{ '$ref': 'T1' },
{ '$ref': 'fully.qualified.T' },
],
},
],
},
]
schema_util.PrefixSchemasWithNamespace(schemas)
self.assertEquals('n1.T1', schemas[0]['types'][0]['id'])
self.assertEquals('n1.T1', schemas[0]['types'][0]['customBindings'])
self.assertEquals('n1.T1',
schemas[0]['types'][0]['properties']['p1']['$ref'])
self.assertEquals('fully.qualified.T',
schemas[0]['types'][0]['properties']['p2']['$ref'])
self.assertEquals('n1.T1',
schemas[0]['functions'][0]['parameters'][0]['$ref'])
self.assertEquals('fully.qualified.T',
schemas[0]['functions'][0]['parameters'][1]['$ref'])
self.assertEquals('n1.T1',
schemas[0]['functions'][0]['returns']['$ref'])
self.assertEquals('n1.T1',
schemas[0]['events'][0]['parameters'][0]['$ref'])
self.assertEquals('fully.qualified.T',
schemas[0]['events'][0]['parameters'][1]['$ref'])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "1067511899/tornado-learn",
"score": 4
} |
#### File: examples/graphviz/colors.py
```python
import random
from pycallgraph import PyCallGraph
from pycallgraph import Config
from pycallgraph import Color
from pycallgraph.output import GraphvizOutput
def rainbow(node):
'''Colour using only changes in hue.
It will go from 0 to 0.8 which is red, orange, yellow, green, cyan, blue,
then purple.
See http://en.wikipedia.org/wiki/Hue for more information on hue.
'''
return Color.hsv(node.time.fraction * 0.8, 0.4, 0.9)
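# Illustration: a node with time.fraction 0.0 maps to hue 0.0 (red) and one
# with time.fraction 1.0 maps to hue 0.8 (purple), spanning the hues between.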
def greyscale(node):
'''Goes from dark grey to a light grey.'''
return Color.hsv(0, 0, node.time.fraction / 2 + 0.4)
def orange_green(node):
'''Make a higher total time have an orange colour and a higher number
of calls have a green colour using RGB.
'''
return Color(
0.2 + node.time.fraction * 0.8,
0.2 + node.calls.fraction * 0.4 + node.time.fraction * 0.4,
0.2,
)
def rand(node):
return Color.hsv(
random.random(),
node.calls.fraction * 0.5 + 0.5,
node.calls.fraction * 0.5 + 0.5,
)
def main():
graphviz = GraphvizOutput()
pycallgraph = PyCallGraph(
output=graphviz,
config=Config(include_stdlib=True)
)
pycallgraph.start()
import HTMLParser # noqa
pycallgraph.stop()
# Set the edge colour to black for all examples
graphviz.edge_color_func = lambda e: Color(0, 0, 0)
# Default node colouring
graphviz.output_file = 'colours-default.png'
graphviz.done()
def run(func, output_file):
graphviz.node_color_func = func
graphviz.output_file = output_file
graphviz.done()
run(rainbow, 'colors-rainbow.png')
run(greyscale, 'colors-greyscale.png')
run(orange_green, 'colors-orange-green.png')
run(rand, 'colors-random.png')
if __name__ == '__main__':
main()
```
#### File: examples/graphviz/large.py
```python
from pycallgraph import PyCallGraph
from pycallgraph import Config
from pycallgraph.output import GraphvizOutput
def main():
graphviz = GraphvizOutput()
graphviz.output_file = 'large.png'
config = Config(include_stdlib=True)
with PyCallGraph(output=graphviz, config=config):
        from urllib.request import urlopen
        from xml.dom.minidom import parseString
        parseString(urlopen('http://w3.org/').read())
if __name__ == '__main__':
main()
```
#### File: callbacklearn/pycallgraph/color.py
```python
import colorsys
class ColorException(Exception):
pass
class Color(object):
def __init__(self, r, g, b, a=1):
self.r = r
self.g = g
self.b = b
self.a = a
self.validate_all()
@classmethod
def hsv(cls, h, s, v, a=1):
r, g, b = colorsys.hsv_to_rgb(h, s, v)
return cls(r, g, b, a)
def __str__(self):
return '<Color {}>'.format(self.rgba_web())
def validate_all(self):
self.validate('r')
self.validate('g')
self.validate('b')
self.validate('a')
def validate(self, attr):
v = getattr(self, attr)
if not 0 <= v <= 1:
raise ColorException('{} out of range 0 to 1: {}'.format(attr, v))
@property
def r255(self):
return int(self.r * 255)
@property
def g255(self):
return int(self.g * 255)
@property
def b255(self):
return int(self.b * 255)
@property
def a255(self):
return int(self.a * 255)
def rgb_web(self):
'''Returns a string with the RGB components as a HTML hex string.'''
return '#{0.r255:02x}{0.g255:02x}{0.b255:02x}'.format(self)
def rgba_web(self):
'''Returns a string with the RGBA components as a HTML hex string.'''
return '{0}{1.a255:02x}'.format(self.rgb_web(), self)
def rgb_csv(self):
'''Returns a string with the RGB components as CSV.'''
return '{0.r255},{0.g255},{0.b255}'.format(self)
```
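A quick usage sketch of the `Color` class above (hypothetical values; it only assumes `pycallgraph` is importable, as in the earlier examples):
```python
# Minimal Color usage sketch: construct from HSV, then render as hex/CSV.
from pycallgraph.color import Color

c = Color.hsv(0.6, 0.8, 0.9)  # a blue-ish colour; components validated to [0, 1]
print(c.rgb_web())            # '#rrggbb' hex string built from r255/g255/b255
print(c.rgba_web())           # same, with the alpha byte appended
print(c.rgb_csv())            # 'r,g,b' as 0-255 CSV values
```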
#### File: callbacklearn/pycallgraph/config.py
```python
import argparse
import sys
from .output import outputters
from .globbing_filter import GlobbingFilter
from .grouper import Grouper
class Config(object):
'''Handles configuration settings for pycallgraph, tracer, and each output
module. It also handles command line arguments.
'''
def __init__(self, **kwargs):
'''
You can set defaults in the constructor, e.g. Config(verbose=True)
'''
self.output = None
self.verbose = False
self.debug = False
self.groups = True
self.threaded = False
self.memory = False
# Filtering
self.include_stdlib = False
self.include_pycallgraph = False
self.max_depth = 99999
self.trace_filter = GlobbingFilter(
exclude=['pycallgraph.*'],
include=['*'],
)
# Grouping
self.trace_grouper = Grouper()
self.did_init = True
        # Update the defaults with anything from kwargs
        for k, v in kwargs.items():  # dict.iteritems() no longer exists in Python 3
            setattr(self, k, v)
self.create_parser()
def log_verbose(self, text):
if self.verbose:
print(text)
def log_debug(self, text):
if self.debug:
print(text)
def add_module_arguments(self, usage):
subparsers = self.parser.add_subparsers(
help='OUTPUT_TYPE', dest='output')
parent_parser = self.create_parent_parser()
for name, cls in outputters.items():
cls.add_arguments(subparsers, parent_parser, usage)
def get_output(self):
if not self.output:
return
output = outputters[self.output]()
output.set_config(self)
return output
def parse_args(self, args=None):
self.parser.parse_args(args, namespace=self)
self.convert_filter_args()
def strip_argv(self):
sys.argv = [self.command] + self.command_args
def convert_filter_args(self):
if not self.include:
self.include = ['*']
if not self.include_pycallgraph:
self.exclude.append('pycallgraph.*')
self.trace_filter = GlobbingFilter(
include=self.include,
exclude=self.exclude,
)
def create_parser(self):
'''Used by the pycallgraph command line interface to parse
arguments.
'''
usage = 'pycallgraph [options] OUTPUT_TYPE [output_options] -- ' \
'SCRIPT.py [ARG ...]'
self.parser = argparse.ArgumentParser(
description='Python Call Graph profiles a Python script and '
'generates a call graph visualization.', usage=usage,
)
self.add_ungrouped_arguments()
self.add_filter_arguments()
self.add_module_arguments(usage)
def create_parent_parser(self):
'''Mixing subparsers with positional arguments can be done with a
parents option. Found via: http://stackoverflow.com/a/11109863/11125
'''
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument(
'command', metavar='SCRIPT',
help='The Python script file to profile',
)
parent_parser.add_argument(
'command_args', metavar='ARG', nargs='*',
help='Python script arguments.'
)
return parent_parser
def add_ungrouped_arguments(self):
self.parser.add_argument(
'-v', '--verbose', action='store_true', default=self.verbose,
help='Display informative messages while running')
self.parser.add_argument(
'-d', '--debug', action='store_true', default=self.debug,
help='Display debugging messages while running')
self.parser.add_argument(
'-t', '--threaded', action='store_true', default=self.threaded,
            help='Process traces asynchronously (Experimental)')
self.parser.add_argument(
'-ng', '--no-groups', dest='groups', action='store_false',
default=self.groups, help='Do not group functions by module')
self.parser.add_argument(
'-s', '--stdlib', dest='include_stdlib', action='store_true',
default=self.include_stdlib,
help='Include standard library functions in the trace')
self.parser.add_argument(
'-m', '--memory', action='store_true', default=self.memory,
help='(Experimental) Track memory usage')
def add_filter_arguments(self):
group = self.parser.add_argument_group('filtering')
group.add_argument(
'-i', '--include', default=[], action='append',
help='Wildcard pattern of modules to include in the output. '
'You can have multiple include arguments.'
)
group.add_argument(
'-e', '--exclude', default=[], action='append',
help='Wildcard pattern of modules to exclude in the output. '
'You can have multiple exclude arguments.'
)
group.add_argument(
'--include-pycallgraph', default=self.include_pycallgraph,
action='store_true',
help='Do not automatically filter out pycallgraph',
)
group.add_argument(
'--max-depth', default=self.max_depth, type=int,
help='Maximum stack depth to trace',
)
```
#### File: pycallgraph/output/output.py
```python
import re
import os
from distutils.spawn import find_executable
from ..exceptions import PyCallGraphException
from ..color import Color
class Output(object):
'''Base class for all outputters.'''
def __init__(self, **kwargs):
self.node_color_func = self.node_color
self.edge_color_func = self.edge_color
self.node_label_func = self.node_label
self.edge_label_func = self.edge_label
        # Update the defaults with anything from kwargs
        for k, v in kwargs.items():
            setattr(self, k, v)
def set_config(self, config):
'''
This is a quick hack to move the config variables set in Config into
the output module config variables.
'''
        for k, v in config.__dict__.items():
if hasattr(self, k) and \
callable(getattr(self, k)):
continue
setattr(self, k, v)
def node_color(self, node):
value = float(node.time.fraction * 2 + node.calls.fraction) / 3
return Color.hsv(value / 2 + .5, value, 0.9)
def edge_color(self, edge):
value = float(edge.time.fraction * 2 + edge.calls.fraction) / 3
return Color.hsv(value / 2 + .5, value, 0.7)
def node_label(self, node):
parts = [
'{0.name}',
'calls: {0.calls.value:n}',
'time: {0.time.value:f}s',
]
if self.processor.config.memory:
parts += [
'memory in: {0.memory_in.value_human_bibyte}',
'memory out: {0.memory_out.value_human_bibyte}',
]
return r'\n'.join(parts).format(node)
def edge_label(self, edge):
return '{0}'.format(edge.calls.value)
def sanity_check(self):
'''Basic checks for certain libraries or external applications. Raise
or warn if there is a problem.
'''
pass
@classmethod
def add_arguments(cls, subparsers):
pass
def reset(self):
pass
def set_processor(self, processor):
self.processor = processor
def start(self):
'''Initialise variables after initial configuration.'''
pass
def update(self):
'''Called periodically during a trace, but only when should_update is
set to True.
'''
raise NotImplementedError('update')
def should_update(self):
'''Return True if the update method should be called periodically.'''
return False
def done(self):
'''Called when the trace is complete and ready to be saved.'''
raise NotImplementedError('done')
def ensure_binary(self, cmd):
if find_executable(cmd):
return
raise PyCallGraphException(
'The command "{0}" is required to be in your path.'.format(cmd))
def normalize_path(self, path):
        regex_user_expand = re.compile(r'\A~')
if regex_user_expand.match(path):
path = os.path.expanduser(path)
else:
path = os.path.expandvars(path) # expand, just in case
return path
def prepare_output_file(self):
if self.fp is None:
self.output_file = self.normalize_path(self.output_file)
self.fp = open(self.output_file, 'wb')
def verbose(self, text):
self.processor.config.log_verbose(text)
def debug(self, text):
self.processor.config.log_debug(text)
@classmethod
def add_output_file(cls, subparser, defaults, help):
subparser.add_argument(
'-o', '--output-file', type=str, default=defaults.output_file,
help=help,
)
```
#### File: tornado-learn/codewars/Countonesinasegment.py
```python
def countOnes(left, right):
    # f(n) = total number of set bits among all integers in [1, n];
    # integer division (//) keeps the result an int in Python 3.
    def f(n):
        c = 0
        a = list(reversed(list(bin(n))))
        for i, d in enumerate(a):
            if d == '1':
                c += 1 + 2 ** i * i // 2 + 2 ** i * a[i + 1:].count('1')
        return c
    return f(right) - f(left - 1)
if __name__ == '__main__':
pass
```
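The closed form in `f` is easy to cross-check against a brute-force count; a small verification sketch (assuming `countOnes` from the file above is in scope):
```python
# Brute-force cross-check of the closed-form range bit counter.
def count_ones_naive(left, right):
    return sum(bin(n).count('1') for n in range(left, right + 1))

for lo, hi in [(1, 10), (5, 37), (100, 1000)]:
    assert countOnes(lo, hi) == count_ones_naive(lo, hi)
```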
#### File: tornado-learn/codewars/DisemvowelTrolls.py
```python
def disemvowel(string):
    tmp = ''
    for t in string:
        if t.lower() in 'aeiou':
            continue
        tmp += t
    return tmp
def disemvowel1(s):
return s.translate(str.maketrans('', '', 'aeiouAEIOU'))
if __name__ == '__main__':
print(disemvowel1("This website is for losers LOL!"))
```
#### File: tornado-learn/codewars/Firstnon-repeatingcharacter.py
```python
def first_non_repeating_letter1(string):
tmp = string.lower()
for x in string:
if tmp.count(x.lower()) == 1:
return x
return ''
# This version is better: Counter tallies the whole string in a single pass.
from collections import Counter
def first_non_repeating_letter(string):
cnt = Counter(string.lower())
for letter in string:
if cnt[letter.lower()] == 1:
return letter
return ''
if __name__ == '__main__':
print(first_non_repeating_letter('sTreSS'))
```
#### File: codewars/level3/base64encoding.py
```python
import string
base64_charset = string.ascii_uppercase + string.ascii_lowercase + string.digits + '+/'
def to_base_64(origin_bytes):
base64_bytes = ['{:0>8}'.format(str(bin(b)).replace('0b', '')) for b in origin_bytes]
resp = ''
nums = len(base64_bytes) // 3
remain = len(base64_bytes) % 3
integral_part = base64_bytes[0:3 * nums]
while integral_part:
tmp_unit = ''.join(integral_part[0:3])
tmp_unit = [int(tmp_unit[x: x + 6], 2) for x in [0, 6, 12, 18]]
resp += ''.join([base64_charset[i] for i in tmp_unit])
integral_part = integral_part[3:]
if remain:
remain_part = ''.join(base64_bytes[3 * nums:]) + (3 - remain) * '0' * 8
tmp_unit = [int(remain_part[x: x + 6], 2) for x in [0, 6, 12, 18]][:remain + 1]
resp += ''.join([base64_charset[i] for i in tmp_unit]) + (3 - remain) * '='
return resp
def from_base_64(base64_str):
base64_bytes = ['{:0>6}'.format(str(bin(base64_charset.index(s))).replace('0b', '')) for s in base64_str if
s != '=']
resp = bytearray()
nums = len(base64_bytes) // 4
remain = len(base64_bytes) % 4
integral_part = base64_bytes[0:4 * nums]
while integral_part:
tmp_unit = ''.join(integral_part[0:4])
tmp_unit = [int(tmp_unit[x: x + 8], 2) for x in [0, 8, 16]]
for i in tmp_unit:
resp.append(i)
integral_part = integral_part[4:]
if remain:
remain_part = ''.join(base64_bytes[nums * 4:])
tmp_unit = [int(remain_part[i * 8:(i + 1) * 8], 2) for i in range(remain - 1)]
for i in tmp_unit:
resp.append(i)
return resp
if __name__ == '__main__':
pass
```
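A round-trip sanity check against the standard library's `base64` module (a sketch; it assumes the two functions above are in scope):
```python
# Verify the hand-rolled codec agrees with the stdlib and round-trips.
import base64

data = b'any carnal pleasure.'  # classic example with one '=' of padding
encoded = to_base_64(data)
assert encoded == base64.b64encode(data).decode('ascii')
assert bytes(from_base_64(encoded)) == data
```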
#### File: codewars/level5/IntegersRecreationOne.py
```python
import time
def list_squared(m, n):
result = []
for x in range(m, n + 1):
sumx = 0
sqrtx = int(x ** 0.5) + 1
for i in range(1, sqrtx):
if x % i == 0:
tmp = x // i
if tmp != i:
sumx = sumx + i ** 2 + (tmp) ** 2
else:
sumx += tmp ** 2
if int(sumx ** 0.5) ** 2 == sumx:
result.append([x, sumx])
return result
if __name__ == '__main__':
beg = time.time()
print(list_squared(1, 251110))
print(time.time() - beg)
```
#### File: codewars/level5/SimplePigLatin.py
```python
import re
def pig_it(text):
st = text.split()
result = []
for x in st:
        if re.search(r'\W', x):
result.append(x)
else:
result.append(x[1:] + x[0] + 'ay')
return ' '.join(result)
if __name__ == '__main__':
print(pig_it('Hello world !'))
```
#### File: codewars/level5/snumpy.py
```python
from numba import autojit
from time import time
LIMIT = pow(10, 6)
def primes(limit):
# Keep only odd numbers in sieve, mapping from index to number is
# num = 2 * idx + 3
# The square of the number corresponding to idx then corresponds to:
# idx2 = 2*idx*idx + 6*idx + 3
sieve = [True] * (limit // 2)
prime_numbers = set([2])
for j in range(len(sieve)):
if sieve[j]:
new_prime = 2 * j + 3
prime_numbers.add(new_prime)
for k in range((2 * j + 6) * j + 3, len(sieve), new_prime):
sieve[k] = False
return list(prime_numbers)
numba_primes = autojit(primes)
start = time()
numba_primes(LIMIT)
end = time()
print("Numba: Time Taken : ", end - start)
start = time()
primes(LIMIT)
end = time()
print("Python: Time Taken : ", end - start)
```
#### File: codewars/level6/Arraydiff.py
```python
def array_diff(a, b):
if not a or not b:
return a
return [x for x in a if x not in b]
if __name__ == '__main__':
print(array_diff([1, 2], [1]), [2], "a was [1,2], b was [1], expected [2]")
print(array_diff([1, 2, 2], [1]), [2, 2], "a was [1,2,2], b was [1], expected [2,2]")
print(array_diff([1, 2, 2], [2]), [1], "a was [1,2,2], b was [2], expected [1]")
print(array_diff([1, 2, 2], []), [1, 2, 2], "a was [1,2,2], b was [], expected [1,2,2]")
print(array_diff([], [1, 2]), [], "a was [], b was [1,2], expected []")
```
#### File: codewars/level6/ATMmoneycounter.py
```python
import re
VALUES = {'EUR': [5, 10, 20, 50, 100, 200, 500], 'RMB': [2, 5, 10, 20, 50, 100]}
def atm(value):
    sp = re.findall(r'[\d]?', value)
print(sp)
return None
if __name__ == '__main__':
print(atm('XSF 1000'), 'Sorry, have no XSF.')
print(atm('rub 12341'), 'Can\'t do 12341 RUB. Value must be divisible by 10!')
print(atm('10202UAH'), '20 * 500 UAH, 2 * 100 UAH, 1 * 2 UAH')
print(atm('842 usd'), '8 * 100 USD, 2 * 20 USD, 1 * 2 USD')
print(atm('euR1000'), '2 * 500 EUR')
print(atm('sos100'), 'Can\'t do 100 SOS. Value must be divisible by 1000!')
```
#### File: codewars/level6/Yourorderplease.py
```python
import re
def order(sentence):
if not sentence:
return ''
dic = {}
for x in sentence.split():
        pos = int(re.findall(r'\d', x)[0])
dic[pos] = x
key = list(dic.keys())
key.sort()
result = []
for x in key:
result.append(dic[x])
return ' '.join(result)
def order1(words):
return ' '.join(sorted(words.split(), key=lambda w:sorted(w)))
# Works because digits sort before letters: sorting each word's characters
# effectively orders the words by the digit each word contains.
if __name__ == '__main__':
print(order1("is2 Thi1s T4est 3a"))
```
#### File: codewars/level7/BouncingBall.py
```python
def bouncing_ball(initial, proportion):
result = 1
initial = initial * proportion
while initial > 1.0:
initial = initial * proportion
result += 1
return result
import math
# Use logarithms: the bounce count is ceil(log base (1/proportion) of initial).
def bouncing_ball1(initial, proportion):
return math.ceil(math.log(initial, 1 / proportion))
if __name__ == '__main__':
# print(bouncing_ball(2, 0.5))
print(bouncing_ball(4, 0.5))
print(bouncing_ball(30, 0.3))
```
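The logarithmic version works because after k bounces the height is `initial * proportion**k`; the bounce count is the smallest k with `initial * proportion**k <= 1`, i.e. `ceil(log(initial) / log(1/proportion))`. A quick equivalence check (a sketch, assuming both functions above are in scope):
```python
# The iterative and closed-form bounce counters should agree.
for h0, p in [(4, 0.5), (30, 0.3), (100, 0.9)]:
    assert bouncing_ball(h0, p) == bouncing_ball1(h0, p)
```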
#### File: codewars/level7/VowelCount.py
```python
def getCount(inputStr):
num_vowels = 0
for x in inputStr:
if x in 'aeiouAEIOU':
num_vowels += 1
return num_vowels
#
def getCount1(inputStr):
return sum(1 for let in inputStr if let in "aeiouAEIOU")
if __name__ == '__main__':
pass
```
#### File: codewars/level8/DoIgetabonus.py
```python
def bonus_time(salary, bonus):
if bonus:
salary *= 10
return '${}'.format(salary)
def bonus_time1(salary, bonus):
return "${}".format(salary * (10 if bonus else 1))
if __name__ == '__main__':
print(bonus_time(67890, True))
```
#### File: codewars/level8/withoutnumbers.py
```python
if __name__ == '__main__':
print(ord('f'))
print(ord('a'))
print(ord('f') % ord('a'))
```
#### File: tornado-learn/codewars/LongestCommonSubsequence.py
```python
def lcs(x, y):
list1 = set(x)
list2 = set(y)
return ''.join(sorted(list(''.join(list1 & list2))))
if __name__ == '__main__':
print(lcs("abcdef", "abc"))
```
#### File: tornado-learn/codewars/Mumbling.py
```python
def accum(s):
count = 0
res = []
for x in s:
res.append(x.upper() + x.lower() * count)
count += 1
return '-'.join(res)
def accum1(s):
return '-'.join(c.upper() + c.lower() * i for i, c in enumerate(s))
if __name__ == '__main__':
print(accum('RqaEzty'))
```
#### File: tornado-learn/codewars/persistentbugger.py
```python
from functools import reduce  # the public module, not the private _functools
import time
def persistence(n):
    count = 0
    while len(str(n)) > 1:
        count += 1
        tmp = str(n)
        n = 1
        for x in tmp:
            n = n * int(x)
    return count
import operator
def persistence1(n):
    i = 0
    while n >= 10:
        n = reduce(operator.mul, [int(x) for x in str(n)], 1)
        i += 1
    return i
if __name__ == '__main__':
    begin = time.time()
    persistence(9888986647477777777777777777777)
    end1 = time.time()
    persistence1(9888986647477777777777777777777)
    end2 = time.time()
    print(end1 - begin)
    print(end2 - end1)
```
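On Python 3.8+, the digit-product step can also be written with `math.prod`, which removes the `functools.reduce` machinery entirely; a hedged sketch (the name `persistence2` is ours):
```python
# Multiplicative persistence via math.prod (Python 3.8+).
import math

def persistence2(n):
    steps = 0
    while n >= 10:
        n = math.prod(int(d) for d in str(n))
        steps += 1
    return steps

assert persistence2(39) == 3  # 39 -> 27 -> 14 -> 4
```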
#### File: tornado-learn/codewars/RangeExtraction.py
```python
def solution(args):
out = []
beg = end = args[0]
for n in args[1:] + [""]:
if n != end + 1:
if end == beg:
out.append(str(beg))
elif end == beg + 1:
out.extend([str(beg), str(end)])
else:
out.append(str(beg) + "-" + str(end))
beg = n
end = n
return ",".join(out)
# '-6,-3-1,3-5,7-11,14,15,17-20'
if __name__ == '__main__':
print(solution(([-60, -58, -56])))
```
#### File: tornado-learn/codewars/Stringincrementer.py
```python
import re
def increment_string(strng):
result = re.findall(r'\d+$', strng)
formatn = len(result)
if formatn == 0:
return strng + '1'
lenvalue = len(result[0])
count = int(result[0]) + 1
tmp = str(count)
if len(tmp) >= lenvalue:
return strng[:len(strng) - lenvalue] + tmp
else:
return strng[:len(strng) - lenvalue] + '0' * (lenvalue - len(tmp)) + tmp
def increment_string1(strng):
head = strng.rstrip('0123456789')
tail = strng[len(head):]
if tail == "": return strng + "1"
return head + str(int(tail) + 1).zfill(len(tail))
if __name__ == '__main__':
print(increment_string('foo6677bar00999'))
```
#### File: tornado-learn/codewars/StripComments.py
```python
def solution(string, markers):
parts = string.split('\n')
for s in markers:
parts = [v.split(s)[0].rstrip() for v in parts]
return '\n'.join(parts)
def solution1(string, markers):
s = string.split('\n')
result = []
for x in s:
hascomm = False
for m in markers:
ind = x.find(m)
if ind > 0:
hascomm = True
tmp = x[:ind]
result.append(tmp.strip())
if not hascomm:
result.append(x)
return '\n'.join(result)
if __name__ == '__main__':
print(solution("a #b\nc\nd $e f ", ["#", "$"]))
```
#### File: tornado-learn/codewars/Testmodule.py
```python
import unittest
class Test(unittest.TestCase):
def assert_equals(self, fun, res, output=''):
        if self.assertEqual(fun, res, output) is not None:
print(output)
else:
pass
if __name__ == '__main__':
test = Test()
test.assert_equals(12, 12,'sadfjklasfj')
```
#### File: codewars/tmp/rstriptest.py
```python
def increment_string(strng):
head = strng.rstrip('0123456789')
tail = strng[len(head):]
print(head, tail)
if __name__ == '__main__':
increment_string('foo6677bar00999')
```
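Worth noting: `str.rstrip` strips a trailing run of any characters in the given set, not a literal suffix string, which is exactly why the trick above isolates the numeric tail. A tiny illustration:
```python
# rstrip takes a character set, not a suffix.
print('foo6677bar00999'.rstrip('0123456789'))  # -> 'foo6677bar'
print('abc-xyx'.rstrip('xy'))                  # -> 'abc-' (strips x, y, x)
```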
#### File: datascience/handsonmachine/allre.py
```python
import numpy as np
import matplotlib.pyplot as plt
###########1. Data generation##########
def f(x1, x2):
y = 0.5 * np.sin(x1) + 0.5 * np.cos(x2) + 3 + 0.1 * x1
return y
def load_data():
x1_train = np.linspace(0, 50, 500)
x2_train = np.linspace(-10, 10, 500)
data_train = np.array([[x1, x2, f(x1, x2) + (np.random.random(1) - 0.5)] for x1, x2 in zip(x1_train, x2_train)])
x1_test = np.linspace(0, 50, 100) + 0.5 * np.random.random(100)
x2_test = np.linspace(-10, 10, 100) + 0.02 * np.random.random(100)
data_test = np.array([[x1, x2, f(x1, x2)] for x1, x2 in zip(x1_test, x2_test)])
return data_train, data_test
train, test = load_data()
x_train, y_train = train[:, :2], train[:, 2]  # the first two columns are x1, x2; the third is y, which carries random noise
x_test, y_test = test[:, :2], test[:, 2]  # same as above, except this y is noise-free
###########2. Regression##########
def try_different_method(model):
model.fit(x_train, y_train)
score = model.score(x_test, y_test)
result = model.predict(x_test)
print(result)
plt.figure()
plt.plot(np.arange(len(result)), y_test, 'go-', label='true value')
plt.plot(np.arange(len(result)), result, 'ro-', label='predict value')
plt.title('score: %f' % score)
plt.legend()
plt.show()
###########3. Choosing a concrete method##########
####3.1 Decision tree regression####
from sklearn import tree
model_DecisionTreeRegressor = tree.DecisionTreeRegressor()
####3.2 Linear regression####
from sklearn import linear_model
model_LinearRegression = linear_model.LinearRegression()
####3.3 SVM regression####
from sklearn import svm
model_SVR = svm.SVR()
print(model_SVR)
####3.4 KNN regression####
from sklearn import neighbors
model_KNeighborsRegressor = neighbors.KNeighborsRegressor()
print(model_KNeighborsRegressor.get_params())
####3.5 Random forest regression####
from sklearn import ensemble
model_RandomForestRegressor = ensemble.RandomForestRegressor(n_estimators=20)  # 20 decision trees here
####3.6 AdaBoost regression####
from sklearn import ensemble
model_AdaBoostRegressor = ensemble.AdaBoostRegressor(n_estimators=50)  # 50 decision trees here
####3.7 GBRT regression####
from sklearn import ensemble
model_GradientBoostingRegressor = ensemble.GradientBoostingRegressor(n_estimators=100)  # 100 decision trees here
####3.8 Bagging regression####
from sklearn.ensemble import BaggingRegressor
model_BaggingRegressor = BaggingRegressor()
####3.9 ExtraTree (extremely randomized tree) regression####
from sklearn.tree import ExtraTreeRegressor
model_ExtraTreeRegressor = ExtraTreeRegressor()
###########4. Invoking the chosen method##########
try_different_method(model_SVR)
```
#### File: tornado-learn/datascience/sqlservertest.py
```python
import pymssql
class MSSQL:
def __init__(self):
self.host = '192.168.1.201'
self.user = 'sa'
self.pwd = '<PASSWORD>'
self.db = 'RoadCenter'
def __GetConnect(self):
        if not self.db:
            raise NameError("Database information is not configured")
self.conn = pymssql.connect(host=self.host, user=self.user, password=<PASSWORD>, database=self.db, charset="utf8")
cur = self.conn.cursor()
        if not cur:
            raise NameError("Failed to connect to the database")
else:
return cur
def ExecQuery(self, sql):
cur = self.__GetConnect()
cur.execute(sql)
resList = cur.fetchall()
        # The connection must be closed once the query is done
self.conn.close()
return resList
def ExecNonQuery(self, sql):
cur = self.__GetConnect()
cur.execute(sql)
self.conn.commit()
self.conn.close()
ms = MSSQL()
reslist = ms.ExecQuery("select count(*) from O_Transaction")
print(reslist)
# for i in reslist:
# print (i)
```
#### File: tornado-learn/learnmongodb/wtf2.py
```python
import tornado
import tornado.httpserver  # needed explicitly; `import tornado` alone does not load the submodule
from tornado.ioloop import IOLoop
from motor.motor_tornado import MotorClient
import datetime
from tornado import web
class MainHandler(web.RequestHandler):
def get(self):
db = self.settings['db']
db.collection.insert_one({'datetime':datetime.datetime.now()})
application = tornado.web.Application([
(r'/', MainHandler)
])
server = tornado.httpserver.HTTPServer(application)
server.bind(8888)
# Forks one process per CPU.
server.start(0)
# Now, in each child process, create a MotorClient.
application.settings['db'] = MotorClient().test
IOLoop.current().start()
```
#### File: tornado-learn/others/strangeself.py
```python
class A():
    def __init__(self, invalue):
        self.invalue = invalue
        print('a init invalue:{}'.format(invalue))
    def a(self):
        print('A a')
class B(A):
    def __init__(self, invalue):
        # A.__init__(self)
        # super(B, self).__init__(invalue)
        A.__init__(self, invalue)
        self.invalue = invalue**2
        print('b invalue value: {}'.format(self.invalue))
class Rectangle():
def __init__(self, w, h):
print('Rectangle init')
self.w = w
self.h = h
def area(self):
return self.w * self.h
def perimeter(self):
return 2 * (self.w + self.h)
class Square(Rectangle):
def __init__(self, s):
# super().__init__(s, s)
self.s = s
def area(self):
return self.s**2
# def a(self):
# raise NotImplementedError
if __name__ == '__main__':
    tmp = Square(10)
    print(tmp.area())
    l = B(100)
```
#### File: others/tcptest/tcpserverex.py
```python
import socketserver
from concurrent.futures.thread import ThreadPoolExecutor
class MyTCPHandler(socketserver.BaseRequestHandler):
"""
The request handler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
def handle(self):
# self.request is the TCP socket connected to the client
self.data = self.request.recv(1024).strip()
print("{} tcp handler wrote:".format(self.client_address[0]))
print(self.data)
# just send back the same data, but upper-cased
self.request.sendall(self.data.upper())
class MyUDPHandler(socketserver.BaseRequestHandler):
"""
This class works similar to the TCP handler class, except that
self.request consists of a pair of data and client socket, and since
there is no connection the client address must be given explicitly
when sending data back via sendto().
"""
def handle(self):
data = self.request[0].strip()
socket = self.request[1]
print("{} udphandler wrote:".format(self.client_address[0]))
print(data)
socket.sendto(data.lower(), self.client_address)
def tcp_task(host, port):
print(host, port)
server = socketserver.TCPServer((host, port), MyTCPHandler)
try:
print('start tcp server')
server.serve_forever()
except Exception as e:
print(e)
def udp_task(host, port):
print(host, port)
server = socketserver.UDPServer((host, port), MyUDPHandler)
try:
print('start udp server')
server.serve_forever()
except Exception as e:
print(e)
if __name__ == "__main__":
HOST, PORT = "127.0.0.1", 8888
executor = ThreadPoolExecutor()
a = executor.submit(tcp_task, HOST, PORT)
b = executor.submit(udp_task, HOST, PORT)
```
#### File: others/tcptest/udpserverex2.py
```python
import socketserver
from multiprocessing import Process, Pool
class MyTCPHandler(socketserver.BaseRequestHandler):
"""
The request handler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
def handle(self):
# self.request is the TCP socket connected to the client
self.data = self.request.recv(1024).strip()
print("{} wrote:".format(self.client_address[0]))
print(self.data)
# just send back the same data, but upper-cased
self.request.sendall(self.data.upper())
class MyUDPHandler(socketserver.BaseRequestHandler):
"""
This class works similar to the TCP handler class, except that
self.request consists of a pair of data and client socket, and since
there is no connection the client address must be given explicitly
when sending data back via sendto().
"""
def handle(self):
data = self.request[0].strip()
socket = self.request[1]
print("{} wrote:".format(self.client_address[0]))
print(data)
socket.sendto(data.lower(), self.client_address)
def tcp_task():
server = socketserver.TCPServer(('0.0.0.0', 8888), MyTCPHandler)
try:
print('start tcp server')
server.serve_forever()
except Exception as e:
print(e)
def udp_task():
    server = socketserver.UDPServer(('0.0.0.0', 8888), MyUDPHandler)  # was MyTCPHandler, the wrong handler for UDP
try:
server.serve_forever()
except Exception as e:
print(e)
if __name__ == "__main__":
HOST, PORT = "localhost", 8888
# p1 = Process(target=udp_task)
# p1.start()
# p1.join()
# p = Process(target=tcp_task)
# p.start()
# p.join()
# p = Pool(4)
# for i in range(4):
# p.apply_async(httpd_task)
# p.close()
# p.join()
# Create the server, binding to localhost on port 9999
with socketserver.UDPServer((HOST, PORT), MyUDPHandler) as server:
print('start udp server')
server.serve_forever()
# with socketserver.TCPServer((HOST, PORT), MyTCPHandler) as server1:
# server1.serve_forever()
```
#### File: tornado-learn/others/threadtest.py
```python
import math
from threading import Thread
from time import sleep
def calc_fact(num):
sleep(0.001)
math.factorial(num)
num = 600000
t = Thread(target=calc_fact, daemon=True, args=[num])
print("About to calculate: {}!".format(num))
t.start()
print("Calculating...")
t.join()
print("Calculated")
```
#### File: tornado-learn/pythonnetworklearn/sqrl.py
```python
# Newton's (Babylonian) method: repeatedly replace y with the average of y and x/y.
def sqrt(x):
    y = 1.0
    while abs(y*y - x) > 1e-6:
        print(y)
        y = (y + x/y) / 2
    return y
if __name__ == '__main__':
    print(sqrt(99))
```
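The loop above is Newton's method applied to y² - x = 0, so it converges quadratically for x > 0. A quick check against `math.sqrt` (a sketch, assuming `sqrt` from the file above is in scope):
```python
# The Newton iteration should match math.sqrt to well within the loop tolerance.
import math

for a in [2, 99, 12345.6]:
    assert abs(sqrt(a) - math.sqrt(a)) < 1e-5
```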
#### File: tornado-learn/tornadomysqlexamp/tmysqlstep1.py
```python
from __future__ import print_function
from tornado import ioloop, gen
import tornado_mysql
@gen.coroutine
def main():
conn = yield tornado_mysql.connect(host='192.168.1.155', port=3306,charset='utf8', user='root', passwd='<PASSWORD>!@', db='wpnbmdb')
cur = conn.cursor()
yield cur.execute("SELECT * from zs_yjsf")
print(cur.description)
for row in cur:
print(row)
cur.close()
conn.close()
ioloop.IOLoop.current().run_sync(main)
``` |
{
"source": "10686142/recipe-app-api",
"score": 3
} |
#### File: core/tests/test_models.py
```python
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email="<EMAIL>", password="<PASSWORD>"):
"""Create sample user"""
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_new_superuser(self):
"""Test creating a new superuser"""
user = get_user_model().objects.create_superuser(
'<EMAIL>',
'test123'
)
# Both need to be true as superuser
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_create_user_with_email_successful(self):
"""Test creating a new user with an email is successful"""
email = '<EMAIL>'
password = '<PASSWORD>'
user = get_user_model().objects.create_user(
email=email,
password=password
)
# Make sure the email saved for the new user has been set correctly
self.assertEqual(user.email, email)
        # The check_password method is a built-in Django User method that
        # checks whether the given password belongs to the user it's called on
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test if the email for a new user is normalized"""
email = '<EMAIL>'
user = get_user_model().objects.create_user(email, 'test123')
# Check if it's lowercased
self.assertEqual(user.email, email.lower())
# Make sure we raise an error when no email address is provided
def test_new_user_invalid_email(self):
"""Test creating user with no email raises error"""
# The "with" means that anything in there needs to raise the ValueError,
# else meaning that this test will fail.
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
    # Make sure the str representation is the actual model.name
def test_tag_str(self):
"""Test the tag string representation"""
tag = models.Tag.objects.create(
user=sample_user(),
name="Vegan"
)
self.assertEqual(str(tag), tag.name)
    # Test that our model is converted correctly into a string representation
def test_ingredient_str(self):
"""Test ingredient string representation"""
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name="Cucumber"
)
self.assertEqual(str(ingredient), ingredient.name)
    # Test that our model is converted correctly into a string representation
def test_recipe_str(self):
"""Test recipe string representation"""
# All the mandatory fields
recipe = models.Recipe.objects.create(
user=sample_user(),
title="Steak and mushroom sauce",
time_minutes=5,
price=5.00
)
self.assertEqual(str(recipe), recipe.title)
``` |
{
"source": "1069066484/ML_hw2",
"score": 3
} |
#### File: 1069066484/ML_hw2/PCA.py
```python
from sklearn.preprocessing import Normalizer
from numpy import linalg as LA
import numpy as np
import os
class PCA_ling:
def __init__(self, alg='eig_solve', n_components=None):
"""
@Param alg: should be one between 'eig_solve' and 'svd', using two approaches to solve PCA.
@Param n_components: number of components to reduce the data to. If n_components is greater than dimension of
the data or is not set, then n_components would be adjusted to the right dimensions of the data.
"""
self.alg = alg
self.normalizer = None
self.n_components = n_components
self.selected_var = None
def _decentralize(self, data):
means = np.repeat(np.mean(data,0),data.shape[0],0).reshape(data.shape[1],-1).T
return data - means
def _get_XtX(self, data):
X = self._decentralize(data)
return [np.matmul( X.T, X) * (1 / (data.shape[0]-1)) ,X]
def _decide_components(self, data):
self.n_components = data.shape[1] if self.n_components is None or self.n_components > data.shape[1] else self.n_components
def _fit_transform_eig_solve(self, data):
[XtX, X] = self._get_XtX(data)
w, v = LA.eig(XtX)
wv = list(zip(w, v.T))
sorted_wv = sorted(wv, key=lambda x: -abs(x[0]))
sorted_w = [abs(wv_i[0]) for wv_i in sorted_wv]
sorted_v = [wv_i[1] for wv_i in sorted_wv]
selected_vs = np.vstack(sorted_v[:self.n_components]).T
self.selected_var = np.sum(sorted_w[:self.n_components]) / np.sum(sorted_w)
return np.matmul(X, selected_vs)
def _fit_transform_svd(self, data):
[XtX, X] = self._get_XtX(data)
U, ds, Vt = LA.svd(X)
# XtX * V /{by_col} ds == U
# eig vals of XtX are squared sigs of X
# U are eigs of XXt, V are eigs of XtX
selected_vs = Vt[:self.n_components].T
ds **= 2
self.selected_var = np.sum(ds[:self.n_components]) / np.sum(ds)
return np.matmul(X, selected_vs)
def fit_transform(self, data):
self._decide_components(data)
if self.alg == 'eig_solve':
return self._fit_transform_eig_solve(data)
elif self.alg == 'svd':
return self._fit_transform_svd(data)
else:
raise Exception("Invalid algorithm alg="+self.alg+"\nalg should be one among eig_solve and svd\n")
def selected_components(self):
return self.selected_var
def _read_test_data():
import data_helper
import global_defs
path_test_data = data_helper.npfilename(os.path.join(global_defs.PATH_SAVING_FOLDER, 'pca_test_data'))
if os.path.exists(path_test_data):
return np.load(path_test_data)
test_data = data_helper.read_features(use_dl=True)[:20,:50]
np.save(path_test_data, test_data)
return test_data
def _test_PCA_ling():
data = _read_test_data()
n_components = 3
print("ori_data.shape=", data.shape," n_components=", n_components)
for alg in ['eig_solve', 'svd']:
pca_ling = PCA_ling(alg=alg, n_components=n_components)
reduced1 = pca_ling.fit_transform(data)
print(alg, reduced1.shape, pca_ling.selected_components())
#print(reduced1)
from sklearn.decomposition import PCA
pca_sklearn = PCA(n_components=n_components)
reduced1 = pca_sklearn.fit_transform(data)
print(reduced1.shape, np.sum(pca_sklearn.explained_variance_ratio_[:n_components]))
#print(reduced1)
if __name__ == '__main__':
_test_PCA_ling()
``` |
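The two algorithms agree because the right-singular vectors of the centred X are the eigenvectors of XᵀX, and the squared singular values are its eigenvalues (the 1/(n-1) factor cancels in the variance ratio). A small synthetic check, assuming numpy is available (component signs may differ between the paths, so only the explained-variance ratio is compared):
```python
# eig_solve and svd paths should report the same explained-variance ratio.
import numpy as np

rng = np.random.RandomState(0)
data = rng.randn(30, 5)
p_eig = PCA_ling(alg='eig_solve', n_components=2)
p_svd = PCA_ling(alg='svd', n_components=2)
p_eig.fit_transform(data)
p_svd.fit_transform(data)
assert abs(p_eig.selected_components() - p_svd.selected_components()) < 1e-8
```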
{
"source": "106aRoboCupSim/simatch",
"score": 2
} |
#### File: 106a/src/goalie.py
```python
import rospy
import numpy as np
from realtimepseudoAstar import plan
from globaltorobotcoords import transform
from nubot_common.msg import ActionCmd, VelCmd, OminiVisionInfo, BallInfo, ObstaclesInfo, RobotInfo
from nubot_common.msg import BallIsHolding
import sys
# For plotting
# import math
# import matplotlib.pyplot as plt
# Initialize publisher and rate
pub = 0
ROBOT_NAME = 'rival_goalie'
if int(sys.argv[1]) == 0:
pub = rospy.Publisher('/NuBot1/nubotcontrol/actioncmd', ActionCmd, queue_size=1)
ROBOT_NAME = 'NuBot_goalie'
else:
pub = rospy.Publisher('/rival1/nubotcontrol/actioncmd', ActionCmd, queue_size=1)
rospy.init_node(ROBOT_NAME, anonymous=False)
hertz = 10
rate = rospy.Rate(hertz)
goalie_origin = np.array([-950, 0])
def line_to_goal(goal_pos, current_pos, obstacles):
if exists_clear_path(goal_pos, current_pos, obstacles):
        return np.arctan2(goal_pos[1] - current_pos[1], goal_pos[0] - current_pos[0])
return False
def exists_clear_path(goal_pos, current_pos, obstacles):
AB = goal_pos - current_pos
for o in obstacles:
AC = o[:2] - current_pos
AD = AB * np.dot(AC, AB) / np.dot(AB, AB)
D = current_pos + AD
if np.linalg.norm(D - o[:2]) <= o[2]:
return False
return True
def in_range(robot_pos, ball_pos, thresh=100):
val = np.linalg.norm(robot_pos - ball_pos)
print(thresh, val)
return val < thresh
def should_pass(mate_pos, robot_pos, obstacles):
    # Deliberately ignores the obstacles argument for now: pass whenever the
    # straight line to the teammate is clear of (an empty set of) obstacles.
    obstacle_list = np.empty((0, 3), float)
    return exists_clear_path(mate_pos, robot_pos, obstacle_list)
isholding = 0
def holding_callback(data):
global isholding
isholding = int(data.BallIsHolding)
def callback(data):
print(data.robotinfo[1].isdribble)
#Get ball position in global frame
b = data.ballinfo
ball_pos = np.array([b.pos.x, b.pos.y])
#Get robot position and heading in global frame
r = data.robotinfo[0]
robot_pos = np.array([r.pos.x, r.pos.y])
theta = r.heading.theta
off1 = data.robotinfo[1]
off1_pos = np.array([off1.pos.x, off1.pos.y])
off1_theta = off1.heading.theta
#Get obstacle positions in global frame
if isholding:
obstacles = data.obstacleinfo
obstacle_list = np.empty((0,3), float)
obstacle_list = np.concatenate((obstacle_list, np.array([[-1150, -125, 50]])))
obstacle_list = np.concatenate((obstacle_list, np.array([[-1150, 125, 50]])))
for p in obstacles.pos:
obstacle_list = np.concatenate((obstacle_list, np.array([[p.x, p.y, 100]])))
else:
obstacles = data.obstacleinfo
obstacle_list = np.empty((0,3), float)
obstacle_list = np.concatenate((obstacle_list, np.array([[-1150, -125, 50]])))
obstacle_list = np.concatenate((obstacle_list, np.array([[-1150, 125, 50]])))
for p in obstacles.pos:
obstacle_list = np.concatenate((obstacle_list, np.array([[p.x, p.y, 15]])))
print(should_pass(off1_pos, robot_pos, obstacle_list))
if isholding:
t = np.array([-700, 0])
target = plan(t, robot_pos, obstacle_list, 100, 400)
thetaDes = np.arctan2(target[1] - robot_pos[1], target[0] - robot_pos[0]) - theta
#Convert target from global coordinate frame to robot coordinate frame for use by hwcontroller
if in_range(robot_pos, t, 150):
target = np.array([0, 0])
else:
target = transform(target[0], target[1], robot_pos[0], robot_pos[1], theta)
#Generate ActionCmd() and publish to hwcontroller
if should_pass(off1_pos, robot_pos, obstacle_list):
angle_to_other = np.arctan2(off1_pos[1] - robot_pos[1], off1_pos[0] - robot_pos[0]) - theta
action = ActionCmd()
action.target.x = target[0]
action.target.y = target[1]
action.maxvel = 250
action.handle_enable = 1
action.target_ori = (angle_to_other - theta) / 2
pub.publish(action)
rate.sleep()
if action.target_ori - np.pi/3 < theta < action.target_ori + np.pi/3:
action.strength = 10
action.shootPos = 1
pub.publish(action)
else:
action = ActionCmd()
action.target.x = target[0]
action.target.y = target[1]
action.maxvel = 250
action.handle_enable = 1
action.target_ori = -theta
pub.publish(action)
rate.sleep()
if np.abs(theta) < np.pi/4:
action.strength = 10
action.shootPos = 1
pub.publish(action)
elif in_range(robot_pos, ball_pos, 500) and in_range(ball_pos, goalie_origin, 600):
#Generate target position and heading in global frame from real-time psuedo A-star path planning algorithm
target = plan(ball_pos, robot_pos, obstacle_list[0:0], 100, 400)
thetaDes = np.arctan2(target[1] - robot_pos[1], target[0] - robot_pos[0]) - theta
#Convert target from global coordinate frame to robot coordinate frame for use by hwcontroller
target = transform(target[0], target[1], robot_pos[0], robot_pos[1], theta)
#Generate ActionCmd() and publish to hwcontroller
action = ActionCmd()
action.target.x = target[0]
action.target.y = target[1]
action.maxvel = 250
action.handle_enable = 1
action.target_ori = thetaDes
pub.publish(action)
rate.sleep()
elif not in_range(robot_pos, goalie_origin, 100):
#Generate target position and heading in global frame from real-time psuedo A-star path planning algorithm
target = plan(goalie_origin, robot_pos, obstacle_list, 100, 400)
thetaDes = np.arctan2(target[1] - robot_pos[1], target[0] - robot_pos[0]) - theta
#Convert target from global coordinate frame to robot coordinate frame for use by hwcontroller
target = transform(target[0], target[1], robot_pos[0], robot_pos[1], theta)
#Generate ActionCmd() and publish to hwcontroller
action = ActionCmd()
action.target.x = target[0]
action.target.y = target[1]
action.maxvel = 250
action.handle_enable = 1
action.target_ori = thetaDes
pub.publish(action)
rate.sleep()
else:
action = ActionCmd()
action.target.x = 0
action.target.y = 0
pub.publish(action)
rate.sleep()
pass
def listener():
robot = int(sys.argv[1])
print(robot, type(robot))
if robot == 0:
rospy.Subscriber("/NuBot1/omnivision/OmniVisionInfo", OminiVisionInfo, callback, queue_size=1)
rospy.Subscriber("/NuBot1/ballisholding/BallIsHolding", BallIsHolding, holding_callback, queue_size=1)
elif robot == 1:
rospy.Subscriber("/rival1/omnivision/OmniVisionInfo", OminiVisionInfo, callback, queue_size=1)
rospy.Subscriber("/rival1/ballisholding/BallIsHolding", BallIsHolding, holding_callback, queue_size=1)
else:
print("Call 0 for cyan and 1 for magenta")
rospy.spin()
if __name__ == '__main__':
try:
listener()
except rospy.ROSInterruptException:
pass
```
#### File: 106a/src/player_brain.py
```python
import rospy
import sys
import time
import numpy as np
from numpy import linalg as LA
import random
from realtimepseudoAstar import plan
from globaltorobotcoords import transform
from std_msgs.msg import String
from nubot_common.msg import ActionCmd, VelCmd, OminiVisionInfo, BallInfo, ObstaclesInfo, RobotInfo, BallIsHolding
ROBOT_NAME = 'NuBot' + str(sys.argv[1])
if str(sys.argv[2]) == '1':
ROBOT_NAME = 'rival' + str(sys.argv[1])
opponent_goal = np.array([1100.0, 0.0])
isdribble = 0
#Init ball handlers
ball_handler1 = (0, "NuBot1")
ball_handler2 = (0, "NuBot2")
ball_handler3 = (0, "NuBot3")
ball_handler4 = (0, "rival1")
ball_handler5 = (0, "rival2")
ball_handler6 = (0, "rival3")
ball_handler_current = "nobody"
shoot_range = 500
#Bare minimum is 50
obstacle_radius = 75
plan_radius = 300
random_obstacle_clipping = True
if str(sys.argv[1]) == '2':
my_id = 1
mate_id = 2
if str(sys.argv[1]) == '3':
my_id = 2
mate_id = 1
# For plotting
# import math
# import matplotlib.pyplot as plt
# Initialize publisher and rate
pub = rospy.Publisher('/' + str(ROBOT_NAME)+'/nubotcontrol/actioncmd', ActionCmd, queue_size=1)
rospy.init_node(str(ROBOT_NAME) + '_brain', anonymous=False)
hertz = 10
rate = rospy.Rate(hertz)
#rate2 = rospy.Rate(1)
# #For plotting path and path plan
# targets_generated_x = []
# targets_generated_y = []
# robot_position_x = []
# robot_position_y = []
# def plot_circle(x, y, size, color="-b"): # pragma: no cover
# deg = list(range(0, 360, 5))
# deg.append(0)
# xl = [x + size * math.cos(np.deg2rad(d)) for d in deg]
# yl = [y + size * math.sin(np.deg2rad(d)) for d in deg]
# plt.plot(xl, yl, color)
def callback(data):
def in_range(pos_1, pos_2, dist=300):
val = np.linalg.norm(pos_1 - pos_2)
return val < dist
def exists_clear_path(goal_pos, current_pos, obstacles):
AB = goal_pos - current_pos
for o in obstacles:
AC = o[:2] - current_pos
AD = AB * np.dot(AC, AB) / np.dot(AB, AB)
D = current_pos + AD
if np.linalg.norm(D - o[:2]) <= o[2]:
return False
return True
action = ActionCmd()
#Get ball position in global frame
b = data.ballinfo
ball_pos = np.array([b.pos.x, b.pos.y])
if np.abs(ball_pos[0]) > 1100 and np.abs(ball_pos[1]) < 125:
action = ActionCmd()
action.target.x = 0
action.target.y = 0
action.maxvel = 0
pub.publish(action)
#print('sleeping')
time.sleep(1.5)
#rate2.sleep()
#Get robot position and heading in global frame
r = data.robotinfo[my_id]
my_pos = np.array([r.pos.x, r.pos.y])
my_theta = r.heading.theta
#Get teammate position and heading in global frame
r_2 = data.robotinfo[mate_id]
mate_pos = np.array([r_2.pos.x, r_2.pos.y])
mate_theta = r.heading.theta
#Get obstacle positions in global frame
obstacles = data.obstacleinfo
obstacle_list = np.empty((0,3), float)
for p in obstacles.pos:
obstacle_list = np.concatenate((obstacle_list, np.array([[p.x, p.y, 75]])))
# In range to pass
passing = False
if isdribble and in_range(my_pos, mate_pos, dist=500):
dist_to_teammate = LA.norm(my_pos - mate_pos)
dist_to_goal = LA.norm(my_pos - opponent_goal)
ang_to_teammate = np.arctan2(mate_pos[1] - my_pos[1], mate_pos[0] - my_pos[0]) - my_theta
obstructed_goal = not exists_clear_path(opponent_goal, my_pos, obstacle_list)
obstructed_mate = not exists_clear_path(mate_pos, my_pos, obstacle_list)
if ((dist_to_teammate < dist_to_goal) \
or (obstructed_goal and not obstructed_mate)) \
and (dist_to_teammate > 200) \
and (ang_to_teammate-np.pi/30 < my_theta and my_theta < ang_to_teammate+np.pi/30):
target = my_pos
thetaDes = ang_to_teammate
action.maxvel = 250
action.shootPos = 1
action.strength = dist_to_teammate / 50
passing = True
def has_ball_priority(my_pos, mate_pos):
my_dist_to_ball = LA.norm(my_pos - ball_pos)
mate_dist_to_ball = LA.norm(mate_pos - ball_pos)
return my_dist_to_ball < mate_dist_to_ball
def team_has_ball():
return ball_handler_current[0:5] == ROBOT_NAME[0:5]
# WINGMAN: GET OPEN
if not passing and not has_ball_priority(my_pos, mate_pos) and team_has_ball():
target = [mate_pos[0]+(opponent_goal[0]-mate_pos[0])/2, 100-mate_pos[1]/2]
thetaDes = np.arctan2(mate_pos[1] - my_pos[1], mate_pos[0] - my_pos[0]) - my_theta
target = transform(target[0], target[1], my_pos[0], my_pos[1], my_theta)
action.maxvel = 300
elif not passing and not has_ball_priority(my_pos, mate_pos):
target = plan(ball_pos, my_pos, obstacle_list, obstacle_radius, 400)
thetaDes = np.arctan2(target[1] - my_pos[1], target[0] - my_pos[0]) - my_theta
target = transform(target[0], target[1], my_pos[0], my_pos[1], my_theta)
action.maxvel = 300
# AGGRESSOR: GET BALL
if not passing and has_ball_priority(my_pos, mate_pos):
action.maxvel = 250
#print(obstacle_list)
#print(r.isdribble)
target = plan(ball_pos, my_pos, obstacle_list, obstacle_radius, 400)
thetaDes = np.arctan2(target[1] - my_pos[1], target[0] - my_pos[0]) - my_theta
#print(isdribble)
clear_shot = exists_clear_path(opponent_goal, my_pos, obstacle_list)
if isdribble and np.linalg.norm(opponent_goal - my_pos) > shoot_range:
target = plan(opponent_goal, my_pos, obstacle_list, obstacle_radius, 400)
thetaDes = np.arctan2(opponent_goal[1] - my_pos[1], opponent_goal[0] - my_pos[0]) - my_theta
elif clear_shot and isdribble and np.linalg.norm(opponent_goal - my_pos) < shoot_range:
thetaDes = np.arctan2(opponent_goal[1] - my_pos[1], opponent_goal[0] - my_pos[0]) - my_theta
target = my_pos
action.shootPos = 1
action.strength = 200
elif isdribble and np.linalg.norm(opponent_goal - my_pos) < shoot_range:
dist_to_teammate = LA.norm(my_pos - mate_pos)
dist_to_goal = LA.norm(my_pos - opponent_goal)
ang_to_teammate = np.arctan2(mate_pos[1] - my_pos[1], mate_pos[0] - my_pos[0]) - my_theta
obstructed_goal = not exists_clear_path(opponent_goal, my_pos, obstacle_list)
obstructed_mate = not exists_clear_path(mate_pos, my_pos, obstacle_list)
target = my_pos
thetaDes = ang_to_teammate
action.maxvel = 250
action.shootPos = 1
action.strength = dist_to_teammate / 50
passing = True
target = transform(target[0], target[1], my_pos[0], my_pos[1], my_theta)
#Generate target position and heading in global frame from real-time psuedo A-star path planning algorithm
# target = plan(ball_pos, my_pos, obstacle_list, 100, 400)
# thetaDes = np.arctan2(target[1] - my_pos[1], target[0] - my_pos[0])
# For plotting
# my_position_x.append(my_pos[0])
# my_position_y.append(my_pos[1])
# targets_generated_x.append(target[0])
# targets_generated_y.append(target[1])
#Convert target from global coordinate frame to robot coordinate frame for use by hwcontroller
#Generate ActionCmd() and publish to hwcontroller
action.target.x = target[0]
action.target.y = target[1]
action.maxw = 300
action.handle_enable = 1
action.target_ori = thetaDes
pub.publish(action)
rate.sleep()
# # For plotting path and path plan of robot, after 100 pathing iterations
# if len(targets_generated_x) > 100:
# plt.plot(targets_generated_x, targets_generated_y, 'g*--', label='Dynamically Generated Path Plan')
# plt.plot(robot_position_x, robot_position_y, 'xr-', label='Actual Robot Path')
# plt.legend()
# # fig, ax = plt.subplots()
# # ax.scatter(targets_generated_x, targets_generated_y)
# # ax.scatter(robot_position_x, robot_position_y)
# # for i in range(len(targets_generated_x)):
# # ax.annotate(i, (targets_generated_x[i], targets_generated_y[i]))
# # ax.annotate(i, (robot_position_x[i], robot_position_y[i]))
# # for o in obstacle_list:
# # plot_circle(o[0], o[1], o[2])
# #print(targets_generated)
# plt.show()
# time.sleep(100)
def holdingballcallback(data):
global isdribble
isdribble = data.BallIsHolding
def update_holder1(data):
global ball_handler1
ball_handler1 = (data.BallIsHolding, "NuBot1")
update_holder_consolidate()
def update_holder2(data):
global ball_handler2
ball_handler2 = (data.BallIsHolding, "NuBot2")
update_holder_consolidate()
def update_holder3(data):
global ball_handler3
ball_handler3 = (data.BallIsHolding, "NuBot3")
update_holder_consolidate()
def update_holder4(data):
global ball_handler4
ball_handler4 = (data.BallIsHolding, "rival1")
update_holder_consolidate()
def update_holder5(data):
global ball_handler5
ball_handler5 = (data.BallIsHolding, "rival2")
update_holder_consolidate()
def update_holder6(data):
global ball_handler6
ball_handler6 = (data.BallIsHolding, "rival3")
update_holder_consolidate()
def update_holder_consolidate():
global ball_handler_current
l = [ball_handler1, ball_handler2, ball_handler3, ball_handler4, ball_handler5, ball_handler6]
for i in range(6):
a = l[i]
data, name = a
if int(data) == 1:
ball_handler_current = name
return
ball_handler_current = "nobody"
def listener():
rospy.Subscriber("/" + str(ROBOT_NAME) + "/omnivision/OmniVisionInfo", OminiVisionInfo, callback, queue_size=1)
rospy.Subscriber("/" + str(ROBOT_NAME) + "/ballisholding/BallIsHolding", BallIsHolding, holdingballcallback, queue_size=1)
rospy.Subscriber("/NuBot1/ballisholding/BallIsHolding", BallIsHolding, update_holder1, queue_size=1)
rospy.Subscriber("/NuBot2/ballisholding/BallIsHolding", BallIsHolding, update_holder2, queue_size=1)
rospy.Subscriber("/NuBot3/ballisholding/BallIsHolding", BallIsHolding, update_holder3, queue_size=1)
rospy.Subscriber("/rival1/ballisholding/BallIsHolding", BallIsHolding, update_holder4, queue_size=1)
rospy.Subscriber("/rival2/ballisholding/BallIsHolding", BallIsHolding, update_holder5, queue_size=1)
rospy.Subscriber("/rival3/ballisholding/BallIsHolding", BallIsHolding, update_holder6, queue_size=1)
rospy.spin()
if __name__ == '__main__':
try:
listener()
except rospy.ROSInterruptException:
pass
```
#### File: 106a/src/reset_path_planning_demo.py
```python
import rospy
from gazebo_msgs.msg import ModelState
import numpy as np
from nubot_common.msg import ActionCmd, VelCmd, OminiVisionInfo, BallInfo, ObstaclesInfo, RobotInfo
import time
pub = rospy.Publisher('/gazebo/set_model_state', ModelState, queue_size=1)
rospy.init_node('ball_manager', anonymous=True)
def callback(data):
b = data.ballinfo
ball_x = b.pos.x/100
ball_y = b.pos.y/100
if abs(ball_x) >= 11 or abs(ball_y) >= 7:
resetBall = ModelState()
resetBall.model_name = 'football'
resetBall.pose.position.x = 0.0
resetBall.pose.position.y = 0.0
resetBall.pose.position.z = 0.0
resetBall.pose.orientation.x = 0.0
resetBall.pose.orientation.y = 0.0
resetBall.pose.orientation.z = 0.0
resetBall.pose.orientation.w = 0.0
pub.publish(resetBall)
reset_nubot = ModelState()
reset_nubot.model_name = 'NuBot1'
reset_nubot.pose.position.x = -10.5
reset_nubot.pose.position.y = 0
reset_nubot.pose.position.z = 0.0
reset_nubot.pose.orientation.x = 0.0
reset_nubot.pose.orientation.y = 0.0
reset_nubot.pose.orientation.z = 0.0
reset_nubot.pose.orientation.w = 0.0
pub.publish(reset_nubot)
def listener():
rospy.Subscriber("/NuBot1/omnivision/OmniVisionInfo", OminiVisionInfo, callback, queue_size=1)
rospy.spin()
if __name__ == '__main__':
try:
listener()
except rospy.ROSInterruptException:
pass
``` |
{
"source": "106research/research",
"score": 3
} |
#### File: 106research/research/app.py
```python
import os
from flask import Flask, request, send_file, send_from_directory, session
from werkzeug.utils import secure_filename
# belows include self-define libs and func
from wav_to_STT import input_filename
from jiebaCut import func_cut
from chatbot_response import Chat_with_Bot
# aboves include self-define libs and func
import numpy as np
import json
from chatbot import chatbot
project = chatbot.Chatbot()
project.main(['--modelTag', '1108', '--test', 'daemon'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'uploads/'
app.config['ALLOWED_EXTENSIONS'] = set(['wave', 'wav'])
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
@app.route("/", methods=['get', 'POST'])
def index():
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# return redirect(url_for('uploaded_file',filename = filename))
#wav2sep(filename)
# below finished word translate and cutting
asking = func_cut(input_filename(filename))
responsing = Chat_with_Bot(asking, project)
print(responsing)
# above print the predition of response without tag
ans = (filename)
# below decode json from nvidia digits output
# print(ans.decode('utf8').replace("\n"," "))
# return ans.decode('utf8')
return (responsing)
return '''
    <!doctype html>
<title>test upload</title>
<h1>Upload NoTag</h1>
<form action="" method="post" enctype=multipart/form-data>
<p><input type=file name=file>
<input type=submit name=upload></p>
</form>
'''
@app.route("/#")
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080)
```
#### File: 106research/research/chatbot_response.py
```python
def Chat_with_Bot(sentence_in, project):
# project = chatbot.Chatbot()
# project.main(['--modelTag', '1108', '--test', 'daemon'])
# --modelTag 1108
# --test daemon
answer = project.daemonPredict(sentence=sentence_in)
# project.daemonClose()
del project
return answer
if __name__ == "__main__":
print(Chat_with_Bot())
``` |
{
"source": "10779164/chaostoolkit-ansible",
"score": 2
} |
#### File: chaostoolkit_ansible/machine/actions.py
```python
import os
import json
import time
from time import sleep
from chaoslib.exceptions import FailedActivity
from chaoslib.types import Configuration, Secrets
from logzero import logger
from .. import ansible_api_client
# from ..types import SaltStackResponse
#from .constants import OS_LINUX, OS_WINDOWS
from .constants import BURN_CPU, FILL_DISK, NETWORK_UTIL, \
BURN_IO, KILLALL_PROCESSES, KILL_PROCESS, SHELL
__all__ = ["burn_cpu", "fill_disk", "network_latency", "burn_io",
"network_loss", "network_corruption", "network_advanced",
"killall_processes", "kill_process","shell"]
def burn_cpu(host: str = None,
user: str = "root",
execution_duration: str = "60",
configuration: Configuration = None):
"""
burn CPU up to 100% at random machines.
Parameters
----------
host: str
Filter the virtual machines. If the filter is omitted all machines in
the subscription will be selected as potential chaos candidates.
execution_duration : str, optional
Duration of the stress test (in seconds) that generates high CPU usage.
Defaults to 60 seconds.
configuration : Configuration
Chaostoolkit Configuration
secrets : Secrets
Chaostoolkit Secrets
"""
logger.debug("Start burn_cpu")
param = dict()
param["duration"] = execution_duration
param["host"] = host
return __default_ansible_experiment__(host=host,
execution_duration=execution_duration,
param=param,
experiment_type=BURN_CPU,
configuration=configuration,
)
def fill_disk(host: str = None,
user: str = "root",
execution_duration: str = "120",
size: str = "1000",
configuration: Configuration = None):
"""
    Fill the disk with random data. (This scenario is not fully supported yet.)
Parameters
----------
host: str
Filter the virtual machines. If the filter is omitted all machines in
the subscription will be selected as potential chaos candidates.
execution_duration : str, optional
Lifetime of the file created. Defaults to 120 seconds.
size : str
Size of the file created on the disk. Defaults to 1GB(1000M).
configuration : Configuration
Chaostoolkit Configuration
secrets : Secrets
Chaostoolkit Secrets
"""
logger.debug(
"Start fill_disk: configuration='{}', host='{}'".format(
configuration, host))
param = dict()
param["execution_duration"] = execution_duration
param["size"] = size
return __default_ansible_experiment__(host=host,
execution_duration=execution_duration,
param=param,
experiment_type=FILL_DISK,
configuration=configuration,
)
def burn_io(host: str = None,
execution_duration: str = "60",
configuration: Configuration = None):
"""
Increases the Disk I/O operations per second of the virtual machine.
Parameters
----------
host: str
Filter the virtual machines. If the filter is omitted all machines in
the subscription will be selected as potential chaos candidates.
execution_duration : str, optional
Lifetime of the file created. Defaults to 120 seconds.
configuration : Configuration
Chaostoolkit Configuration
secrets : Secrets
Chaostoolkit Secrets
"""
logger.debug(
"Start burn_io: configuration='{}', host='{}'".format(
configuration, host))
param = dict()
param["duration"] = execution_duration
param["host"] = host
return __default_ansible_experiment__(host=host,
execution_duration=execution_duration,
param=param,
experiment_type=BURN_IO,
configuration=configuration,
)
def network_advanced(host: str = None,
execution_duration: str = "60",
command: str = "",
device: str = "eth0",
configuration: Configuration = None):
"""
    Run a customized traffic-control (tc) operation on the virtual machine.
    No Windows support for now.
Parameters
----------
host: str
Filter the virtual machines. If the filter is omitted all machines in
the subscription will be selected as potential chaos candidates.
    execution_duration : str, optional
        Duration of the network disturbance (in seconds). Defaults to
        60 seconds.
    command : str
        The tc command, e.g. "loss 15%"
configuration : Configuration
Chaostoolkit Configuration
secrets : Secrets
Chaostoolkit Secrets
"""
logger.debug(
"Start network_advanced: configuration='{}', host='{}'".format(
configuration, host))
param = dict()
param["duration"] = execution_duration
param["param"] = command
param["device"] = device
param["host"] = host
return __default_ansible_experiment__(host=host,
execution_duration=execution_duration,
param=param,
experiment_type=NETWORK_UTIL,
configuration=configuration,
)
def network_loss(host: str = None,
execution_duration: str = "60",
loss_ratio: str = "5%",
device: str = "eth0",
configuration: Configuration = None):
"""
    Apply network packet loss on the virtual machine via Linux tc.
    No Windows support for now.
Parameters
----------
host: str
Filter the virtual machines. If the filter is omitted all machines in
the subscription will be selected as potential chaos candidates.
    execution_duration : str, optional
        Duration of the packet loss (in seconds). Defaults to 60 seconds.
    loss_ratio : str
        Packet loss ratio, e.g. "30%". Defaults to "5%".
configuration : Configuration
Chaostoolkit Configuration
secrets : Secrets
Chaostoolkit Secrets
"""
logger.debug(
"Start network_advanced: configuration='{}', host='{}'".format(
configuration, host))
param = dict()
param["duration"] = execution_duration
param["param"] = "loss " + loss_ratio
param["device"] = device
param["host"] = host
return __default_ansible_experiment__(host=host,
execution_duration=execution_duration,
param=param,
experiment_type=NETWORK_UTIL,
configuration=configuration,
)
def network_corruption(host: str = None,
execution_duration: str = "60",
corruption_ratio: str = "5%",
device: str = "eth0",
configuration: Configuration = None):
"""
    Apply network packet corruption on the virtual machine via Linux tc.
    No Windows support for now.
Parameters
----------
host: str
Filter the virtual machines. If the filter is omitted all machines in
the subscription will be selected as potential chaos candidates.
    execution_duration : str, optional
        Duration of the packet corruption (in seconds). Defaults to
        60 seconds.
    corruption_ratio : str
        Packet corruption ratio, e.g. "30%". Defaults to "5%".
configuration : Configuration
Chaostoolkit Configuration
secrets : Secrets
Chaostoolkit Secrets
"""
logger.debug(
"Start network_corruption: configuration='{}', "
"host='{}'".format(configuration, host))
param = dict()
param["duration"] = execution_duration
param["param"] = "corrupt " + corruption_ratio
param["device"] = device
param["host"] = host
    return __default_ansible_experiment__(host=host,
execution_duration=execution_duration,
param=param,
experiment_type=NETWORK_UTIL,
configuration=configuration
)
def network_latency(host: str = None,
execution_duration: str = "60",
delay: str = "1000ms",
variance: str = "500ms",
ratio: str = "",
device: str = "eth0",
configuration: Configuration = None):
"""
Increases the response time of the virtual machine.
Parameters
----------
host: str
Filter the virtual machines. If the filter is omitted all machines in
the subscription will be selected as potential chaos candidates.
    execution_duration : str, optional
        Duration of the added latency (in seconds). Defaults to 60 seconds.
    delay : str
        Added delay in ms. Defaults to 1000ms.
    variance : str
        Variance of the delay in ms. Defaults to 500ms.
    ratio : str, optional
        The percentage of packets affected by the delay variance,
        e.g. "5%". Defaults to "".
configuration : Configuration
Chaostoolkit Configuration
secrets : Secrets
Chaostoolkit Secrets
"""
logger.debug(
"Start network_latency: configuration='{}', host='{}'".format(
configuration, host))
param = dict()
param["duration"] = execution_duration
param["param"] = "delay " + delay + " " + variance + " " + ratio
param["device"] = device
param["host"] = host
return __default_ansible_experiment__(host=host,
execution_duration=execution_duration,
param=param,
experiment_type=NETWORK_UTIL,
configuration=configuration
)
def killall_processes(host: str = None,
execution_duration: str = "60",
process_name: str = None,
configuration: Configuration = None,
signal: str = ""):
"""
The killall utility kills processes selected by name
refer to https://linux.die.net/man/1/killall
Parameters
----------
host: str
Filter the virtual machines. If the filter is omitted all machines in
the subscription will be selected as potential chaos candidates.
    execution_duration : str, optional, defaults to "60"
        Not strictly needed, as the process is usually killed without
        delay; however you can set more seconds here to make the thread
        wait longer, extending the experiment execution in case you need
        more time to watch the observation metrics.
process_name : str
Name of the process to be killed
signal : str , default to ""
The signal of killall command, e.g. use -9 to force kill
configuration : Configuration
Chaostoolkit Configuration
secrets : Secrets
Chaostoolkit Secrets
"""
logger.debug(
"Start network_latency: configuration='{}', host='{}'".format(
configuration, host))
param = dict()
param["duration"] = execution_duration
param["process_name"] = process_name
param["signal"] = signal
param["host"] = host
return __default_ansible_experiment__(host=host,
execution_duration=execution_duration,
param=param,
experiment_type=KILLALL_PROCESSES,
configuration=configuration
)
def kill_process(host: str = None,
execution_duration: str = "60",
process: str = None,
configuration: Configuration = None,
signal: str = ""):
"""
kill -s [signal_as_below] [processname]
HUP INT QUIT ILL TRAP ABRT EMT FPE KILL BUS SEGV SYS PIPE ALRM TERM URG
STOP TSTP CONT CHLD TTIN TTOU IO XCPU XFSZ VTALRM PROF WINCH INFO USR1 USR2
Parameters
----------
host: str
Filter the virtual machines. If the filter is omitted all machines in
the subscription will be selected as potential chaos candidates.
    execution_duration : str, optional, defaults to "60"
        Not strictly needed, as the process is usually killed without
        delay; however you can set more seconds here to make the thread
        wait longer, extending the experiment execution in case you need
        more time to watch the observation metrics.
process : str
pid or process that kill command accepts
signal : str , default to ""
The signal of kill command, use kill -l for help
configuration : Configuration
Chaostoolkit Configuration
secrets : Secrets
Chaostoolkit Secrets
"""
logger.debug(
"Start network_latency: configuration='{}', host='{}'".format(
configuration, host))
param = dict()
param["duration"] = execution_duration
param["process_name"] = process
param["signal"] = signal
param["host"] = host
return __default_ansible_experiment__(host=host,
user="root",
execution_duration=execution_duration,
param=param,
experiment_type="kill_process",
configuration=configuration
)
def shell(host: str = None,
execution_duration: str = "1",
command: str = None,
configuration: Configuration = None,
):
"""
Execute the Ansible shell module
Parameters
----------
host: str
Filter the virtual machines. If the filter is omitted all machines in
the subscription will be selected as potential chaos candidates.
    execution_duration : str, optional, defaults to "1"
        Not used to control the command itself; you can set more seconds
        here to make the thread wait longer, extending the experiment
        execution in case you need more time to watch the observation
        metrics.
command : str
Shell command
configuration : Configuration
Chaostoolkit Configuration
secrets : Secrets
Chaostoolkit Secrets
"""
logger.debug(
"Start execute shell command: configuration='{}', host='{}'".format(
configuration, host))
param = dict()
param["duration"] = execution_duration
param["command"] = command
param["host"] = host
return __shell_ansible_experiment__(host=host,
user="root",
execution_duration=execution_duration,
param=param,
experiment_type=SHELL,
configuration=configuration
)
###############################################################################
# Private helper functions
###############################################################################
def __default_ansible_experiment__(host: str = None,
                                   user: str = "root",
                                   execution_duration: str = "60",
                                   param: dict = None,
                                   experiment_type: str = None,
                                   configuration: Configuration = None,
                                   ):
    # user = configuration['ansible_instance']['user']
    if host is None:
        raise FailedActivity("No host was found...")
    # render the experiment script and write it to a temporary file
    script_content = __construct_script_content__(experiment_type, param)
    cur_time = str(time.strftime('%Y-%m-%d_%H:%M:%S'))
    script_filename = experiment_type + cur_time + ".sh"
    script = "/tmp/" + script_filename
    with open(script, 'w+', encoding='UTF-8') as file:
        file.write(script_content)
    client = ansible_api_client()
    try:
        response = client.run_script(host, user, script)
        return response
    except Exception as e:
        raise FailedActivity(
            "failed to execute shell script via the Ansible API: " + str(e)
        )
def __shell_ansible_experiment__(host: str = None,
                                 user: str = "root",
                                 execution_duration: str = "60",
                                 param: dict = None,
                                 experiment_type: str = None,
                                 configuration: Configuration = None,
                                 ):
    if host is None:
        raise FailedActivity("No host was found...")
    # validate the raw command before str() conversion: str(None) is the
    # truthy string "None", so the original equality check could never fire
    command = param.get("command")
    if not command:
        raise FailedActivity("Command must not be empty...")
    command = str(command)
    client = ansible_api_client()
    try:
        response = client.run_cmd(host, user, command)
        return response
    except Exception as e:
        raise FailedActivity(
            "failed to execute shell command via the Ansible API: " + str(e)
        )
def __construct_script_content__(experiment_type, parameters):
    script_name = experiment_type + ".sh"
    cmd_param = '\n'.join(['='.join([k, "'" + v + "'"]) for k, v in parameters.items()])
    with open(os.path.join(os.path.dirname(__file__), "scripts", script_name)) as file:
        script_content = file.read()
    # prepend the parameter assignments (duration etc.) to the script body
    script_content = cmd_param + "\n" + script_content
    return script_content
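# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of invoking an action directly; the host name is a
# placeholder that assumes a matching entry in your Ansible inventory. In
# practice these actions are declared in a chaostoolkit experiment file.
if __name__ == "__main__":
    print(burn_cpu(host="my-test-host", execution_duration="30"))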
``` |
{
"source": "1079278593/TreasureChest",
"score": 2
} |
#### File: kernel-cache-tests/auxkc-kext-bind-to-pageablekc-codeless-kext/test.py
```python
import os
import KernelCollection
# codeless kexts are in the PRELINK_INFO in the pageableKC, but can be a dependency for auxKC kexts
def check(kernel_cache):
kernel_cache.buildKernelCollection("arm64", "/auxkc-kext-bind-to-pageablekc-codeless-kext/main.kc", "/auxkc-kext-bind-to-pageablekc-codeless-kext/main.kernel", None, [], [])
kernel_cache.analyze("/auxkc-kext-bind-to-pageablekc-codeless-kext/main.kc", ["-layout", "-arch", "arm64"])
assert len(kernel_cache.dictionary()["dylibs"]) == 1
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.kernel"
    # Now build a pageable cache using the baseline kernel collection
kernel_cache.buildPageableKernelCollection("arm64", "/auxkc-kext-bind-to-pageablekc-codeless-kext/pageable.kc", "/auxkc-kext-bind-to-pageablekc-codeless-kext/main.kc", "/auxkc-kext-bind-to-pageablekc-codeless-kext/extensions-pageablekc", ["com.apple.bar", "com.apple.codeless"], [])
kernel_cache.analyze("/auxkc-kext-bind-to-pageablekc-codeless-kext/pageable.kc", ["-layout", "-arch", "arm64"])
assert len(kernel_cache.dictionary()["dylibs"]) == 1
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.bar"
# Now build an aux cache using the baseline kernel collection
kernel_cache.buildAuxKernelCollection("arm64", "/auxkc-kext-bind-to-pageablekc-codeless-kext/aux.kc", "/auxkc-kext-bind-to-pageablekc-codeless-kext/main.kc", "/auxkc-kext-bind-to-pageablekc-codeless-kext/pageable.kc", "/auxkc-kext-bind-to-pageablekc-codeless-kext/extensions-auxkc", ["com.apple.foo"], [])
kernel_cache.analyze("/auxkc-kext-bind-to-pageablekc-codeless-kext/aux.kc", ["-layout", "-arch", "arm64"])
# Check the fixups
kernel_cache.analyze("/auxkc-kext-bind-to-pageablekc-codeless-kext/aux.kc", ["-fixups", "-arch", "arm64"])
assert len(kernel_cache.dictionary()["fixups"]) == 1
assert kernel_cache.dictionary()["fixups"]["0x4000"] == "kc(1) + 0x8000"
assert len(kernel_cache.dictionary()["dylibs"]) == 1
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.foo"
assert kernel_cache.dictionary()["dylibs"][0]["fixups"] == "none"
# [~]> xcrun -sdk iphoneos cc -arch arm64 -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-no_data_const -Wl,-rename_section,__TEXT,__text,__TEXT_EXEC,__text -Wl,-e,__start -Wl,-pagezero_size,0x0 -Wl,-pie main.c -o main.kernel
# [~]> xcrun -sdk iphoneos cc -arch arm64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-no_data_const foo.c -o extensions-auxkc/foo.kext/foo
# [~]> xcrun -sdk iphoneos cc -arch arm64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-no_data_const bar.c -o extensions-pageablekc/bar.kext/bar
# [~]> rm -r extensions-*/*.kext/*.ld
```
#### File: kernel-cache-tests/auxkc-pageablekc-vtable-patching-namespaces-locals/test.py
```python
import os
import KernelCollection
# This is the same as auxkc-pageablekc-vtable-patching-namespaces
# but this test has all local symbols instead of global symbols
# The kernel has class OSObject and subclass KernelClass
# foo.kext subclasses KernelClass to get Foo1, and subclasses that to get Foo2
# bar.kext subclasses Foo1 to get Bar1, and subclasses that to get Bar2
# In KernelClass the vtable layout is:
# [ ..., foo() kernelClassUsed0() ]
# In Foo1, the layout is:
# [ ..., foo() kernelClass_RESERVED0(), foo1Used0(), foo1Used1() ]
# In Foo2, the layout is:
# [ ..., foo() kernelClass_RESERVED0(), foo1Used0(), foo1_RESERVED1(), foo1_RESERVED2(), foo1_RESERVED3() ]
# In Bar1, the layout is:
# [ ..., foo() kernelClass_RESERVED0(), foo1Used0(), foo1_RESERVED1(), foo1_RESERVED2(), foo1_RESERVED3() ]
# In Bar2, the layout is:
# [ ..., foo() kernelClass_RESERVED0(), foo1Used0(), foo1_RESERVED1(), foo1_RESERVED2(), foo1_RESERVED3() ]
# All kext's will end up getting the vtable entry after foo() patched to kernelClassUsed0()
# Foo2, Bar1, Bar2, will also get the vtable entry after foo1Used0() patched to foo1Used1()
def findGlobalSymbolVMAddr(kernel_cache, dylib_index, symbol_name):
for symbol_and_addr in kernel_cache.dictionary()["dylibs"][dylib_index]["global-symbols"]:
if symbol_and_addr["name"] == symbol_name:
return symbol_and_addr["vmAddr"]
return None
def findLocalSymbolVMAddr(kernel_cache, dylib_index, symbol_name):
for symbol_and_addr in kernel_cache.dictionary()["dylibs"][dylib_index]["local-symbols"]:
if symbol_and_addr["name"] == symbol_name:
return symbol_and_addr["vmAddr"]
return None
def findFixupVMAddr(kernel_cache, fixup_name):
    for fixup_vmaddr, fixup_target in kernel_cache.dictionary()["fixups"].items():
if fixup_target == fixup_name:
return fixup_vmaddr
return None
def findPagableFixupVMAddr(kernel_cache, dylib_index, fixup_name):
    for fixup_vmaddr, fixup_target in kernel_cache.dictionary()["dylibs"][dylib_index]["fixups"].items():
if fixup_target == fixup_name:
return fixup_vmaddr
return None
def findAuxFixupVMAddr(kernel_cache, dylib_index, fixup_name):
    for fixup_vmaddr, fixup_target in kernel_cache.dictionary()["dylibs"][dylib_index]["fixups"].items():
if fixup_target == fixup_name:
return fixup_vmaddr
return None
def offsetVMAddr(vmAddr, offset):
    hex_int = int(vmAddr, 16)
    hex_int = hex_int + offset
    return ''.join([ '0x', hex(hex_int).upper()[2:] ])
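# Worked example (illustrative): offsetVMAddr("0x4000", 8) returns "0x4008",
# and offsetVMAddr("0x4000", -0x4000) returns "0x0".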
def check(kernel_cache):
enableLogging = False
kernel_cache.buildKernelCollection("x86_64", "/auxkc-pageablekc-vtable-patching-namespaces-locals/main.kc", "/auxkc-pageablekc-vtable-patching-namespaces-locals/main.kernel", "", [], [])
kernel_cache.analyze("/auxkc-pageablekc-vtable-patching-namespaces-locals/main.kc", ["-layout", "-arch", "x86_64"])
assert len(kernel_cache.dictionary()["dylibs"]) == 1
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.kernel"
# Get the addresses for the symbols we are looking at. This will make it easier to work out the fixup slots
kernel_cache.analyze("/auxkc-pageablekc-vtable-patching-namespaces-locals/main.kc", ["-symbols", "-arch", "x86_64"])
# From kernel, we want to know where the vtable is, and the foo() and kernelClassUsed0() slots in that vtable
# KernelClass::foo()
kernelClassFooVMAddr = findGlobalSymbolVMAddr(kernel_cache, 0, "__ZN1X11KernelClass3fooEv")
if enableLogging:
print "kernelClassFooVMAddr: " + kernelClassFooVMAddr
# KernelClass::kernelClassUsed0()
kernelClassUsed0VMAddr = findGlobalSymbolVMAddr(kernel_cache, 0, "__ZN1X11KernelClass16kernelClassUsed0Ev")
if enableLogging:
print "kernelClassUsed0VMAddr: " + kernelClassUsed0VMAddr
# Check the fixups
kernel_cache.analyze("/auxkc-pageablekc-vtable-patching-namespaces-locals/main.kc", ["-fixups", "-arch", "x86_64"])
# In vtable for Foo, we match the entry for Foo::foo() by looking for its value on the RHS of the fixup
kernelFooFixupAddr = findFixupVMAddr(kernel_cache, "kc(0) + " + kernelClassFooVMAddr + " : pointer64")
if enableLogging:
print "kernelFooFixupAddr: " + kernelFooFixupAddr
# Then the following fixup should be to KernelClass::kernelClassUsed0()
kernelFooNextFixupAddr = offsetVMAddr(kernelFooFixupAddr, 8)
if enableLogging:
print "kernelFooNextFixupAddr: " + kernelFooNextFixupAddr
assert kernel_cache.dictionary()["fixups"][kernelFooNextFixupAddr] == "kc(0) + " + kernelClassUsed0VMAddr + " : pointer64"
# From this point on, the vmAddr for __ZN1X11KernelClass16kernelClassUsed0Ev is an offset in to kc(0)
# so we want to turn it from a vmAddr to vmOffset by subtracting the base address of 0x4000 which is on __HIB
kernelClassUsed0VMOffset = offsetVMAddr(kernelClassUsed0VMAddr, -0x4000)
if enableLogging:
print "kernelClassUsed0VMOffset: " + kernelClassUsed0VMOffset
# -----------------------------------------------------------
    # Now build a pageable cache using the baseline kernel collection
kernel_cache.buildPageableKernelCollection("x86_64", "/auxkc-pageablekc-vtable-patching-namespaces-locals/pageable.kc", "/auxkc-pageablekc-vtable-patching-namespaces-locals/main.kc", "/auxkc-pageablekc-vtable-patching-namespaces-locals/extensions", ["com.apple.foo1", "com.apple.foo2"], [])
kernel_cache.analyze("/auxkc-pageablekc-vtable-patching-namespaces-locals/pageable.kc", ["-layout", "-arch", "x86_64"])
assert len(kernel_cache.dictionary()["dylibs"]) == 2
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.foo1"
assert kernel_cache.dictionary()["dylibs"][1]["name"] == "com.apple.foo2"
# Get the addresses for the symbols we are looking at. This will make it easier to work out the fixup slots
kernel_cache.analyze("/auxkc-pageablekc-vtable-patching-namespaces-locals/pageable.kc", ["-symbols", "-arch", "x86_64"])
# From foo1, find the vtable and its override of foo()
# Foo1::foo()
pageableFoo1FooVMAddr = findLocalSymbolVMAddr(kernel_cache, 0, "__ZN1X4Foo13fooEv")
if enableLogging:
print "pageableFoo1FooVMAddr: " + pageableFoo1FooVMAddr
pageableFoo1FooUsed0VMAddr = findLocalSymbolVMAddr(kernel_cache, 0, "__ZN1X4Foo19foo1Used0Ev")
if enableLogging:
print "pageableFoo1FooUsed0VMAddr: " + pageableFoo1FooUsed0VMAddr
pageableFoo1FooUsed1VMAddr = findLocalSymbolVMAddr(kernel_cache, 0, "__ZN1X4Foo19foo1Used1Ev")
if enableLogging:
print "pageableFoo1FooUsed1VMAddr: " + pageableFoo1FooUsed1VMAddr
# From foo2, find the vtable and its override of foo()
# Foo2::foo()
pageableFoo2FooVMAddr = findLocalSymbolVMAddr(kernel_cache, 1, "__ZN1X4Foo23fooEv")
if enableLogging:
print "pageableFoo2FooVMAddr: " + pageableFoo2FooVMAddr
# Also find Foo2::foo1Used0() as it overrides foo1Used0 from the superclass
pageableFoo2FooUsed0VMAddr = findLocalSymbolVMAddr(kernel_cache, 1, "__ZN1X4Foo29foo1Used0Ev")
if enableLogging:
print "pageableFoo2FooUsed0VMAddr: " + pageableFoo2FooUsed0VMAddr
# Check the fixups
kernel_cache.analyze("/auxkc-pageablekc-vtable-patching-namespaces-locals/pageable.kc", ["-fixups", "-arch", "x86_64"])
kernel_cache.dictionary()["fixups"] == "none"
# --- foo1.kext ---
# The vtable we have is [ ..., foo(), kernelClass_RESERVED0(), foo1Used0(), foo1Used1() ]
# and we want [ ..., foo(), kernelClassUsed0(), foo1Used0(), foo1Used1() ]
# In vtable for Foo1, we match the entry for Foo1::foo() by looking for its value on the RHS of the fixup
pageableFoo1FooFixupAddr = findPagableFixupVMAddr(kernel_cache, 0, "kc(1) + " + pageableFoo1FooVMAddr)
if enableLogging:
print "pageableFoo1FooFixupAddr: " + pageableFoo1FooFixupAddr
# Then the following fixup should be to KernelClass::kernelClassUsed0()
pageableFoo1FooNextFixupAddr = offsetVMAddr(pageableFoo1FooFixupAddr, 8)
if enableLogging:
print "pageableFoo1FooNextFixupAddr: " + pageableFoo1FooNextFixupAddr
assert kernel_cache.dictionary()["dylibs"][0]["fixups"][pageableFoo1FooNextFixupAddr] == "kc(0) + " + kernelClassUsed0VMOffset
# Then we should have foo1Used0()
pageableFoo1FooNextFixupAddr = offsetVMAddr(pageableFoo1FooFixupAddr, 16)
if enableLogging:
print "pageableFoo1FooNextFixupAddr: " + pageableFoo1FooNextFixupAddr
assert kernel_cache.dictionary()["dylibs"][0]["fixups"][pageableFoo1FooNextFixupAddr] == "kc(1) + " + pageableFoo1FooUsed0VMAddr
# And then foo1Used1()
pageableFoo1FooNextFixupAddr = offsetVMAddr(pageableFoo1FooFixupAddr, 24)
if enableLogging:
print "pageableFoo1FooNextFixupAddr: " + pageableFoo1FooNextFixupAddr
assert kernel_cache.dictionary()["dylibs"][0]["fixups"][pageableFoo1FooNextFixupAddr] == "kc(1) + " + pageableFoo1FooUsed1VMAddr
# --- foo2.kext ---
# The vtable we have is [ ..., foo(), kernelClass_RESERVED0(), foo1Used0(), foo1_RESERVED1() ]
# and we want [ ..., foo(), kernelClassUsed0(), foo1Used0(), foo1Used1() ]
# In vtable for Foo2, we match the entry for Foo2::foo() by looking for its value on the RHS of the fixup
pageableFoo2FooFixupAddr = findPagableFixupVMAddr(kernel_cache, 1, "kc(1) + " + pageableFoo2FooVMAddr)
if enableLogging:
print "pageableFoo2FooFixupAddr: " + pageableFoo2FooFixupAddr
# Then the following fixup should be to KernelClass::kernelClassUsed0()
pageableFoo2FooNextFixupAddr = offsetVMAddr(pageableFoo2FooFixupAddr, 8)
if enableLogging:
print "pageableFoo2FooNextFixupAddr: " + pageableFoo2FooNextFixupAddr
assert kernel_cache.dictionary()["dylibs"][1]["fixups"][pageableFoo2FooNextFixupAddr] == "kc(0) + " + kernelClassUsed0VMOffset
# Then we should have foo1Used0(), but Foo2 overrides that, so it should be the Foo2 implementation, not the Foo1 implementation
pageableFoo2FooNextFixupAddr = offsetVMAddr(pageableFoo2FooFixupAddr, 16)
if enableLogging:
print "pageableFoo2FooNextFixupAddr: " + pageableFoo2FooNextFixupAddr
assert kernel_cache.dictionary()["dylibs"][1]["fixups"][pageableFoo2FooNextFixupAddr] == "kc(1) + " + pageableFoo2FooUsed0VMAddr
# And then foo1Used1()
pageableFoo2FooNextFixupAddr = offsetVMAddr(pageableFoo2FooFixupAddr, 24)
if enableLogging:
print "pageableFoo2FooNextFixupAddr: " + pageableFoo2FooNextFixupAddr
assert kernel_cache.dictionary()["dylibs"][1]["fixups"][pageableFoo2FooNextFixupAddr] == "kc(1) + " + pageableFoo1FooUsed1VMAddr
# -----------------------------------------------------------
# Now build an aux cache using the baseline kernel collection
kernel_cache.buildAuxKernelCollection("x86_64", "/auxkc-pageablekc-vtable-patching-namespaces-locals/aux.kc", "/auxkc-pageablekc-vtable-patching-namespaces-locals/main.kc", "/auxkc-pageablekc-vtable-patching-namespaces-locals/pageable.kc", "/auxkc-pageablekc-vtable-patching-namespaces-locals/extensions", ["com.apple.bar1", "com.apple.bar2"], [])
kernel_cache.analyze("/auxkc-pageablekc-vtable-patching-namespaces-locals/aux.kc", ["-layout", "-arch", "x86_64"])
assert len(kernel_cache.dictionary()["dylibs"]) == 2
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.bar1"
assert kernel_cache.dictionary()["dylibs"][1]["name"] == "com.apple.bar2"
# Get the addresses for the symbols we are looking at. This will make it easier to work out the fixup slots
kernel_cache.analyze("/auxkc-pageablekc-vtable-patching-namespaces-locals/aux.kc", ["-symbols", "-arch", "x86_64"])
# From bar1, find the vtable and its override of foo()
# Bar1::foo()
auxBar1FooVMAddr = findLocalSymbolVMAddr(kernel_cache, 0, "__ZN1X4Bar13fooEv")
if enableLogging:
print "auxBar1FooVMAddr: " + auxBar1FooVMAddr
# From bar2, find the vtable and its override of foo()
    # Bar2::foo()
auxBar2FooVMAddr = findLocalSymbolVMAddr(kernel_cache, 1, "__ZN4Bar23fooEv")
if enableLogging:
print "auxBar2FooVMAddr: " + auxBar2FooVMAddr
# Check the fixups
kernel_cache.analyze("/auxkc-pageablekc-vtable-patching-namespaces-locals/aux.kc", ["-fixups", "-arch", "x86_64"])
# --- foo1.kext ---
# The vtable we have is [ ..., foo(), kernelClass_RESERVED0(), foo1Used0(), foo1Used1() ]
# and we want [ ..., foo(), kernelClassUsed0(), foo1Used0(), foo1Used1() ]
# In vtable for Bar1, we match the entry for Bar1::foo() by looking for its value on the RHS of the fixup
auxBar1FooFixupAddr = findAuxFixupVMAddr(kernel_cache, 0, "kc(3) + " + auxBar1FooVMAddr)
if enableLogging:
print "auxBar1FooFixupAddr: " + auxBar1FooFixupAddr
# Then the following fixup should be to KernelClass::kernelClassUsed0()
auxBar1FooNextFixupAddr = offsetVMAddr(auxBar1FooFixupAddr, 8)
if enableLogging:
print "auxBar1FooNextFixupAddr: " + auxBar1FooNextFixupAddr
assert kernel_cache.dictionary()["dylibs"][0]["fixups"][auxBar1FooNextFixupAddr] == "kc(0) + " + kernelClassUsed0VMOffset
# Then we should have foo1Used0() from Foo2 as it overrides it from Foo1
auxBar1FooNextFixupAddr = offsetVMAddr(auxBar1FooFixupAddr, 16)
if enableLogging:
print "auxBar1FooNextFixupAddr: " + auxBar1FooNextFixupAddr
assert kernel_cache.dictionary()["dylibs"][0]["fixups"][auxBar1FooNextFixupAddr] == "kc(1) + " + pageableFoo2FooUsed0VMAddr
# And then foo1Used1()
auxBar1FooNextFixupAddr = offsetVMAddr(auxBar1FooFixupAddr, 24)
if enableLogging:
print "auxBar1FooNextFixupAddr: " + auxBar1FooNextFixupAddr
assert kernel_cache.dictionary()["dylibs"][0]["fixups"][auxBar1FooNextFixupAddr] == "kc(1) + " + pageableFoo1FooUsed1VMAddr
# --- bar2.kext ---
# The vtable we have is [ ..., foo(), kernelClass_RESERVED0(), foo1Used0(), foo1_RESERVED1() ]
# and we want [ ..., foo(), kernelClassUsed0(), foo1Used0(), foo1Used1() ]
# In vtable for Foo2, we match the entry for Foo2::foo() by looking for its value on the RHS of the fixup
auxBar2FooFixupAddr = findAuxFixupVMAddr(kernel_cache, 1, "kc(3) + " + auxBar2FooVMAddr)
if enableLogging:
print "auxBar2FooFixupAddr: " + auxBar2FooFixupAddr
# Then the following fixup should be to KernelClass::kernelClassUsed0()
auxBar2FooNextFixupAddr = offsetVMAddr(auxBar2FooFixupAddr, 8)
if enableLogging:
print "auxBar2FooNextFixupAddr: " + auxBar2FooNextFixupAddr
assert kernel_cache.dictionary()["dylibs"][1]["fixups"][auxBar2FooNextFixupAddr] == "kc(0) + " + kernelClassUsed0VMOffset
# Then we should have foo1Used0() from Foo2 as it overrides it from Foo1
auxBar2FooNextFixupAddr = offsetVMAddr(auxBar2FooFixupAddr, 16)
if enableLogging:
print "auxBar2FooNextFixupAddr: " + auxBar2FooNextFixupAddr
assert kernel_cache.dictionary()["dylibs"][1]["fixups"][auxBar2FooNextFixupAddr] == "kc(1) + " + pageableFoo2FooUsed0VMAddr
# And then foo1Used1()
auxBar2FooNextFixupAddr = offsetVMAddr(auxBar2FooFixupAddr, 24)
if enableLogging:
print "auxBar2FooNextFixupAddr: " + auxBar2FooNextFixupAddr
assert kernel_cache.dictionary()["dylibs"][1]["fixups"][auxBar2FooNextFixupAddr] == "kc(1) + " + pageableFoo1FooUsed1VMAddr
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-static -mkernel -nostdlib -Wl,-e,__start -Wl,-pie main.cpp kernel.cpp -Wl,-pagezero_size,0x0 -o main.kernel -Wl,-image_base,0x10000 -Wl,-segaddr,__HIB,0x4000 -Wl,-add_split_seg_info -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers -Wl,-sectcreate,__LINKINFO,__symbolsets,SymbolSets.plist -Wl,-segprot,__LINKINFO,r--,r-- -std=c++11 -DKERNEL_USED=1 -Wl,-exported_symbol,__ZN1X11KernelClass10gMetaClassE -Wl,-exported_symbol,__ZN8OSObject10gMetaClassE -Wl,-exported_symbol,__ZNK11OSMetaClass19instanceConstructedEv
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-no_data_const foo1.cpp -o extensions/foo1.kext/foo1 -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers -std=c++11 -DFOO1_USED0=1 -DFOO1_USED1=1 -Wl,-exported_symbol,__ZN1X4Foo110gMetaClassE -Wl,-exported_symbol,__ZN1X4Foo1C2EPK11OSMetaClass -Wl,-exported_symbol,__ZTVN1X4Foo1E -Wl,-exported_symbol,__ZN1X4Foo1D2Ev
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-no_data_const foo2.cpp -o extensions/foo2.kext/foo2 -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers -std=c++11 -DFOO1_USED0=1 -Wl,-exported_symbol,__ZN1X4Foo210gMetaClassE -Wl,-exported_symbol,__ZN1X4Foo2C2EPK11OSMetaClass -Wl,-exported_symbol,__ZTVN1X4Foo2E -Wl,-exported_symbol,__ZN1X4Foo2D2Ev
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-data_const bar1.cpp -o extensions/bar1.kext/bar1 -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers -std=c++11 -DFOO1_USED0=1 -Wl,-exported_symbol,__ZN1X4Bar110gMetaClassE -Wl,-exported_symbol,__ZN1X4Bar1C2EPK11OSMetaClass -Wl,-exported_symbol,__ZTVN1X4Bar1E -Wl,-exported_symbol,__ZN1X4Bar1D2Ev
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-data_const bar2.cpp -o extensions/bar2.kext/bar2 -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers -std=c++11 -DFOO1_USED0=1
```
#### File: kernel-cache-tests/kernel-vtable-patching-error/test.py
```python
import os
import KernelCollection
# Bar has a superclass in Foo, but we don't export that symbol. This causes the vtable patcher to fail
def check(kernel_cache):
kernel_cache.buildKernelCollection("x86_64", "/kernel-vtable-patching-error/main.kc", "/kernel-vtable-patching-error/main.kernel", "/kernel-vtable-patching-error/extensions", ["com.apple.bar"], ["-json-errors"])
assert len(kernel_cache.dictionary()) == 1
# bar
assert kernel_cache.dictionary()[0]["id"] == "com.apple.bar"
assert len(kernel_cache.dictionary()[0]["errors"]) == 1
assert kernel_cache.dictionary()[0]["errors"][0] == "Cannot find symbol for metaclass pointed to by '__ZN3Bar10superClassE'. Expected symbol '__ZN3Foo10gMetaClassE' to be defined in another kext"
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-static -mkernel -nostdlib -Wl,-e,__start -Wl,-pie main.cpp -Wl,-pagezero_size,0x0 -o main.kernel -Wl,-image_base,0x10000 -Wl,-segaddr,__HIB,0x4000 -Wl,-add_split_seg_info -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers -Wl,-sectcreate,__LINKINFO,__symbolsets,SymbolSets.plist -Wl,-segprot,__LINKINFO,r--,r-- -DFOO_USED=1
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-no_data_const bar.cpp -o extensions/bar.kext/bar -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers
# [~]> rm -r extensions/*.kext/*.ld
```
#### File: kernel-cache-tests/kext-bind-missing-dep/test.py
```python
import os
import KernelCollection
# Check that we get sensible errors on the bad kext
def check(kernel_cache):
kernel_cache.buildKernelCollection("arm64", "/kext-bind-missing-dep/main.kc", "/kext-bind-missing-dep/main.kernel", "/kext-bind-missing-dep/extensions", ["com.apple.foo"], ["-json-errors"])
assert len(kernel_cache.dictionary()) == 1
assert kernel_cache.dictionary()[0]["id"] == "com.apple.foo"
assert len(kernel_cache.dictionary()[0]["errors"]) == 1
assert kernel_cache.dictionary()[0]["errors"][0] == "Failed to bind '_bar' as could not find a kext with 'com.apple.bar' bundle-id"
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-rename_section,__TEXT,__text,__TEXT_EXEC,__text -Wl,-e,__start -Wl,-pagezero_size,0x0 -Wl,-pie main.c -o main.kernel
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-no_data_const foo.c -o extensions/foo.kext/foo
# [~]> rm -r extensions/*.kext/*.ld
```
#### File: kernel-cache-tests/kext-missing-weak-bind/test.py
```python
import os
import KernelCollection
# Check that weak binds can be missing, so long as we check for the magic symbol
def check(kernel_cache):
kernel_cache.buildKernelCollection("arm64", "/kext-missing-weak-bind/main.kc", "/kext-missing-weak-bind/main.kernel", "/kext-missing-weak-bind/extensions", ["com.apple.foo", "com.apple.bar"], [])
kernel_cache.analyze("/kext-missing-weak-bind/main.kc", ["-layout", "-arch", "arm64"])
assert kernel_cache.dictionary()["cache-segments"][3]["name"] == "__DATA_CONST"
assert kernel_cache.dictionary()["cache-segments"][3]["vmAddr"] == "0x18000"
assert len(kernel_cache.dictionary()["dylibs"]) == 3
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.kernel"
assert kernel_cache.dictionary()["dylibs"][1]["name"] == "com.apple.bar"
assert kernel_cache.dictionary()["dylibs"][2]["name"] == "com.apple.foo"
# Symbols
kernel_cache.analyze("/kext-missing-weak-bind/main.kc", ["-symbols", "-arch", "arm64"])
# kernel
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.kernel"
assert kernel_cache.dictionary()["dylibs"][0]["global-symbols"][2]["name"] == "_gOSKextUnresolved"
assert kernel_cache.dictionary()["dylibs"][0]["global-symbols"][2]["vmAddr"] == "0x20000"
# Check the fixups
kernel_cache.analyze("/kext-missing-weak-bind/main.kc", ["-fixups", "-arch", "arm64"])
assert len(kernel_cache.dictionary()["fixups"]) == 4
assert kernel_cache.dictionary()["fixups"]["0x18000"] == "kc(0) + 0x20000"
assert kernel_cache.dictionary()["fixups"]["0x18008"] == "kc(0) + 0x20000"
assert kernel_cache.dictionary()["fixups"]["0x18010"] == "kc(0) + 0x20000"
assert kernel_cache.dictionary()["fixups"]["0x18018"] == "kc(0) + 0x20000"
assert len(kernel_cache.dictionary()["dylibs"]) == 3
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.kernel"
assert kernel_cache.dictionary()["dylibs"][0]["fixups"] == "none"
assert kernel_cache.dictionary()["dylibs"][1]["name"] == "com.apple.bar"
assert kernel_cache.dictionary()["dylibs"][1]["fixups"] == "none"
assert kernel_cache.dictionary()["dylibs"][2]["name"] == "com.apple.foo"
assert kernel_cache.dictionary()["dylibs"][2]["fixups"] == "none"
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-rename_section,__TEXT,__text,__TEXT_EXEC,__text -Wl,-e,__start -Wl,-pagezero_size,0x0 -Wl,-pie -Wl,-sectcreate,__LINKINFO,__symbolsets,SymbolSets.plist -Wl,-segprot,__LINKINFO,r--,r-- main.c -o main.kernel
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info foo.c -o extensions/foo.kext/foo
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info bar.c -o extensions/bar.kext/bar -Wl,-fixup_chains
# [~]> rm -r extensions/*.kext/*.ld
```
#### File: kernel-cache-tests/kmod-info-errors/test.py
```python
import os
import KernelCollection
# Error for bad kmod info
def check(kernel_cache):
kernel_cache.buildKernelCollection("x86_64", "/kmod-info-errors/main.kc", "/kmod-info-errors/main.kernel", "/kmod-info-errors/extensions", ["com.apple.foo", "com.apple.bar"], ["-json-errors"])
assert len(kernel_cache.dictionary()) == 2
# bar
assert kernel_cache.dictionary()[0]["id"] == "com.apple.bar"
assert len(kernel_cache.dictionary()[0]["errors"]) == 1
assert kernel_cache.dictionary()[0]["errors"][0] == "unsupported kmod_info version of 2"
# foo
assert kernel_cache.dictionary()[1]["id"] == "com.apple.foo"
assert len(kernel_cache.dictionary()[1]["errors"]) == 1
assert kernel_cache.dictionary()[1]["errors"][0] == "unsupported kmod_info version of 2"
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-static -mkernel -nostdlib -Wl,-e,__start -Wl,-pie main.c -Wl,-pagezero_size,0x0 -o main.kernel -Wl,-image_base,0x200000 -Wl,-segaddr,__HIB,0x100000 -Wl,-add_split_seg_info -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info foo.c -o extensions/foo.kext/foo
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info bar.c -o extensions/bar.kext/bar
# [~]> rm -r extensions/*.kext/*.ld
```
#### File: kernel-cache-tests/large-auxkc-errors/test.py
```python
import os
import KernelCollection
# The arm64e auxKC has a lower memory limit than other KCs. Verify that we get an error with only 64MB in there.
def check(kernel_cache):
# First build a kernel collection
kernel_cache.buildKernelCollection("arm64e", "/large-auxkc-errors/main.kc", "/large-auxkc-errors/main.kernel", "/large-auxkc-errors/extensions", [], [])
kernel_cache.analyze("/large-auxkc-errors/main.kc", ["-layout", "-arch", "arm64e"])
# Now build an aux cache using the baseline kernel collection
kernel_cache.buildAuxKernelCollection("arm64e", "/large-auxkc-errors/aux.kc", "/large-auxkc-errors/main.kc", "", "/large-auxkc-errors/extensions", ["com.apple.foo", "com.apple.bar"], ["-json-errors"])
assert len(kernel_cache.dictionary()) == 1
assert "kernel collection size exceeds maximum size of 67108864" in kernel_cache.dictionary()[0]
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64e -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-rename_section,__TEXT,__text,__TEXT_EXEC,__text -Wl,-e,__start -Wl,-pagezero_size,0x0 -Wl,-pie main.c -o main.kernel
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64e -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-no_data_const foo.c -o extensions/foo.kext/foo
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64e -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-no_data_const bar.c -o extensions/bar.kext/bar
# [~]> rm -r extensions/*.kext/*.ld
```
#### File: kernel-cache-tests/pageablekc-uuid/test.py
```python
import os
import KernelCollection
# Verify that a pageableKC references the UUID of the base KC
def check(kernel_cache):
# First build a kernel collection
kernel_cache.buildKernelCollection("arm64", "/pageablekc-uuid/main.kc", "/pageablekc-uuid/main.kernel", "/pageablekc-uuid/extensions", [], [])
kernel_cache.analyze("/pageablekc-uuid/main.kc", ["-layout", "-arch", "arm64"])
# Check the kernel UUID
kernel_cache.analyze("/pageablekc-uuid/main.kc", ["-uuid", "-arch", "arm64"])
kernelUUID = kernel_cache.dictionary()["uuid"]
assert kernelUUID != "00000000-0000-0000-0000-000000000000"
assert kernelUUID == kernel_cache.dictionary()["prelink-info-uuid"]
    # Now build a pageable cache using the baseline kernel collection
kernel_cache.buildPageableKernelCollection("arm64", "/pageablekc-uuid/pageable.kc", "/pageablekc-uuid/main.kc", "/pageablekc-uuid/extensions", ["com.apple.foo", "com.apple.bar"], [])
# Check the pageable UUID
kernel_cache.analyze("/pageablekc-uuid/pageable.kc", ["-uuid", "-arch", "arm64"])
assert kernel_cache.dictionary()["uuid"] != "00000000-0000-0000-0000-000000000000"
assert kernel_cache.dictionary()["prelink-info-base-uuid"] == kernelUUID
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-rename_section,__TEXT,__text,__TEXT_EXEC,__text -Wl,-e,__start -Wl,-pagezero_size,0x0 -Wl,-pie main.c -o main.kernel
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info foo.c -o extensions/foo.kext/foo
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info bar.c -o extensions/bar.kext/bar
# [~]> rm -r extensions/*.kext/*.ld
```
#### File: kernel-cache-tests/pageablekc-vtable-patching/test.py
```python
import os
import KernelCollection
# This verifies that a kext can patch vtables against another kext
# We put foo.kext in the base KC so that the patch slot in bar.kext has to know to use the correct fixup level in the fixup chain
def check(kernel_cache):
kernel_cache.buildKernelCollection("x86_64", "/pageablekc-vtable-patching/main.kc", "/pageablekc-vtable-patching/main.kernel", "/pageablekc-vtable-patching/extensions", ["com.apple.foo"], [])
kernel_cache.analyze("/pageablekc-vtable-patching/main.kc", ["-layout", "-arch", "x86_64"])
assert len(kernel_cache.dictionary()["dylibs"]) == 2
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.kernel"
assert kernel_cache.dictionary()["dylibs"][1]["name"] == "com.apple.foo"
# Get the addresses for the symbols we are looking at. This will make it easier to work out the fixup slots
kernel_cache.analyze("/pageablekc-vtable-patching/main.kc", ["-symbols", "-arch", "x86_64"])
# From foo, we want to know where the vtable is, and the foo() and fooUsed0() slots in that vtable
# Foo::foo()
assert kernel_cache.dictionary()["dylibs"][1]["global-symbols"][6]["name"] == "__ZN3Foo3fooEv"
assert kernel_cache.dictionary()["dylibs"][1]["global-symbols"][6]["vmAddr"] == "0x16ED0"
# Foo::fooUsed0()
assert kernel_cache.dictionary()["dylibs"][1]["global-symbols"][7]["name"] == "__ZN3Foo8fooUsed0Ev"
assert kernel_cache.dictionary()["dylibs"][1]["global-symbols"][7]["vmAddr"] == "0x16EF0"
# Check the fixups
kernel_cache.analyze("/pageablekc-vtable-patching/main.kc", ["-fixups", "-arch", "x86_64"])
# In vtable for Foo, we match the entry for Foo::foo() by looking for its value on the RHS of the fixup
assert kernel_cache.dictionary()["fixups"]["0x1D150"] == "kc(0) + 0x16ED0"
# Then the following fixup should be to Foo::fooUsed0()
assert kernel_cache.dictionary()["fixups"]["0x1D158"] == "kc(0) + 0x16EF0"
# -----------------------------------------------------------
    # Now build a pageable cache using the baseline kernel collection
kernel_cache.buildPageableKernelCollection("x86_64", "/pageablekc-vtable-patching/pageable.kc", "/pageablekc-vtable-patching/main.kc", "/pageablekc-vtable-patching/extensions", ["com.apple.bar"], [])
kernel_cache.analyze("/pageablekc-vtable-patching/pageable.kc", ["-layout", "-arch", "x86_64"])
assert len(kernel_cache.dictionary()["dylibs"]) == 1
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.bar"
# Get the addresses for the symbols we are looking at. This will make it easier to work out the fixup slots
kernel_cache.analyze("/pageablekc-vtable-patching/pageable.kc", ["-symbols", "-arch", "x86_64"])
# From bar, find the vtable and its override of foo()
# Bar::foo()
assert kernel_cache.dictionary()["dylibs"][0]["global-symbols"][3]["name"] == "__ZN3Bar3fooEv"
assert kernel_cache.dictionary()["dylibs"][0]["global-symbols"][3]["vmAddr"] == "0x4F10"
# Check the fixups
kernel_cache.analyze("/pageablekc-vtable-patching/pageable.kc", ["-fixups", "-arch", "x86_64"])
# In bar, again match the entry for its Bar::foo() symbol
assert kernel_cache.dictionary()["dylibs"][0]["fixups"]["0x8150"] == "kc(1) + 0x4F10"
# And if the patching was correct, then following entry should be to Foo::fooUsed0()
assert kernel_cache.dictionary()["dylibs"][0]["fixups"]["0x8158"] == "kc(0) + 0x12EF0"
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-static -mkernel -nostdlib -Wl,-e,__start -Wl,-pie main.cpp -Wl,-pagezero_size,0x0 -o main.kernel -Wl,-image_base,0x10000 -Wl,-segaddr,__HIB,0x4000 -Wl,-add_split_seg_info -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers -Wl,-sectcreate,__LINKINFO,__symbolsets,SymbolSets.plist -Wl,-segprot,__LINKINFO,r--,r--
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-no_data_const foo.cpp -o extensions/foo.kext/foo -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers -DFOO_USED=1
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-data_const bar.cpp -o extensions/bar.kext/bar -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers
# [~]> rm -r extensions/*.kext/*.ld
``` |
{
"source": "1081/pymapdl",
"score": 3
} |
#### File: docker/mapdl/reduce_mapdl.py
```python
import shutil
import math
import stat
import time
import os
import numpy as np
source_directory = '/ansys_inc/v211/'
def convert_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
cutoff = 1607646816.3588207  # epoch timestamp; only files accessed after this are copied
total_size = 0
times = []
recent_files = []
for root, dirs, files in os.walk(source_directory, topdown=False):
for name in files:
if os.path.isfile(os.path.join(root, name)):
stats = os.stat(os.path.join(root, name))
# accesstime = time.ctime(stats[stat.ST_ATIME])
accesstime = stats[stat.ST_ATIME]
times.append(accesstime)
if accesstime > cutoff:
filepath = os.path.join(root, name)
# print('%40s %s' % (name, time.ctime(accesstime)))
recent_files.append((filepath, name))
total_size += os.path.getsize(filepath)
times = np.array(times)
# copy files over to a new directory while sorting by access time (for
# cool printouts!)
masked_times = times[times > cutoff]
for i in np.argsort(masked_times):
accesstime = masked_times[i]
print(recent_files[i][0])
# print('%40s %s' % (recent_files[i][1], time.ctime(accesstime)))
dest = recent_files[i][0].replace('/v211/', '/v211_docker/')
os.makedirs(os.path.dirname(dest), exist_ok=True)
shutil.copy(recent_files[i][0], dest)
print(np.sum(times > cutoff), 'out of', len(times))
print(convert_size(total_size))
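# Worked example (illustrative): convert_size(123456789) returns "117.74 MB",
# since 123456789 / 1024**2 is roughly 117.74.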
``` |
{
"source": "1082sqnatc/missionspacelab2019",
"score": 3
} |
#### File: missionspacelab2019/src/dayornight.py
```python
from PIL import Image
from PIL import ImageOps
import numpy
import time
import cv2
# Calculates the average brightness of a 200x200 image, sampling every 10 pixels for speed
def calcBrightness(img):
x = 0
y = 0
    c_brightness = 0
count = 0
while x < 200 and y < 200:
pixelBGR = img.getpixel((x,y))
B, G, R = pixelBGR
c_brightness = (c_brightness + ((B+G+R) / 3))
if x == 199:
y = y + 10
x = 0
else:
x = x + 10
count = count + 1
print("total brightness: " + str(c_brightness) + ", sample count: " + str(count))
return c_brightness / count
# Detects a good day vs night image of a portal
# Eliminates night images and sunrise/sunset 'flared' images
def isDay(image):
#img = open(imageFile, 'rb')
#with Image.open(img) as image:
    width, height = image.size # Determines width and height of image and stores it
print("Width =", width, "\n Height =", height) # Prints width and height
total_pixels = width * height # Calculates the total number of pixels in the image
print("Total Pixels =", total_pixels) # Prints total pixels
c_width = width / 2 # Calculates middle of x-axis
c_height = height / 2 # Calculates middle of y-axis
store_total = 40000 # Stores total pixels of the crop area
#img_cv2 = cv2.imread(imageFile)
border_tl = (0, 0, 200, 200) # Crops image to top left 200x200
border_tr = ((width-200), 0, width, 200) # Crops image to top right 200x200
    border_bl = (0, (height-200), 200, height) # Crops image to bottom left 200x200
border_br = ((width-200), (height-200), width, height) # Crops image to bottom right 200x200
border_ct = ((c_width-100), (c_height-100), (c_width+100), (c_height+100)) # Crops image to center 200x200
Top_Left = image.crop(border_tl) # " "
Top_Right = image.crop(border_tr) # " "
Bottom_Left = image.crop(border_bl) # " "
Bottom_Right = image.crop(border_br) # " "
Center_Data = image.crop(border_ct) # " "
print("pixels in tl: " + str(Top_Left.size))
print("pixels in tr: " + str(Top_Right.size))
print("pixels in bl: " + str(Bottom_Left.size))
print("pixels in br: " + str(Bottom_Right.size))
print("pixels in ct: " + str(Center_Data.size))
#Top_Left = numpy.array(Top_Left)
#Top_Right = numpy.array(Top_Right)
#Bottom_Left = numpy.array(Bottom_Left)
#Bottom_Right = numpy.array(Bottom_Right)
#Center_Data = numpy.array(Center_Data)
#Top_Left = cv2.cvtColor(Top_Left, cv2.COLOR_RGB2BGR)
#Top_Right = cv2.cvtColor(Top_Right, cv2.COLOR_RGB2BGR)
#Bottom_Left = cv2.cvtColor(Bottom_Left, cv2.COLOR_RGB2BGR)
#Bottom_Right = cv2.cvtColor(Bottom_Right, cv2.COLOR_RGB2BGR)
#Center_Data = cv2.cvtColor(Center_Data, cv2.COLOR_RGB2BGR)
c_brightness = calcBrightness(Center_Data)
tl_brightness = calcBrightness(Top_Left)
tr_brightness = calcBrightness(Top_Right)
bl_brightness = calcBrightness(Bottom_Left)
br_brightness = calcBrightness(Bottom_Right)
avg = (tl_brightness + tr_brightness + bl_brightness + br_brightness) / 4
rg = c_brightness - avg
print("Centre vs border diff: " + str(rg))
    # Our testing shows an rg of 40 is a good threshold for day vs night/flared pictures
if rg > 40:
return True
else:
return False
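# --- Illustrative usage (not part of the original module) ---
# Minimal sketch, assuming a local capture exists at the given path:
if __name__ == "__main__":
    with Image.open("capture.jpg") as image:
        print("day" if isDay(image) else "night/flared")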
```
#### File: missionspacelab2019/src/stitcher.py
```python
import numpy as np
import imutils
import cv2
import math
# Custom absolute function
def myfabs(x):
if x < 0:
return x*-1
else :
return x
# Heavily customised CV2 based stitcher algorithm, tuned to suit images with transparency
# Will also cut the resultant 'black' border from the joined image
def stitch(images, ratio=0.75, reprojThresh=4.0,
showMatches=False):
(imageB, imageA) = images
(kpsA, featuresA) = detectAndDescribe(imageA)
(kpsB, featuresB) = detectAndDescribe(imageB)
M = matchKeypoints(kpsA, kpsB,
featuresA, featuresB, ratio, reprojThresh)
if M is None:
return None
(matches, H, status) = M
firstMatchLeftIdx = matches[0][0]
firstMatchRightIdx = matches[0][1]
firstMatchLeftKps = kpsB[firstMatchLeftIdx]
firstMatchRightKps = kpsA[firstMatchRightIdx]
print("left kps")
print(firstMatchLeftKps)
print("right kps")
print(firstMatchRightKps)
yDrift = math.ceil(myfabs(firstMatchLeftKps[1] - firstMatchRightKps[1]))
xDrift = math.ceil(myfabs(firstMatchLeftKps[0] - firstMatchRightKps[0]))
print("xDrift: " + str(xDrift))
print("yDrift: " + str(yDrift))
newX = (imageB.shape[1]) + math.ceil(myfabs(xDrift))
newY = (imageB.shape[0]) + math.ceil(myfabs(yDrift))
print("New image size: " + str(newX) + "x" + str(newY))
#result = cv2.warpPerspective(imageA, H,
# (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
#result = cv2.warpPerspective(imageA, H,
# (imageB.shape[1] + math.ceil(myfabs(xDrift)),
# (imageB.shape[0] + math.ceil(myfabs(yDrift)) ) ))
imageAWarped = cv2.warpPerspective(imageA, H,
(newX,newY), borderMode=cv2.BORDER_TRANSPARENT )
result = imageAWarped.copy()
result[0:(imageB.shape[0]),
0:(imageB.shape[1])] = imageB
(h,w,c) = imageAWarped.shape
maxj = 0
maxi = 0
mini = h
minj = w
for i in range(h):
for j in range(w):
color = imageAWarped[i,j]
hascolor = (color[0] != 0 or color[1] != 0 or color[2] != 0)
haseithercolor = hascolor
if haseithercolor == False:
colororig = result[i,j]
if colororig[0] != 0 or colororig[1] != 0 or colororig[2] != 0:
haseithercolor = True
if hascolor:
result[i,j] = color
if haseithercolor:
if i > maxi:
maxi = i
if j > maxj:
maxj = j
if j<minj:
minj = j
if i < mini:
mini = i
    # trim the stitched canvas to the populated region; numpy indexes rows
    # first, so slice as [rows, cols] (the original swapped i and j)
    print("maxi: " + str(maxi) + ", maxj: " + str(maxj))
    print("mini: " + str(mini) + ", minj: " + str(minj))
    result = result[mini:maxi, minj:maxj]
#result[0:(imageAWarped.shape[0]),
# 0:(imageAWarped.shape[1])] = imageAWarped
if showMatches:
vis = drawMatches(imageA, imageB, kpsA, kpsB, matches,
status)
return (result, vis, xDrift, yDrift)
return result
# Feature detection in the image
def detectAndDescribe(image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
descriptor = cv2.ORB_create(nfeatures=1500)
(kps, features) = descriptor.detectAndCompute(image, None)
kps = np.float32([kp.pt for kp in kps])
return (kps, features)
# Matching keypoints between two images
def matchKeypoints(kpsA, kpsB, featuresA, featuresB,
ratio, reprojThresh):
matcher = cv2.DescriptorMatcher_create("BruteForce")
rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
matches = []
for m in rawMatches:
if len(m) == 2 and m[0].distance < m[1].distance * ratio:
matches.append((m[0].trainIdx, m[0].queryIdx))
if len(matches) > 4:
ptsA = np.float32([kpsA[i] for (_, i) in matches])
ptsB = np.float32([kpsB[i] for (i, _) in matches])
(H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
reprojThresh)
return (matches, H, status)
return None
# Draw the matches on an image (for testing)
def drawMatches(imageA, imageB, kpsA, kpsB, matches, status):
(hA, wA) = imageA.shape[:2]
(hB, wB) = imageB.shape[:2]
vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
vis[0:hA, 0:wA] = imageA
vis[0:hB, wA:] = imageB
for ((trainIdx, queryIdx), s) in zip(matches, status):
if s == 1:
ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
cv2.line(vis, ptA, ptB, (0, 255, 0), 1)
return vis
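# --- Illustrative usage (not part of the original module) ---
# Minimal sketch, assuming two overlapping captures on disk; the file names
# are placeholders. Note stitch() expects (imageB, imageA), i.e. the second
# element is warped onto the first.
if __name__ == "__main__":
    imgB = cv2.imread("left.png")
    imgA = cv2.imread("right.png")
    stitched = stitch((imgB, imgA))
    if stitched is not None:
        cv2.imwrite("stitched.png", stitched)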
``` |
{
"source": "108360224/watch_video",
"score": 3
} |
#### File: main/python/make_first_page.py
```python
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import requests
def make_first_page(src,text):
def cv2ImgAddText(img, text, left, top, textColor=(0, 0, 0), textSize=10):
        if isinstance(img, np.ndarray):  # check whether this is an OpenCV image (numpy array)
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontText = ImageFont.truetype("font/simsun.ttc", textSize, encoding="utf-8")
draw.text((left, top), text, textColor, font=fontText)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
try:
response = requests.get(src,timeout=3)
nparr = np.frombuffer(response.content, np.uint8)
im = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
except:
im=np.zeros((90,120,3), np.uint8)
im=cv2.resize(im,(90,120))
im=cv2.copyMakeBorder(src=im,left=0,right=0,top=0,bottom=25,borderType=cv2.BORDER_CONSTANT,value=[255, 255, 255])
    # wrap the title every 12 characters (assumed intent; the original left
    # short titles blank and never wrapped mid-length ones)
    if len(text) > 24:
        tex = text[:12] + '\n' + text[12:24] + '\n' + text[24:]
    elif len(text) > 12:
        tex = text[:12] + '\n' + text[12:]
    else:
        tex = text
im=cv2ImgAddText(im,tex,2,121)
#cv2.imwrite("CV.jpg", im)
return im
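# --- Illustrative usage (not part of the original module) ---
# Minimal sketch; the thumbnail URL and title are placeholders. A black
# 90x120 placeholder is used when the download fails or times out.
if __name__ == "__main__":
    thumb = make_first_page("http://example.com/cover.jpg", "An example film title")
    cv2.imwrite("cover_card.jpg", thumb)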
```
#### File: main/python/menu.py
```python
from bs4 import BeautifulSoup
import requests
import re
class Menu():
def __init__(self,URL='http://www.99kubo.tv'):
html = requests.get(URL).text
self.soup = BeautifulSoup(html, 'lxml')
self.URL=URL
re_channel=re.compile('mm\d{0,3}')
channel_elem = self.soup.select_one('body > div.top > div.menu > div.mainmenu_top')
menu_href = tuple(a['href'] for a in channel_elem.find_all('a',{'id':re_channel,'target':""}))
menu_string = tuple(a.string for a in channel_elem.find_all('a',{'id':re_channel,'target':""}))
self.menu_list=(menu_string,menu_href)
sort_elem = self.soup.select_one('body > div.top > div.menu > div.mainmenu_bottom')
sort_elem_ul=sort_elem.select('ul')
re_href=re.compile('$(?!html)')
sort_href=tuple(tuple(re.sub(r'^(?!/)','/',a['href']) for a in ul.find_all('a',{'href':re_href}) if re.match(r'(?!\d){1,4}', a.string))[:-10] for ul in sort_elem_ul)+((),)
sort_string=tuple(tuple(a.string for a in ul.find_all('a',{'href':re_href}) if re.match(r'(?!\d){1,4}', a.string))[:-10] for ul in sort_elem_ul)+((),)
self.sort_list=(sort_string,sort_href)
def get_menu_list(self):
return self.menu_list
def get_child_list(self):
return self.sort_list
def get_link(self,menu,sort=''):
URL=self.URL
try:
if sort=='':
URL+=self.menu_list[1][self.menu_list[0].index(menu)]
else:
dex=self.menu_list[0].index(menu)
URL+=self.sort_list[1][dex][self.sort_list[0][dex].index(sort)]
except:
URL+=""
return URL
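# --- Illustrative usage (not part of the original module) ---
# Minimal sketch; depends on www.99kubo.tv being reachable and keeping its
# current page layout.
if __name__ == "__main__":
    menu = Menu()
    names, hrefs = menu.get_menu_list()
    if names:
        print(menu.get_link(names[0]))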
```
#### File: main/python/test.py
```python
from bs4 import BeautifulSoup
import requests
import re
from urllib.parse import unquote
from make_first_page import make_first_page
class Film():
def __init__(self,url):
html = requests.get('http://www.99kubo.tv'+url).text
soup = BeautifulSoup(html, 'lxml')
a=soup.select_one('body > div.main > div.list > div.listlf > dl > span > a:nth-child(3)')
self.URL='http://www.99kubo.tv/'+a['href']
html = requests.get(self.URL).text
self.soup = BeautifulSoup(html, 'lxml')
url_list=()
img_list=()
#title_list=()
ul=self.soup.select_one('body > div.main > div.list > div.listlf > ul')
for li in ul.select('li'):
a=li.select('a')[0]
url_list+=(a['href'],)
img=a.find_all('img')[0]
im=make_first_page(img['data-original'],img['alt'])
img_list+=(im,)
#title_list+=(img['alt'],)
self.film_list=(url_list,img_list)
def get_film_list(self):
return self.film_list
def load_new_film(self):
url_list=()
img_list=()
#title_list=()
tag=self.soup.select_one('body > div.main > div.list > div.listlf > div')
a=tag.find_all('a')[-1]
html = requests.get('http://www.99kubo.tv/'+a['href']).text
self.soup = BeautifulSoup(html, 'lxml')
ul=self.soup.select_one('body > div.main > div.list > div.listlf > ul')
for li in ul.select('li'):
a=li.select('a')[0]
url_list+=(a['href'],)
img=a.find_all('img')[0]
im=make_first_page(img['data-original'],img['alt'])
img_list+=(im,)
#title_list+=(img['alt'],)
self.film_list=(url_list,img_list)
``` |
{
"source": "1084667371/AutoLabel",
"score": 3
} |
#### File: AutoLabel/libs/settings.py
```python
import pickle
import os
import sys
class Settings(object):
def __init__(self):
        # By default, the home will be in the same folder as labelImg
home = os.path.expanduser("~")
self.data = {}
# self.path = os.path.join(home, '.labelImgSettings.pkl')
self.path = os.path.join(home, '.autoOCRSettings.pkl')
def __setitem__(self, key, value):
self.data[key] = value
def __getitem__(self, key):
return self.data[key]
def get(self, key, default=None):
if key in self.data:
return self.data[key]
return default
def save(self):
if self.path:
with open(self.path, 'wb') as f:
pickle.dump(self.data, f, pickle.HIGHEST_PROTOCOL)
return True
return False
def load(self):
try:
if os.path.exists(self.path):
with open(self.path, 'rb') as f:
self.data = pickle.load(f)
return True
        except Exception:
            print('Loading settings failed')
return False
def reset(self):
if os.path.exists(self.path):
os.remove(self.path)
            print('Removed settings pkl file {0}'.format(self.path))
self.data = {}
self.path = None
```
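A quick round-trip sketch of the `Settings` store (key names and values here are illustrative):
```python
from libs.settings import Settings

s = Settings()
s['window/size'] = (800, 600)   # stored via __setitem__
s.save()                        # pickled to ~/.autoOCRSettings.pkl

s2 = Settings()
s2.load()
print(s2.get('window/size'))    # -> (800, 600)
print(s2.get('missing', 42))    # falls back to the default for absent keys
```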
#### File: 1084667371/AutoLabel/scratch.py
```python
import sys
from PyQt5.QtCore import QRegExp
from PyQt5.QtGui import QTextCharFormat, QTextDocument, QTextCursor
from PyQt5.QtWidgets import (QApplication, QMainWindow, QTextEdit,
QToolBar, QLineEdit, QPushButton, QColorDialog, QHBoxLayout, QWidget)
# Highlight occurrences of the given text in the current view
class TextEdit(QMainWindow):
def __init__(self, parent=None):
super(TextEdit, self).__init__(parent)
self.textEdit = QTextEdit(self)
self.setCentralWidget(self.textEdit)
widget = QWidget(self)
vb = QHBoxLayout(widget)
vb.setContentsMargins(0, 0, 0, 0)
self.findText = QLineEdit(self)
self.findText.setText('self')
findBtn = QPushButton('高亮', self)
findBtn.clicked.connect(self.highlight)
vb.addWidget(self.findText)
vb.addWidget(findBtn)
tb = QToolBar(self)
tb.addWidget(widget)
self.addToolBar(tb)
def setText(self, text):
self.textEdit.setPlainText(text)
def highlight(self):
        text = self.findText.text()  # text from the input box
if not text:
return
col = QColorDialog.getColor(self.textEdit.textColor(), self)
if not col.isValid():
return
        # restore the default color
cursor = self.textEdit.textCursor()
cursor.select(QTextCursor.Document)
cursor.setCharFormat(QTextCharFormat())
cursor.clearSelection()
self.textEdit.setTextCursor(cursor)
        # text color
fmt = QTextCharFormat()
fmt.setForeground(col)
        # regular expression
expression = QRegExp(text)
self.textEdit.moveCursor(QTextCursor.Start)
cursor = self.textEdit.textCursor()
        # loop over matches and apply the color
pos = 0
index = expression.indexIn(self.textEdit.toPlainText(), pos)
while index >= 0:
cursor.setPosition(index)
cursor.movePosition(QTextCursor.Right,
QTextCursor.KeepAnchor, len(text))
cursor.mergeCharFormat(fmt)
pos = index + expression.matchedLength()
index = expression.indexIn(self.textEdit.toPlainText(), pos)
if __name__ == '__main__':
app = QApplication(sys.argv)
textEdit = TextEdit()
textEdit.resize(800, 600)
textEdit.show()
textEdit.setText(open(sys.argv[0], 'rb').read().decode())
sys.exit(app.exec_())
``` |
{
"source": "1084667371/R-Drop-Paddle",
"score": 3
} |
#### File: R-Drop-Paddle/models/modeling.py
```python
import paddle
import paddle.nn as nn
from paddle.nn.initializer import TruncatedNormal, Constant
# parameter initialization config
trunc_normal_ = TruncatedNormal(std=.02)
zeros_ = Constant(value=0.)
ones_ = Constant(value=1.)
# x[int] -> tuple(x, x)
def to_2tuple(x):
return tuple([x] * 2)
class Identity(nn.Layer):
def __init__(self):
super(Identity, self).__init__()
def forward(self, input):
return input
class PatchEmbed(nn.Layer):
def __init__(self, img_size=32, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * \
(img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2D(in_chans, embed_dim,
kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose((0, 2, 1))
return x
class Attention(nn.Layer):
def __init__(self, dim, num_heads=12, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape((B, N, 3, self.num_heads, C //
self.num_heads)).transpose((2, 0, 3, 1, 4))
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q.matmul(k.transpose((0, 1, 3, 2)))) * self.scale
attn = nn.functional.softmax(attn, axis=-1)
attn = self.attn_drop(attn)
x = (attn.matmul(v)).transpose((0, 2, 1, 3)).reshape((B, N, C))
x = self.proj(x)
x = self.proj_drop(x)
return x
class Mlp(nn.Layer):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def drop_path(x, drop_prob=0., training=False):
if drop_prob == 0. or not training:
return x
keep_prob = paddle.to_tensor(1 - drop_prob)
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype)
random_tensor = paddle.floor(random_tensor) # binarize
output = x.divide(keep_prob) * random_tensor
return output
class DropPath(nn.Layer):
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Block(nn.Layer):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer='nn.LayerNorm', epsilon=1e-5):
super().__init__()
self.norm1 = eval(norm_layer)(dim, epsilon=epsilon)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity()
self.norm2 = eval(norm_layer)(dim, epsilon=epsilon)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, x):
# Norm + Attention + DropPath + Residual Connect
x = x + self.drop_path(self.attn(self.norm1(x)))
# Norm + MLP + DropPath + Residual Connect
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class VisionTransformer(nn.Layer):
def __init__(self, img_size=224, patch_size=16, in_chans=3, class_dim=100, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4, qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer='nn.LayerNorm', epsilon=1e-5, **args):
super().__init__()
self.class_dim = class_dim
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.pos_embed = self.create_parameter(
shape=(1, num_patches + 1, embed_dim), default_initializer=zeros_)
self.add_parameter("pos_embed", self.pos_embed)
self.cls_token = self.create_parameter(
shape=(1, 1, embed_dim), default_initializer=zeros_)
self.add_parameter("cls_token", self.cls_token)
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x for x in paddle.linspace(0, drop_path_rate, depth)]
self.blocks = nn.LayerList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, epsilon=epsilon)
for i in range(depth)])
self.norm = eval(norm_layer)(embed_dim, epsilon=epsilon)
# Classifier head
self.head = nn.Linear(
embed_dim, class_dim) if class_dim > 0 else Identity()
trunc_normal_(self.pos_embed)
trunc_normal_(self.cls_token)
self.apply(self._init_weights)
    # parameter initialization
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight)
if isinstance(m, nn.Linear) and m.bias is not None:
zeros_(m.bias)
elif isinstance(m, nn.LayerNorm):
zeros_(m.bias)
ones_(m.weight)
    # extract image features
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand((B, -1, -1))
x = paddle.concat((cls_tokens, x), axis=1)
x = x + self.pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
```
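A minimal smoke test for the model above (hyperparameters are illustrative, chosen so the shapes work out for 32×32 inputs; they are not taken from the repo's training config):
```python
import paddle
from models.modeling import VisionTransformer

model = VisionTransformer(img_size=32, patch_size=4, in_chans=3, class_dim=10,
                          embed_dim=192, depth=4, num_heads=3)
x = paddle.randn([2, 3, 32, 32])  # a fake batch of two CIFAR-sized images
logits = model(x)
print(logits.shape)               # -> [2, 10]
```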
#### File: R-Drop-Paddle/utils/data_utils.py
```python
import logging
from paddle.vision import transforms, datasets
from paddle.io import DataLoader
def get_loader(args):
transform_train = transforms.Compose([
transforms.RandomResizedCrop((args.img_size, args.img_size), scale=(0.05, 1.0)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
transform_test = transforms.Compose([
transforms.Resize((args.img_size, args.img_size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
if args.dataset == "cifar10":
trainset = datasets.Cifar10(mode="train",
download=True,
transform=transform_train)
testset = datasets.Cifar10(mode="test",
download=True,
transform=transform_test)
else:
trainset = datasets.Cifar100(mode="train",
download=True,
transform=transform_train)
testset = datasets.Cifar100(mode="test",
download=True,
transform=transform_test)
train_loader = DataLoader(trainset,
batch_size=args.train_batch_size,
num_workers=1,
use_shared_memory=True)
test_loader = DataLoader(testset,
batch_size=args.eval_batch_size,
num_workers=1,
use_shared_memory=True)
return train_loader, test_loader
``` |
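`get_loader` only reads four attributes from `args`, so a lightweight namespace is enough to exercise it (batch sizes below are arbitrary; the first call downloads CIFAR-10):
```python
from types import SimpleNamespace
from utils.data_utils import get_loader

args = SimpleNamespace(img_size=32, dataset="cifar10",
                       train_batch_size=64, eval_batch_size=64)
train_loader, test_loader = get_loader(args)
images, labels = next(iter(train_loader))
print(images.shape)  # -> [64, 3, 32, 32]
```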
{
"source": "108806/drf_adv_test",
"score": 3
} |
#### File: app/core/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email: raise ValueError('No valid email provided')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new superuser"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class TestUser(models.Model):
"""Delme asap"""
field = models.EmailField(max_length=255, unique=True)
``` |
{
"source": "108anup/pubplot",
"score": 2
} |
#### File: pubplot/examples/barplot.py
```python
from pubplot import Document
from pubplot.document_classes import acm_sigconf
def single_plot():
style = Document(acm_sigconf)
fig, ax = style.subfigures()
ax.bar(range(11), range(11))
fig.save('single_barplot')
def fill_subplot_axes(axes, label=False):
for ax in axes:
ax.bar(range(11), [-i for i in range(11)])
if label:
ax.set_xlabel('lalala')
ax.set_ylabel('lalala')
def subplots(nrows, ncols, label=False):
style = Document(acm_sigconf)
fig, ax = style.subfigures(nrows, ncols, width=style.textwidth)
fill_subplot_axes(ax, label=label)
fig.save('subbarplots1')
fig, ax = style.subfigures(nrows, ncols)
fill_subplot_axes(ax, label=label)
fig.save('subbarplots2')
def main():
single_plot()
subplots(2, 3, label=True)
if __name__ == '__main__':
main()
```
#### File: pubplot/pubplot/helpers.py
```python
import matplotlib as mpl
class RCParamWrapper(object):
"""Matplotlib object wrapper for nonglobal RCParams.
    This wraps Matplotlib objects, which allows us to set rcParams per object.
    Optionally, a function that returns the object can be supplied so that the
    object is lazily initialized.
Attributes:
obj: A matplotlib object or a function that returns such object
rc: Matplotlib RCparams
"""
def __init__(self, obj, rc):
if callable(obj): # lazy initialization
self.lazy_obj = obj
self.obj = None
else:
self.obj = obj
self.lazy_obj = None
self.rc = RCParams(rc)
def __getattr__(self, item):
if self.obj is None:
with mpl.rc_context(rc=self.rc.get_rc_to_function(item)):
self.obj = self.lazy_obj()
attr = getattr(self.obj, item)
if not callable(attr):
return attr
# the following function wraps whatever method is being called in an
# rc_context, this enforces the rcparams while being transparent to the
# user
def method(*args, **kwargs):
with mpl.rc_context(rc=self.rc.get_rc_to_function(item)):
return attr(*args, **kwargs)
return method
def dict_select(my_dict, term, expect=True):
return {k: v for k, v in my_dict.items() if expect == k.startswith(term)}
class RCParams(object):
"""Handle plot-specific rcParams
Stores rcParams, optionally filtering plot-specific options.
Args:
rc_dict: rcParams style dict. With plot-specific options prepended with
the plot type.
Attributes:
rc_dict: rcParams style dict. With plot-specific options prepended with
the plot type.
"""
def __init__(self, rc_dict):
if isinstance(rc_dict, RCParams):
self.rc_dict = rc_dict.rc_dict
else:
self.rc_dict = rc_dict
def get_rc_to_function(self, func):
plain_rc = dict_select(self.rc_dict, ':', expect=False)
func_rc = dict_select(self.rc_dict, ':{}:'.format(func))
func_opt_len = len(func)+2
func_rc = {k[func_opt_len:]: v for k, v in func_rc.items()}
plain_rc.update(func_rc)
return plain_rc
``` |
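To make the `:func:`-prefixed key convention concrete, a small sketch of how `RCParams.get_rc_to_function` merges plain and per-function options (keys are made up):
```python
from pubplot.helpers import RCParams

rc = RCParams({'font.size': 8, ':bar:lines.linewidth': 2})
print(rc.get_rc_to_function('bar'))
# -> {'font.size': 8, 'lines.linewidth': 2}
print(rc.get_rc_to_function('plot'))
# -> {'font.size': 8}  (the :bar: option is filtered out)
```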
{
"source": "1090504117/PyStructureLearning",
"score": 4
} |
#### File: 1090504117/PyStructureLearning/DijkstraPath.py
```python
processed = {}
processing = {}
path_nums = {}
parents = {}
graph = {}
graph["Start"] = {}
graph["Start"]["A"] = 6
graph["Start"]["B"] = 2
graph["Start"]["C"] = 3
graph["A"] = {}
graph["A"]["Fin"] = 2
graph["B"] = {}
graph["B"]["A"] = 3
graph["B"]["Fin"] = 5
graph["C"] = {}
graph["C"]["A"] = 3
graph["C"]["B"] = 1
graph["C"]["Fin"] = 3
graph["Fin"] = {} # 终点没有邻居
def print_path(end_key):
    key = end_key
    path_str = key
    while key in parents:
        key = parents[key]
        path_str = key + '-->' + path_str
    print(path_str)
def find_lowest_cost_node(path_nums):
    if not processing:
        return None
    lowest_cost_key = next(iter(processing))
    for key in processing:
        if path_nums[key] < path_nums[lowest_cost_key]:
            lowest_cost_key = key
    return lowest_cost_key
def func(start_key, end_key):
path_nums[start_key] = 0
processing[start_key] = True
lowest_cost_key = None
while True:
lowest_cost_key = find_lowest_cost_node(path_nums)
if lowest_cost_key == end_key or not lowest_cost_key:
            if end_key in path_nums:
return path_nums[end_key]
else:
return -1
del processing[lowest_cost_key]
processed[lowest_cost_key] = True
for key, value in graph[lowest_cost_key].items():
            if key not in processed:
processing[key] = True
                if key not in path_nums or (path_nums[lowest_cost_key] + value) < path_nums[key]:
path_nums[key] = path_nums[lowest_cost_key] + value
parents[key] = lowest_cost_key
'''
Dictionary usage notes: use `key in d` (dict.has_key is Python 2 only).
Be careful when comparing None with numbers.
Possible improvement: restructure the dictionaries;
`processing` and `path_nums` need to stay separate.
'''
def Test():
    print(func('Start', 'Fin'))
print_path('Fin')
``` |
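The module defines `Test()` but never calls it. On the hard-coded graph above, the cheapest Start→Fin route costs 6 and goes through C (Start→C→Fin beats Start→B→A→Fin at 7), so invoking it by hand should print:
```python
from DijkstraPath import Test

Test()
# prints:
# 6
# Start-->C-->Fin
```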
{
"source": "109318083/Speech_pre",
"score": 2
} |
#### File: Speech_pre/test/ctc_tensorflow_test.py
```python
import os
import operator
import random
import time
import io
import numpy as np
import tensorflow as tf
from audio_reader import AudioReader
from file_logger import FileLogger
from utils import FIRST_INDEX, sparse_tuple_from
from utils import convert_inputs_to_ctc_format
sample_rate = 16000
# Some configs
num_features = 26 # log filter bank or MFCC features
# Accounting the 0th index + space + blank label = 28 characters
num_classes = ord('z') - ord('a') + 1 + 1 + 1
# Hyper-parameters
num_epochs = 1
num_hidden = 256
batch_size = 346
num_examples = 1
num_batches_per_epoch = 10
audio = AudioReader(audio_dir='test', cache_dir='test_cache', sample_rate=sample_rate)
file_logger = FileLogger('out_test.tsv', ['curr_epoch', 'train_cost', 'train_ler', 'val_cost', 'val_ler'])
def next_batch(bs=batch_size, train=True):
x_batch = []
y_batch = []
seq_len_batch = []
original_batch = []
i=0
for k in range(bs):
ut_length_dict = dict([(k, len(v['target'])) for (k, v) in audio.cache.items()])
utterances = sorted(ut_length_dict.items(), key=operator.itemgetter(0))
test_index = 346
if train:
utterances = [a[0] for a in utterances[test_index:]]
else:
utterances = [a[0] for a in utterances[:test_index]]
training_element = audio.cache[utterances[i]]
target_text = training_element['target']
audio_buffer = training_element['audio']
x, y, seq_len, original = convert_inputs_to_ctc_format(audio_buffer,
sample_rate,
'whatever',
num_features)
x_batch.append(x)
y_batch.append(y)
seq_len_batch.append(seq_len)
original_batch.append(original)
i+=1
y_batch = sparse_tuple_from(y_batch)
seq_len_batch = np.array(seq_len_batch)[:, 0]
for i, pad in enumerate(np.max(seq_len_batch) - seq_len_batch):
x_batch[i] = np.pad(x_batch[i], ((0, 0), (0, pad), (0, 0)), mode='constant', constant_values=0)
x_batch = np.concatenate(x_batch, axis=0)
return x_batch, y_batch, seq_len_batch, original_batch
def decode_batch(d, original, phase='training'):
    for jj in range(batch_size):  # decode and log every utterance in the batch
values = d.values[np.where(d.indices[:, 0] == jj)]
str_decoded = ''.join([chr(x) for x in np.asarray(values) + FIRST_INDEX])
# Replacing blank label to none
str_decoded = str_decoded.replace(chr(ord('z') + 1), '')
# Replacing space label to space
str_decoded = str_decoded.replace(chr(ord('a') - 1), ' ')
print(str_decoded)
output_txt = io.open("output.txt", "a", encoding="utf-8")
result = str(jj+1) + ' ' + str_decoded + '\n'
output_txt.writelines(result)
output_txt.close()
def run_ctc():
# make sure the values match the ones in generate_audio_cache.py
merged = tf.summary.merge_all()
with tf.Session() as session:
saver = tf.train.import_meta_graph('model/ctc-5615-236.meta')
saver.restore(session, tf.train.latest_checkpoint('model/'))
graph = tf.get_default_graph()
inputs, targets, seq_len, original = next_batch(train=False)
input_x = graph.get_operation_by_name("inputs").outputs[0]
val_feed = {"inputs:0": inputs, "seq_len:0": seq_len}
logits = graph.get_tensor_by_name("Reshape_1:0")
decoded, log_prob = tf.nn.ctc_greedy_decoder(logits, seq_len)
d = session.run(decoded[0], feed_dict=val_feed)
decode_batch(d, original, phase='validation')
if __name__ == '__main__':
run_ctc()
``` |
{
"source": "1098994933/cgcnn",
"score": 2
} |
#### File: cgcnn/cgcnn/cif.py
```python
from __future__ import print_function, division
import csv
import functools
import json
import os
import random
import warnings
import numpy as np
import torch
from pymatgen.core.structure import Structure
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.sampler import SubsetRandomSampler
class GaussianDistance(object):
"""
Expands the distance by Gaussian basis.
Unit: angstrom
"""
def __init__(self, dmin, dmax, step, var=None):
"""
Parameters
----------
dmin: float
Minimum interatomic distance
dmax: float
Maximum interatomic distance
        step: float
            Step size for the Gaussian filter
        var: float, optional
            Variance of the Gaussian basis (defaults to ``step``)
        """
assert dmin < dmax
assert dmax - dmin > step
self.filter = np.arange(dmin, dmax+step, step)
if var is None:
var = step
self.var = var
def expand(self, distances):
"""
        Apply a Gaussian distance filter to a numpy distance array
Parameters
----------
        distances: np.ndarray
            A distance matrix of any shape
Returns
-------
expanded_distance: shape (n+1)-d array
Expanded distance matrix with the last dimension of length
len(self.filter)
"""
return np.exp(-(distances[..., np.newaxis] - self.filter)**2 /
self.var**2)
class AtomInitializer(object):
"""
    Base class for initializing the vector representation for atoms.
!!! Use one AtomInitializer per dataset !!!
"""
def __init__(self, atom_types):
self.atom_types = set(atom_types)
self._embedding = {}
def get_atom_fea(self, atom_type):
assert atom_type in self.atom_types
return self._embedding[atom_type]
def load_state_dict(self, state_dict):
self._embedding = state_dict
self.atom_types = set(self._embedding.keys())
self._decodedict = {idx: atom_type for atom_type, idx in
self._embedding.items()}
def state_dict(self):
return self._embedding
def decode(self, idx):
if not hasattr(self, '_decodedict'):
self._decodedict = {idx: atom_type for atom_type, idx in
self._embedding.items()}
return self._decodedict[idx]
class AtomCustomJSONInitializer(AtomInitializer):
"""
Initialize atom feature vectors using a JSON file, which is a python
dictionary mapping from element number to a list representing the
feature vector of the element.
Parameters
----------
elem_embedding_file: str
The path to the .json file
"""
def __init__(self, elem_embedding_file):
with open(elem_embedding_file) as f:
elem_embedding = json.load(f)
elem_embedding = {int(key): value for key, value
in elem_embedding.items()}
atom_types = set(elem_embedding.keys())
super(AtomCustomJSONInitializer, self).__init__(atom_types)
for key, value in elem_embedding.items():
self._embedding[key] = np.array(value, dtype=float)
class CIFData(Dataset):
"""
The CIFData dataset is a wrapper for a dataset where the crystal structures
are stored in the form of CIF files. The dataset should have the following
directory structure:
root_dir
├── id_prop.csv
├── atom_init.json
├── id0.cif
├── id1.cif
├── ...
    id_prop.csv: a CSV file with two columns. The first column records a
    unique ID for each crystal, and the second column records the value of
target property.
atom_init.json: a JSON file that stores the initialization vector for each
element.
    ID.cif: a CIF file that records the crystal structure, where ID is the
unique ID for the crystal.
Parameters
----------
root_dir: str
The path to the root directory of the dataset
max_num_nbr: int
The maximum number of neighbors while constructing the crystal graph
radius: float
The cutoff radius for searching neighbors
dmin: float
The minimum distance for constructing GaussianDistance
step: float
The step size for constructing GaussianDistance
random_seed: int
Random seed for shuffling the dataset
Returns
-------
atom_fea: torch.Tensor shape (n_i, atom_fea_len)
nbr_fea: torch.Tensor shape (n_i, M, nbr_fea_len)
nbr_fea_idx: torch.LongTensor shape (n_i, M)
target: torch.Tensor shape (1, )
cif_id: str or int
"""
def __init__(self, root_dir, max_num_nbr=12, radius=8, dmin=0, step=0.2,
random_seed=123):
self.root_dir = root_dir
self.max_num_nbr, self.radius = max_num_nbr, radius
assert os.path.exists(root_dir), 'root_dir does not exist!'
id_prop_file = os.path.join(self.root_dir, 'id_prop.csv')
assert os.path.exists(id_prop_file), 'id_prop.csv does not exist!'
with open(id_prop_file) as f:
reader = csv.reader(f)
self.id_prop_data = [row for row in reader]
random.seed(random_seed)
random.shuffle(self.id_prop_data)
atom_init_file = os.path.join(self.root_dir, 'atom_init.json')
assert os.path.exists(atom_init_file), 'atom_init.json does not exist!'
self.ari = AtomCustomJSONInitializer(atom_init_file)
self.gdf = GaussianDistance(dmin=dmin, dmax=self.radius, step=step)
def __len__(self):
return len(self.id_prop_data)
@functools.lru_cache(maxsize=None) # Cache loaded structures
def __getitem__(self, idx):
cif_id, target = self.id_prop_data[idx]
crystal = Structure.from_file(os.path.join(self.root_dir,
cif_id+'.cif'))
atom_fea = np.vstack([self.ari.get_atom_fea(crystal[i].specie.number)
for i in range(len(crystal))])
all_nbrs = crystal.get_all_neighbors(self.radius, include_index=True)
all_nbrs = [sorted(nbrs, key=lambda x: x[1]) for nbrs in all_nbrs]
nbr_fea_idx, nbr_fea = [], []
for nbr in all_nbrs:
if len(nbr) < self.max_num_nbr:
warnings.warn('{} not find enough neighbors to build graph. '
'If it happens frequently, consider increase '
'radius.'.format(cif_id))
nbr_fea_idx.append(list(map(lambda x: x[2], nbr)) +
[0] * (self.max_num_nbr - len(nbr)))
nbr_fea.append(list(map(lambda x: x[1], nbr)) +
[self.radius + 1.] * (self.max_num_nbr -
len(nbr)))
else:
nbr_fea_idx.append(list(map(lambda x: x[2],
nbr[:self.max_num_nbr])))
nbr_fea.append(list(map(lambda x: x[1],
nbr[:self.max_num_nbr])))
nbr_fea_idx, nbr_fea = np.array(nbr_fea_idx), np.array(nbr_fea)
nbr_fea = self.gdf.expand(nbr_fea)
atom_fea = torch.Tensor(atom_fea)
nbr_fea = torch.Tensor(nbr_fea)
nbr_fea_idx = torch.LongTensor(nbr_fea_idx)
target = torch.Tensor([float(target)])
return (atom_fea, nbr_fea, nbr_fea_idx), target, cif_id
if __name__ == '__main__':
cif_data = CIFData(r"C:\Users\10989\PycharmProjects\8_CGCNN\cgcnn\data\sample-classification")
idx = 4
(atom_fea, nbr_fea, nbr_fea_idx), target, cif_id = cif_data.__getitem__(idx)
print("atom_fea:")
print(atom_fea.shape)
print(nbr_fea.shape)
print(nbr_fea_idx.shape)
print(target)
print(cif_id)
```
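A small sketch of the Gaussian basis expansion in isolation (distances are made up; it assumes the package layout above):
```python
import numpy as np
from cgcnn.cif import GaussianDistance

gdf = GaussianDistance(dmin=0, dmax=8, step=0.2)
d = np.array([[1.5, 3.0]])  # a 1x2 distance matrix
expanded = gdf.expand(d)
print(expanded.shape)       # -> (1, 2, 41): one Gaussian per filter point
```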
#### File: cgcnn/pre-trained/solve.py
```python
from itertools import combinations, permutations
# def get_max_profit():
# pass
a = [1, 2, 3, 4]
b = [2, 4, 6, 8]
c = list(combinations(a, 2))
if __name__ == '__main__':
# N = input()
# startTime = input().split()
# endTime = input().split()
# profit = input().split()
N = 4
print(list(combinations([i for i in range(N)], 2)))
startTime = [1,5,4,1]
endTime = [2,6,6,5]
profit = [10,15,20,15]
average = [profit[i]/(endTime[i]-startTime[i]) for i in range(len(startTime))]
print(average)
    # choose all possible start-time index pairs (list indices take ints, not tuples)
    print([(startTime[i], startTime[j]) for i, j in combinations(range(N), 2)])
``` |
{
"source": "1099policy/ten99policy-python",
"score": 2
} |
#### File: ten99policy-python/t99/error.py
```python
from __future__ import absolute_import, division, print_function
import t99
from t99.six import python_2_unicode_compatible
@python_2_unicode_compatible
class T99Error(Exception):
def __init__(
self,
message=None,
http_body=None,
http_status=None,
json_body=None,
headers=None,
code=None,
):
super(T99Error, self).__init__(message)
if http_body and hasattr(http_body, "decode"):
try:
http_body = http_body.decode("utf-8")
except BaseException:
http_body = (
"<Could not decode body as utf-8. "
"Please report to <EMAIL>>"
)
self._message = message
self.http_body = http_body
self.http_status = http_status
self.json_body = json_body
self.headers = headers or {}
self.code = code
self.request_id = self.headers.get("request-id", None)
self.error = self.construct_error_object()
def __str__(self):
msg = self._message or "<empty message>"
if self.request_id is not None:
return u"Request {0}: {1}".format(self.request_id, msg)
else:
return msg
# Returns the underlying `Exception` (base class) message, which is usually
# the raw message returned by T99's API. This was previously available
# in python2 via `error.message`. Unlike `str(error)`, it omits "Request
# req_..." from the beginning of the string.
@property
def user_message(self):
return self._message
def __repr__(self):
return "%s(message=%r, http_status=%r, request_id=%r)" % (
self.__class__.__name__,
self._message,
self.http_status,
self.request_id,
)
def construct_error_object(self):
if (
self.json_body is None
or "error" not in self.json_body
or not isinstance(self.json_body["error"], dict)
):
return None
return t99.api_resources.error_object.ErrorObject.construct_from(
self.json_body["error"], t99.api_key
)
class APIError(T99Error):
pass
class APIConnectionError(T99Error):
def __init__(
self,
message,
http_body=None,
http_status=None,
json_body=None,
headers=None,
code=None,
should_retry=False,
):
super(APIConnectionError, self).__init__(
message, http_body, http_status, json_body, headers, code
)
self.should_retry = should_retry
class T99ErrorWithParamCode(T99Error):
def __repr__(self):
return (
"%s(message=%r, param=%r, code=%r, http_status=%r, "
"request_id=%r)"
% (
self.__class__.__name__,
self._message,
self.param,
self.code,
self.http_status,
self.request_id,
)
)
class IdempotencyError(T99Error):
pass
class InvalidRequestError(T99ErrorWithParamCode):
def __init__(
self,
message,
param,
code=None,
http_body=None,
http_status=None,
json_body=None,
headers=None,
):
super(InvalidRequestError, self).__init__(
message, http_body, http_status, json_body, headers, code
)
self.param = param
class AuthenticationError(T99Error):
pass
class PermissionError(T99Error):
pass
class RateLimitError(T99Error):
pass
class SignatureVerificationError(T99Error):
def __init__(self, message, sig_header, http_body=None):
super(SignatureVerificationError, self).__init__(message, http_body)
self.sig_header = sig_header
``` |
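For illustration, how the hierarchy behaves when raised and caught (field values are made up):
```python
from t99.error import InvalidRequestError, T99Error

try:
    raise InvalidRequestError('Missing field', param='contractor',
                              code='missing_field', http_status=400)
except T99Error as e:  # every error above derives from T99Error
    print(e)           # -> Missing field (no request-id header was set)
    print(repr(e))
```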
{
"source": "10allday-Software/marketplace-env",
"score": 3
} |
#### File: 10allday-Software/marketplace-env/link-sources.py
```python
import argparse
import os
def main():
parser = argparse.ArgumentParser(description='link to source code')
parser.add_argument(
'--root',
help='root directory where you cloned the source code')
args = parser.parse_args()
if not args.root:
        args.root = input('What directory is your source code in? ')
args.root = os.path.abspath(os.path.expanduser(args.root))
env_root = os.path.dirname(__file__)
errors = False
for name in [
'fireplace',
'solitude',
'solitude-auth',
'spartacus',
'webpay',
'zamboni',
'zippy',
]:
full_name = os.path.join(args.root, name)
if not os.path.exists(full_name):
            print()
            print(
                '** Repository at {path} does not exist. Run:\n'
                'git clone <EMAIL>:10allday-Software/{repo_name}.git {path}'
                .format(path=full_name, repo_name=name)
            )
            print()
errors = True
continue
dest = os.path.join(env_root, 'docker', 'source-links', name)
        print('{dest} -> {source}'.format(source=full_name, dest=dest))
if os.path.exists(dest):
            print('(already exists)')
else:
os.symlink(full_name, dest)
if errors:
parser.error('Not all symlinks were successful')
if __name__ == '__main__':
main()
``` |
{
"source": "10allday-Software/pontoon",
"score": 3
} |
#### File: checks/tests/test_pontoon_db.py
```python
from unittest.mock import MagicMock
import pytest
from pontoon.checks.libraries.pontoon_db import get_max_length, run_checks
@pytest.fixture()
def get_entity_mock():
"""
Create an entity mock with comment, resource.path and extension.
"""
def _f(extension, comment="", string="", allows_empty_translations=False):
entity = MagicMock()
entity.comment = comment
entity.string = string
entity.resource.format = extension
entity.resource.path = "test." + extension
entity.resource.allows_empty_translations = allows_empty_translations
return entity
yield _f
@pytest.mark.parametrize(
"comment, expected",
(
("MAX_LENGTH: 24", 24),
("MAX_LENGTH: 4", 4),
("MAX_LENGTH: 4", 4),
("MAX_LENGTH:4 ", 4),
("MAX_LENGTH: 42 ", 42),
("MAX_LENGTH: 42\n MAX_LENGTH: 10 ", 42),
("MAX_LENGTH: 123 characters", 123),
("MAX_LENGTH: 4\naaaa", 4),
("bbbb \n MAX_LENGTH: 4\naaaa", 4),
("MAX_LENGTH: 4 characters\naaaa", 4),
("bbbb\nMAX_xLENGTH: 4 characters\naaaa", None),
("bbbb\nMAX_LENGTH: z characters\naaaa", None),
("bbbb\nMAX_LENGTH:\n 4 characters\naaaa", None),
),
)
def test_too_long_translation_max_length(comment, expected):
"""
Checks should return an error if a translation is too long.
"""
assert get_max_length(comment) == expected
def test_too_long_translation_valid_length(get_entity_mock):
"""
Checks shouldn't return an error if a translation isn't too long.
"""
assert run_checks(get_entity_mock("lang", "MAX_LENGTH: 4"), "", "0123") == {}
def test_too_long_translation_html_tags(get_entity_mock):
"""
HTML tags can't be included in the MAX_LENGTH check.
"""
assert (
run_checks(
get_entity_mock("lang", "MAX_LENGTH: 4"),
"",
'<a href="pontoon.mozilla.org">01</a><i>23</i>',
)
== {}
)
assert run_checks(
get_entity_mock("lang", "MAX_LENGTH: 4"),
"",
'<a href="pontoon.mozilla.org">012</a><i>23</i>',
) == {"pErrors": ["Translation too long"]}
# Check if entities are causing false errors
assert (
run_checks(
get_entity_mock("lang", "MAX_LENGTH: 4"),
"",
'<a href="pontoon.mozilla.org">ł </a><i>ń </i>',
)
== {}
)
assert run_checks(
get_entity_mock("lang", "MAX_LENGTH: 4"),
"",
'<a href="pontoon.mozilla.org">ł </a><i>ń </i>',
) == {"pErrors": ["Translation too long"]}
def test_too_long_translation_invalid_length(get_entity_mock):
"""
Checks should return an error if a translation is too long.
"""
assert run_checks(get_entity_mock("lang", "MAX_LENGTH: 2"), "", "0123") == {
"pErrors": ["Translation too long"]
}
def test_ending_newline(get_entity_mock):
"""
Original and translation in a PO file must either both end
    in a newline, or neither should.
"""
assert run_checks(get_entity_mock("po"), "Original", "Translation\n") == {
"pErrors": ["Ending newline mismatch"]
}
assert run_checks(get_entity_mock("po"), "Original\n", "Translation") == {
"pErrors": ["Ending newline mismatch"]
}
assert run_checks(get_entity_mock("po"), "Original\n", "Translation\n") == {}
assert run_checks(get_entity_mock("po"), "Original", "Translation") == {}
def test_empty_translations(get_entity_mock):
"""
Empty translations shouldn't be allowed for some extensions.
"""
assert run_checks(get_entity_mock("po"), "", "") == {
"pErrors": ["Empty translations are not allowed"]
}
def test_lang_newlines(get_entity_mock):
"""Newlines aren't allowed in lang files"""
assert run_checks(get_entity_mock("lang"), "", "aaa\nbbb") == {
"pErrors": ["Newline characters are not allowed"]
}
assert run_checks(get_entity_mock("po"), "", "aaa\nbbb") == {}
def test_ftl_parse_error(get_entity_mock):
"""Invalid FTL strings are not allowed"""
assert run_checks(get_entity_mock("ftl", string="key = value"), "", "key =") == {
"pErrors": ['Expected message "key" to have a value or attributes']
}
assert (
run_checks(
get_entity_mock("ftl", string="key = value"), "", "key = translation"
)
== {}
)
def test_ftl_non_localizable_entries(get_entity_mock):
"""Non-localizable entries are not allowed"""
assert run_checks(get_entity_mock("ftl", string="key = value"), "", "[[foo]]") == {
"pErrors": ["Expected an entry start"]
}
def test_ftl_id_missmatch(get_entity_mock):
"""ID of the source string and translation must be the same"""
assert run_checks(
get_entity_mock("ftl", string="key = value"), "", "key1 = translation"
) == {"pErrors": ["Translation key needs to match source string key"]}
```
#### File: homepage/migrations/0002_initial_data.py
```python
from django.db import migrations
from pathlib import Path
def get_homepage_content():
module_dir = Path(__file__).parent.parent
file_path = module_dir / "templates/homepage_content.html"
return file_path.read_text()
def create_homepage_entry(apps, schema_editor):
Homepage = apps.get_model("homepage", "Homepage")
if Homepage.objects.count() == 0:
Homepage.objects.create(text=get_homepage_content(), title="Localize Mozilla")
def remove_homepage_entry(apps, schema_editor):
Homepage = apps.get_model("homepage", "Homepage")
    homepage = Homepage.objects.last()
    if homepage is None:
        # nothing to delete; .last() returns None instead of raising
        return
    homepage.delete()
class Migration(migrations.Migration):
dependencies = [
("homepage", "0001_squashed_0002_add_homepage_content"),
]
operations = [
migrations.RunPython(
code=create_homepage_entry, reverse_code=remove_homepage_entry,
),
]
```
#### File: sync/tests/__init__.py
```python
import os.path
from unittest.mock import patch, PropertyMock
import factory
from pontoon.base.models import Project
from pontoon.base.tests import (
EntityFactory,
LocaleFactory,
ProjectFactory,
RepositoryFactory,
ResourceFactory,
TestCase,
TranslationFactory,
)
from pontoon.base.utils import aware_datetime
from pontoon.sync.changeset import ChangeSet
from pontoon.sync.models import ProjectSyncLog, RepositorySyncLog, SyncLog
from pontoon.sync.vcs.models import VCSEntity, VCSProject, VCSResource, VCSTranslation
FAKE_CHECKOUT_PATH = os.path.join(os.path.dirname(__file__), "fake-checkout",)
PROJECT_CONFIG_CHECKOUT_PATH = os.path.join(
os.path.dirname(__file__), "project-config-checkout",
)
class VCSEntityFactory(factory.Factory):
resource = None
key = "key"
string = "string"
string_plural = ""
comments = factory.List([])
source = factory.List([])
order = factory.Sequence(lambda n: n)
class Meta:
model = VCSEntity
class VCSTranslationFactory(factory.Factory):
key = factory.Sequence(lambda n: f"key-{n}")
strings = factory.Dict({})
comments = factory.List([])
fuzzy = False
class Meta:
model = VCSTranslation
class SyncLogFactory(factory.django.DjangoModelFactory):
class Meta:
model = SyncLog
class ProjectSyncLogFactory(factory.django.DjangoModelFactory):
sync_log = factory.SubFactory(SyncLogFactory)
project = factory.SubFactory(ProjectFactory)
class Meta:
model = ProjectSyncLog
class RepositorySyncLogFactory(factory.django.DjangoModelFactory):
project_sync_log = factory.SubFactory(ProjectSyncLogFactory)
repository = factory.SubFactory(RepositoryFactory)
class Meta:
model = RepositorySyncLog
class FakeCheckoutTestCase(TestCase):
"""Parent class for tests that use the fake l10n repo checkout."""
def setUp(self):
self.now = aware_datetime(1970, 1, 1)
timezone_patch = patch("pontoon.sync.tasks.timezone")
self.mock_timezone = timezone_patch.start()
self.addCleanup(timezone_patch.stop)
self.mock_timezone.now.return_value = self.now
self.translated_locale = LocaleFactory.create(code="translated-locale")
self.inactive_locale = LocaleFactory.create(code="inactive-locale")
self.repository = RepositoryFactory()
self.db_project = ProjectFactory.create(
name="db-project",
locales=[self.translated_locale],
repositories=[self.repository],
)
self.main_db_resource = ResourceFactory.create(
project=self.db_project, path="main.lang", format="lang"
)
self.other_db_resource = ResourceFactory.create(
project=self.db_project, path="other.lang", format="lang"
)
self.missing_db_resource = ResourceFactory.create(
project=self.db_project, path="missing.lang", format="lang"
)
self.main_db_entity = EntityFactory.create(
resource=self.main_db_resource,
string="Source String",
key="Source String",
obsolete=False,
)
self.other_db_entity = EntityFactory.create(
resource=self.other_db_resource,
string="Other Source String",
key="Other Source String",
obsolete=False,
)
self.main_db_translation = TranslationFactory.create(
entity=self.main_db_entity,
plural_form=None,
locale=self.translated_locale,
string="Translated String",
date=aware_datetime(1970, 1, 1),
approved=True,
extra={"tags": []},
)
# Load paths from the fake locale directory.
checkout_path_patch = patch.object(
Project,
"checkout_path",
new_callable=PropertyMock,
return_value=FAKE_CHECKOUT_PATH,
)
checkout_path_patch.start()
self.addCleanup(checkout_path_patch.stop)
vcs_changed_files = {
self.main_db_resource.path: [self.translated_locale],
self.other_db_resource.path: [self.translated_locale],
self.missing_db_resource.path: [self.translated_locale],
}
changed_files_patch = patch.object(
VCSProject,
"changed_files",
new_callable=PropertyMock,
return_value=vcs_changed_files,
)
changed_files_patch.start()
self.addCleanup(changed_files_patch.stop)
source_repository = patch.object(
Project,
"source_repository",
new_callable=PropertyMock,
return_value=self.db_project.repositories.all()[0],
)
source_repository.start()
self.addCleanup(source_repository.stop)
self.vcs_project = VCSProject(self.db_project)
self.main_vcs_resource = self.vcs_project.resources[self.main_db_resource.path]
self.other_vcs_resource = self.vcs_project.resources[
self.other_db_resource.path
]
self.missing_vcs_resource = self.vcs_project.resources[
self.missing_db_resource.path
]
self.main_vcs_entity = self.main_vcs_resource.entities["Source String"]
self.main_vcs_translation = self.main_vcs_entity.translations[
"translated-locale"
]
# Mock VCSResource.save() for each resource to avoid altering
# the filesystem.
resource_save_patch = patch.object(VCSResource, "save")
resource_save_patch.start()
self.addCleanup(resource_save_patch.stop)
self.changeset = ChangeSet(
self.db_project,
self.vcs_project,
aware_datetime(1970, 1, 1),
self.translated_locale,
)
```
#### File: tests/utils/test_translations.py
```python
from unittest.mock import MagicMock, patch
import pytest
from pontoon.base.models import Translation
from pontoon.tags.utils import TagsLatestTranslationsTool
def test_util_tags_stats_tool(tag_data_init_kwargs):
# tests instantiation of translations tool
kwargs = tag_data_init_kwargs
tr_tool = TagsLatestTranslationsTool(**kwargs)
for k, v in kwargs.items():
assert getattr(tr_tool, k) == v
@pytest.mark.django_db
def test_util_tags_translation_tool_get_data(
tag_matrix, calculate_tags_latest, tag_test_kwargs,
):
# for different parametrized kwargs, tests that the calculated
# latest data matches expectations from long-hand calculation
name, kwargs = tag_test_kwargs
# calculate expectations
exp = calculate_tags_latest(**kwargs)
# get the data, and coalesce to translations dictionary
tr_tool = TagsLatestTranslationsTool(**kwargs)
data = tr_tool.coalesce(tr_tool.get_data())
# get a pk dictionary of all translations
translations = Translation.objects.select_related("user").in_bulk()
assert len(data) == len(exp)
for k, (pk, date) in exp.items():
assert data[k]["date"] == date
assert data[k]["string"] == translations.get(pk).string
if name.endswith("_exact"):
assert len(data) == 1
elif name.endswith("_no_match"):
assert len(data) == 0
elif name.endswith("_match"):
assert len(data) > 0
elif name.endswith("_contains"):
assert 1 < len(data) < len(tag_matrix["tags"])
elif name == "empty":
pass
else:
raise ValueError(f"Unsupported assertion type: {name}")
@patch("pontoon.tags.utils.TagsLatestTranslationsTool.get_data")
def test_util_tags_translation_tool_data(data_mock):
# ensures latest translation data is coalesced and cached
# correctly
tr_tool = TagsLatestTranslationsTool()
# set up mock return for get_data that can be used like
# qs.iterator()
data_m = [
dict(entity__resource__tag="foo"),
dict(entity__resource__tag="bar"),
]
data_m2 = [dict(entity__resource__tag="baz")]
iterator_m = MagicMock()
iterator_m.iterator.return_value = data_m
data_mock.return_value = iterator_m
# get data from the tool
result = tr_tool.data
# we got back data from data_m coalesced to a dictionary
# with the groupby fields as keys
assert result == dict(foo=data_m[0], bar=data_m[1])
assert iterator_m.iterator.called
# lets reset the mock and change the return value
iterator_m.reset_mock()
iterator_m.iterator.return_value = data_m2
# and get the data again
result = tr_tool.data
# which was cached, so nothing changed
assert not iterator_m.iterator.called
assert result == dict(foo=data_m[0], bar=data_m[1])
# after deleting the cache...
del tr_tool.__dict__["data"]
# ...we get the new value
result = tr_tool.data
assert iterator_m.iterator.called
assert result == dict(baz=data_m2[0])
@pytest.mark.django_db
def test_util_tags_translation_tool_groupby(
tag_matrix, tag_test_kwargs, calculate_tags_latest, user_a, user_b,
):
name, kwargs = tag_test_kwargs
    # translations have no users by default:
    # assign the first third to user_a and the second third to user_b
total = Translation.objects.count()
    first_third_users = Translation.objects.all()[: total // 3].values_list("pk")
    second_third_users = Translation.objects.all()[
        total // 3 : 2 * total // 3
    ].values_list("pk")
(Translation.objects.filter(pk__in=first_third_users).update(user=user_a))
(Translation.objects.filter(pk__in=second_third_users).update(user=user_b))
# calculate expectations grouped by locale
exp = calculate_tags_latest(groupby="locale", **kwargs)
# calculate data from tool grouped by locale
tr_tool = TagsLatestTranslationsTool(groupby="locale", **kwargs)
data = tr_tool.coalesce(tr_tool.get_data())
# get a pk dictionary of all translations
translations = Translation.objects.select_related("user").in_bulk()
assert len(data) == len(exp)
for k, (pk, date) in exp.items():
# check all of the expected values are correct for the
# translation and user
translation = translations.get(pk)
assert data[k]["date"] == date
assert data[k]["string"] == translation.string
assert data[k]["approved_date"] == translation.approved_date
user = translation.user
if user:
assert data[k]["user__email"] == user.email
assert data[k]["user__first_name"] == user.first_name
else:
assert data[k]["user__email"] is None
assert data[k]["user__first_name"] is None
```
#### File: pontoon/translations/views.py
```python
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.utils.datastructures import MultiValueDictKeyError
from django.views.decorators.http import require_POST
from pontoon.actionlog.models import ActionLog
from pontoon.actionlog.utils import log_action
from pontoon.base import utils
from pontoon.base.models import (
TranslatedResource,
Translation,
)
from pontoon.checks.libraries import run_checks
from pontoon.checks.utils import are_blocking_checks
from pontoon.translations import forms
@require_POST
@utils.require_AJAX
@login_required(redirect_field_name="", login_url="/403")
@transaction.atomic
def create_translation(request):
"""
Create a new translation.
"""
form = forms.CreateTranslationForm(request.POST)
if not form.is_valid():
problems = []
for field, errors in form.errors.items():
problems.append(
'Error validating field `{}`: "{}"'.format(field, " ".join(errors))
)
return JsonResponse(
{"status": False, "message": "\n".join(problems)}, status=400
)
entity = form.cleaned_data["entity"]
string = form.cleaned_data["translation"]
locale = form.cleaned_data["locale"]
plural_form = form.cleaned_data["plural_form"]
original = form.cleaned_data["original"]
ignore_warnings = form.cleaned_data["ignore_warnings"]
approve = form.cleaned_data["approve"]
force_suggestions = form.cleaned_data["force_suggestions"]
paths = form.cleaned_data["paths"]
machinery_sources = form.cleaned_data["machinery_sources"]
project = entity.resource.project
    # Read-only translations cannot be saved
if utils.readonly_exists(project, locale):
return JsonResponse(
{
"status": False,
"message": "Forbidden: This string is in read-only mode.",
},
status=403,
)
translations = Translation.objects.filter(
entity=entity, locale=locale, plural_form=plural_form,
)
same_translations = translations.filter(string=string)
# If same translation exists in the DB, don't save it again.
if same_translations:
return JsonResponse({"status": False, "same": True})
# Look for failed checks.
# Checks are disabled for the tutorial.
use_checks = project.slug != "tutorial"
user = request.user
failed_checks = None
if use_checks:
failed_checks = run_checks(
entity, locale.code, original, string, user.profile.quality_checks,
)
if are_blocking_checks(failed_checks, ignore_warnings):
return JsonResponse({"status": False, "failedChecks": failed_checks})
now = timezone.now()
can_translate = user.can_translate(project=project, locale=locale) and (
not force_suggestions or approve
)
translation = Translation(
entity=entity,
locale=locale,
plural_form=plural_form,
string=string,
user=user,
date=now,
approved=can_translate,
machinery_sources=machinery_sources,
)
if can_translate:
translation.approved_user = user
translation.approved_date = now
translation.save(failed_checks=failed_checks)
log_action(ActionLog.ActionType.TRANSLATION_CREATED, user, translation=translation)
if translations:
translation = entity.reset_active_translation(
locale=locale, plural_form=plural_form,
)
return JsonResponse(
{
"status": True,
"translation": translation.serialize(),
"stats": TranslatedResource.objects.stats(project, paths, locale),
}
)
@utils.require_AJAX
@login_required(redirect_field_name="", login_url="/403")
@transaction.atomic
def delete_translation(request):
"""Delete given translation."""
try:
translation_id = request.POST["translation"]
except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": f"Bad Request: {e}"}, status=400,
)
translation = get_object_or_404(Translation, pk=translation_id)
entity = translation.entity
project = entity.resource.project
locale = translation.locale
# Read-only translations cannot be deleted
if utils.readonly_exists(project, locale):
return JsonResponse(
{
"status": False,
"message": "Forbidden: This string is in read-only mode.",
},
status=403,
)
# Only privileged users or authors can delete translations
if not translation.rejected or not (
request.user.can_translate(locale, project)
or request.user == translation.user
or translation.approved
):
return JsonResponse(
{
"status": False,
"message": "Forbidden: You can't delete this translation.",
},
status=403,
)
translation.delete()
log_action(
ActionLog.ActionType.TRANSLATION_DELETED,
request.user,
entity=entity,
locale=locale,
)
return JsonResponse({"status": True})
@utils.require_AJAX
@login_required(redirect_field_name="", login_url="/403")
@transaction.atomic
def approve_translation(request):
"""Approve given translation."""
try:
t = request.POST["translation"]
ignore_warnings = request.POST.get("ignore_warnings", "false") == "true"
paths = request.POST.getlist("paths[]")
except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": f"Bad Request: {e}"}, status=400,
)
translation = get_object_or_404(Translation, pk=t)
entity = translation.entity
project = entity.resource.project
locale = translation.locale
user = request.user
# Read-only translations cannot be approved
if utils.readonly_exists(project, locale):
return JsonResponse(
{
"status": False,
"message": "Forbidden: This string is in read-only mode.",
},
status=403,
)
if translation.approved:
return JsonResponse(
{
"status": False,
"message": "Forbidden: This translation is already approved.",
},
status=403,
)
# Only privileged users can approve translations
if not user.can_translate(locale, project):
return JsonResponse(
{
"status": False,
"message": "Forbidden: You don't have permission to approve this translation.",
},
status=403,
)
# Check for errors.
# Checks are disabled for the tutorial.
use_checks = project.slug != "tutorial"
if use_checks:
failed_checks = run_checks(
entity,
locale.code,
entity.string,
translation.string,
user.profile.quality_checks,
)
if are_blocking_checks(failed_checks, ignore_warnings):
return JsonResponse(
{"string": translation.string, "failedChecks": failed_checks}
)
translation.approve(user)
log_action(ActionLog.ActionType.TRANSLATION_APPROVED, user, translation=translation)
active_translation = translation.entity.reset_active_translation(
locale=locale, plural_form=translation.plural_form,
)
return JsonResponse(
{
"translation": active_translation.serialize(),
"stats": TranslatedResource.objects.stats(project, paths, locale),
}
)
@utils.require_AJAX
@login_required(redirect_field_name="", login_url="/403")
@transaction.atomic
def unapprove_translation(request):
"""Unapprove given translation."""
try:
t = request.POST["translation"]
paths = request.POST.getlist("paths[]")
except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": f"Bad Request: {e}"}, status=400,
)
translation = get_object_or_404(Translation, pk=t)
project = translation.entity.resource.project
locale = translation.locale
# Read-only translations cannot be un-approved
if utils.readonly_exists(project, locale):
return JsonResponse(
{
"status": False,
"message": "Forbidden: This string is in read-only mode.",
},
status=403,
)
# Only privileged users or authors can un-approve translations
if not (
request.user.can_translate(locale, project)
or request.user == translation.user
or translation.approved
):
return JsonResponse(
{
"status": False,
"message": "Forbidden: You can't unapprove this translation.",
},
status=403,
)
translation.unapprove(request.user)
log_action(
ActionLog.ActionType.TRANSLATION_UNAPPROVED,
request.user,
translation=translation,
)
active_translation = translation.entity.reset_active_translation(
locale=locale, plural_form=translation.plural_form,
)
return JsonResponse(
{
"translation": active_translation.serialize(),
"stats": TranslatedResource.objects.stats(project, paths, locale),
}
)
@utils.require_AJAX
@login_required(redirect_field_name="", login_url="/403")
@transaction.atomic
def reject_translation(request):
"""Reject given translation."""
try:
t = request.POST["translation"]
paths = request.POST.getlist("paths[]")
except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": f"Bad Request: {e}"}, status=400,
)
translation = get_object_or_404(Translation, pk=t)
project = translation.entity.resource.project
locale = translation.locale
# Read-only translations cannot be rejected
if utils.readonly_exists(project, locale):
return JsonResponse(
{
"status": False,
"message": "Forbidden: This string is in read-only mode.",
},
status=403,
)
# Non-privileged users can only reject own unapproved translations
if not request.user.can_translate(locale, project):
if translation.user == request.user:
if translation.approved is True:
return JsonResponse(
{
"status": False,
"message": "Forbidden: You can't reject approved translations.",
},
status=403,
)
else:
return JsonResponse(
{
"status": False,
"message": "Forbidden: You can't reject translations from other users.",
},
status=403,
)
translation.reject(request.user)
log_action(
ActionLog.ActionType.TRANSLATION_REJECTED, request.user, translation=translation
)
active_translation = translation.entity.reset_active_translation(
locale=locale, plural_form=translation.plural_form,
)
return JsonResponse(
{
"translation": active_translation.serialize(),
"stats": TranslatedResource.objects.stats(project, paths, locale),
}
)
@utils.require_AJAX
@login_required(redirect_field_name="", login_url="/403")
@transaction.atomic
def unreject_translation(request):
"""Unreject given translation."""
try:
t = request.POST["translation"]
paths = request.POST.getlist("paths[]")
except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": f"Bad Request: {e}"}, status=400,
)
translation = get_object_or_404(Translation, pk=t)
project = translation.entity.resource.project
locale = translation.locale
# Read-only translations cannot be un-rejected
if utils.readonly_exists(project, locale):
return JsonResponse(
{
"status": False,
"message": "Forbidden: This string is in read-only mode.",
},
status=403,
)
# Only privileged users or authors can un-reject translations
if not (
request.user.can_translate(locale, project)
or request.user == translation.user
or translation.approved
):
return JsonResponse(
{
"status": False,
"message": "Forbidden: You can't unreject this translation.",
},
status=403,
)
translation.unreject(request.user)
log_action(
ActionLog.ActionType.TRANSLATION_UNREJECTED,
request.user,
translation=translation,
)
active_translation = translation.entity.reset_active_translation(
locale=locale, plural_form=translation.plural_form,
)
return JsonResponse(
{
"translation": active_translation.serialize(),
"stats": TranslatedResource.objects.stats(project, paths, locale),
}
)
``` |
{
"source": "10allday-Software/zamboni",
"score": 2
} |
#### File: mkt/site/views.py
```python
import hashlib
import json
import os
import subprocess
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.http import (HttpResponse, HttpResponseBadRequest,
HttpResponseNotFound, HttpResponseServerError)
from django.template import RequestContext
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt, requires_csrf_token
from django.views.decorators.http import etag, require_POST
from django.views.generic.base import TemplateView
import commonware.log
import jingo_minify
import waffle
from jingo import get_standard_processors
from jingo.helpers import urlparams
from django_statsd.clients import statsd
from django_statsd.views import record as django_statsd_record
from mkt.carriers import get_carrier
from mkt.detail.views import manifest as mini_manifest
from mkt.site import monitors
from mkt.site.context_processors import get_collect_timings
from mkt.site.helpers import media
from mkt.site.utils import log_cef, render
log = commonware.log.getLogger('z.mkt.site')
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore needs @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def handler403(request):
# TODO: Bug 793241 for different 403 templates at different URL paths.
return render(request, 'site/403.html', status=403)
def handler404(request):
if request.path_info.startswith('/api/'):
# Pass over to API handler404 view if API was targeted.
return HttpResponseNotFound()
else:
return render(request, 'site/404.html', status=404)
def handler500(request):
if request.path_info.startswith('/api/'):
# Pass over to API handler500 view if API was targeted.
return HttpResponseServerError()
else:
return render(request, 'site/500.html', status=500)
def csrf_failure(request, reason=''):
return render(request, 'site/403.html',
{'because_csrf': 'CSRF' in reason}, status=403)
def manifest(request):
ctx = RequestContext(request)
for processor in get_standard_processors():
ctx.update(processor(request))
data = {
'name': getattr(settings, 'WEBAPP_MANIFEST_NAME',
'Firefox Marketplace'),
'description': 'The Firefox Marketplace',
'developer': {
'name': 'Mozilla',
'url': 'http://mozilla.org',
},
'icons': {
# Using the default addon image until we get a marketplace logo.
'128': media(ctx, 'img/mkt/logos/128.png'),
'64': media(ctx, 'img/mkt/logos/64.png'),
'32': media(ctx, 'img/mkt/logos/32.png'),
},
'activities': {
'marketplace-app': {'href': '/'},
'marketplace-app-rating': {'href': '/'},
'marketplace-category': {'href': '/'},
'marketplace-search': {'href': '/'},
}
}
if get_carrier():
data['launch_path'] = urlparams('/', carrier=get_carrier())
manifest_content = json.dumps(data)
manifest_etag = hashlib.sha256(manifest_content).hexdigest()
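# Delegate to an inner view so the @etag decorator can answer conditional
# GETs with the hash computed above.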
@etag(lambda r: manifest_etag)
def _inner_view(request):
response = HttpResponse(
manifest_content,
content_type='application/x-web-app-manifest+json')
return response
return _inner_view(request)
def serve_contribute(request):
filename = os.path.join(settings.ROOT, 'contribute.json')
with open(filename) as fd:
content = fd.read()
return HttpResponse(content, content_type='application/json')
def package_minifest(request):
"""Serve mini manifest ("minifest") for Yulelog's packaged `.zip`."""
if not settings.MARKETPLACE_GUID:
return HttpResponseNotFound()
return mini_manifest(request, settings.MARKETPLACE_GUID)
def yogafire_minifest(request):
"""Serve mini manifest ("minifest") for Yogafire's packaged `.zip`."""
if not settings.YOGAFIRE_GUID:
return HttpResponseNotFound()
return mini_manifest(request, settings.YOGAFIRE_GUID)
def robots(request):
"""Generate a `robots.txt`."""
template = render(request, 'site/robots.txt')
return HttpResponse(template, content_type='text/plain')
@csrf_exempt
@require_POST
def record(request):
# The rate limiting is done up on the client, but if things go wrong
# we can just turn the percentage down to zero.
if get_collect_timings():
return django_statsd_record(request)
raise PermissionDenied
@statsd.timer('mkt.mozmarket.minify')
def minify_js(js):
if settings.UGLIFY_BIN:
log.info('minifying JS with uglify')
return _minify_js_with_uglify(js)
else:
# The YUI fallback here is important
# because YUI compressor is bundled with jingo
# minify and therefore doesn't require any deps.
log.info('minifying JS with YUI')
return _minify_js_with_yui(js)
def _minify_js_with_uglify(js):
sp = _open_pipe([settings.UGLIFY_BIN])
js, err = sp.communicate(js)
if sp.returncode != 0:
raise ValueError('Compressing JS with uglify failed; error: %s'
% err.strip())
return js
def _minify_js_with_yui(js):
jar = os.path.join(os.path.dirname(jingo_minify.__file__), 'bin',
'yuicompressor-2.4.7.jar')
if not os.path.exists(jar):
raise ValueError('Could not find YUI compressor; tried %r' % jar)
sp = _open_pipe([settings.JAVA_BIN, '-jar', jar, '--type', 'js',
'--charset', 'utf8'])
js, err = sp.communicate(js)
if sp.returncode != 0:
raise ValueError('Compressing JS with YUI failed; error: %s'
% err.strip())
return js
def _open_pipe(cmd):
return subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
class OpensearchView(TemplateView):
content_type = 'text/xml'
template_name = 'mkt/opensearch.xml'
@never_cache
def monitor(request, format=None):
# For each check, a boolean pass/fail status to show in the template
status_summary = {}
results = {}
checks = ['memcache', 'libraries', 'elastic', 'package_signer', 'path',
'receipt_signer', 'settings_check']
for check in checks:
with statsd.timer('monitor.%s' % check) as timer:
status, result = getattr(monitors, check)()
# state is a string. If it is empty, that means everything is fine.
status_summary[check] = {'state': not status,
'status': status}
results['%s_results' % check] = result
results['%s_timer' % check] = timer.ms
# If anything broke, send HTTP 500.
status_code = 200 if all(a['state']
for a in status_summary.values()) else 500
if format == '.json':
return HttpResponse(json.dumps(status_summary), status=status_code)
ctx = {}
ctx.update(results)
ctx['status_summary'] = status_summary
return render(request, 'services/monitor.html', ctx, status=status_code)
def loaded(request):
return HttpResponse('%s' % request.META['wsgi.loaded'],
content_type='text/plain')
@csrf_exempt
@require_POST
def cspreport(request):
"""Accept CSP reports and log them."""
report = ('blocked-uri', 'violated-directive', 'original-policy')
if not waffle.sample_is_active('csp-store-reports'):
return HttpResponse()
try:
v = json.loads(request.body)['csp-report']
# If possible, alter the PATH_INFO to contain the request of the page
# the error occurred on, spec: http://mzl.la/P82R5y
meta = request.META.copy()
meta['PATH_INFO'] = v.get('document-uri', meta['PATH_INFO'])
v = [(k, v[k]) for k in report if k in v]
log_cef('CSPViolation', 5, meta,
signature='CSPREPORT',
msg='A client reported a CSP violation',
cs6=v, cs6Label='ContentPolicy')
except (KeyError, ValueError) as e:
log.debug('Exception in CSP report: %s' % e, exc_info=True)
return HttpResponseBadRequest()
return HttpResponse()
class MinimalMetadata(object):
"""
Don't include field and other information for `OPTIONS` requests.
Just return the name and description.
"""
def determine_metadata(self, request, view):
return {
'name': view.get_view_name(),
'description': view.get_view_description()
}
``` |
{
"source": "10bedicu/care",
"score": 2
} |
#### File: api/viewsets/bed.py
```python
from django_filters import rest_framework as filters
from rest_framework import filters as drf_filters
from rest_framework.mixins import (
CreateModelMixin,
DestroyModelMixin,
ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
)
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import GenericViewSet
from care.facility.api.serializers.bed import AssetBedSerializer, BedSerializer
from care.facility.models.bed import AssetBed, Bed
from care.users.models import User
from care.utils.cache.cache_allowed_facilities import get_accessible_facilities
from care.utils.filters.choicefilter import CareChoiceFilter, inverse_choices
inverse_bed_type = inverse_choices(Bed.BedTypeChoices)
class BedFilter(filters.FilterSet):
facility = filters.UUIDFilter(field_name="facility__external_id")
location = filters.UUIDFilter(field_name="location__external_id")
bed_type = CareChoiceFilter(choice_dict=inverse_bed_type)
class BedViewSet(ListModelMixin, RetrieveModelMixin, CreateModelMixin, UpdateModelMixin, GenericViewSet):
queryset = Bed.objects.all().select_related("facility", "location").order_by("-created_date")
serializer_class = BedSerializer
lookup_field = "external_id"
filter_backends = (filters.DjangoFilterBackend, drf_filters.SearchFilter)
permission_classes = [IsAuthenticated]
search_fields = ["name"]
filterset_class = BedFilter
def get_queryset(self):
user = self.request.user
queryset = self.queryset
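# Scope visibility by admin tier: superusers see every bed, state and
# district lab admins see their region, everyone else only the
# facilities they can access.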
if user.is_superuser:
pass
elif user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]:
queryset = queryset.filter(facility__state=user.state)
elif user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]:
queryset = queryset.filter(facility__district=user.district)
else:
allowed_facilities = get_accessible_facilities(user)
queryset = queryset.filter(facility__id__in=allowed_facilities)
return queryset
class AssetBedFilter(filters.FilterSet):
asset = filters.UUIDFilter(field_name="asset__external_id")
bed = filters.UUIDFilter(field_name="bed__external_id")
class AssetBedViewSet(ListModelMixin, RetrieveModelMixin, CreateModelMixin, UpdateModelMixin, GenericViewSet):
queryset = AssetBed.objects.all().select_related("asset", "bed").order_by("-created_date")
serializer_class = AssetBedSerializer
filter_backends = (filters.DjangoFilterBackend,)
filterset_class = AssetBedFilter
def get_queryset(self):
user = self.request.user
queryset = self.queryset
if user.is_superuser:
pass
elif user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]:
queryset = queryset.filter(bed__facility__state=user.state)
elif user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]:
queryset = queryset.filter(bed__facility__district=user.district)
else:
allowed_facilities = get_accessible_facilities(user)
queryset = queryset.filter(bed__facility__id__in=allowed_facilities)
return queryset
``` |
{
"source": "10c8/icelang",
"score": 2
} |
#### File: 10c8/icelang/ice.py
```python
import sys
import re
import time
# Data
ep = 0
current = None
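# Call/return bookkeeping for the "call" and "return" opcodes.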
returnPoint = 0
currentReturn = 0
returnStack = [returnPoint]
safeVariableAccess = False
execTrue, execFalse, execCode = (False, False, True)
currentIf = 0
ifStack = [[False, False, True]]
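# Each ifStack frame mirrors (execTrue, execFalse, execCode) for one "if" level.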
types = {}
instances = {}
classes = {
"File": {
"props": {
"name": {"type": "Str", "data": ""},
"data": {"type": "Arr", "data": []},
"size": {"type": "Int", "data": 0}
}
}
}
pointers = {}
variables = {
"EMPTY": {
"data": "__INFO_EMPTYSTACK__",
"type": "Str",
"protected": True
}
}
labels = {}
stack = []
validTypes = ["Int", "Flt", "Str", "FileStr", "Arr"]
validConditions = ["==", "!=", ">", ">=", "<", "<="]
validMulti = ["and", "or"]
validOperations = ["!", "+", "-", "*", "/", "&", "|", "~", "^", "<<", ">>"]
# Utils
def Throw(error):
print "\r\n[Error] " + str(ep+1) + ": " + error
sys.exit(1)
def DbgThrow(failure):
print "\r\n[Failure]: " + failure
sys.exit(1)
def ParseFlag(data, do=True):
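# Resolve a token by its sigil: $var, #field (of the current instance),
# &instance, %pointer, *label; unprefixed tokens are Int/Flt/Str literals.
# With do=False only the raw pieces are returned, without any lookups.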
typeFlag = data[:1]
typeID = data[1:]
resProtected = False
if do:
if typeFlag == "$": # Variable
if typeID not in variables:
Throw("Unknown variable \"" + typeID + "\".")
if variables[typeID]["protected"] and not safeVariableAccess:
Throw("Trying to access a protected variable. ($"
+ typeID + ")")
resData = variables[typeID]["data"]
resType = variables[typeID]["type"]
resProtected = variables[typeID]["protected"]
if resType == "Str" or resType == "FileStr":
resData = resData
elif resType == "Int":
resData = int(resData)
elif resType == "Flt":
resData = float(resData)
elif resType == "Arr":
resData = "<Array " + typeID + ">"
elif typeFlag == "#": # Field
if current is None or current not in instances:
Throw("Unknown instance.")
if typeID not in instances[current]["fields"]:
Throw("Unknown field \"" + typeID + "\".")
resData = instances[current]["fields"][typeID]["data"]
resType = instances[current]["fields"][typeID]["type"]
resProtected = instances[current]["fields"][typeID]["protected"]
if resType == "Str":
resData = resData.lstrip("\"").rstrip("\"")
elif resType == "Int":
resData = int(resData)
elif resType == "Flt":
resData = float(resData)
elif resType == "Arr":
resData = "<Array " + typeID + ">"
elif typeFlag == "&": # Instance
if typeID not in instances:
Throw("Unknown instance \"" + typeID +"\".")
resData = "<InstanceOf " + instances[typeID]["instanceOf"] + ">"
resType = "Instance"
elif typeFlag == "%": # Pointer
if typeID not in pointers:
Throw("Unknown pointer \"" + typeID + "\".")
resData = "<PointerOf " + pointers[typeID]["pointerOf"] + ">"
resType = "Pointer"
elif typeFlag == "*": # Label
if typeID not in labels:
Throw("Unknown label \""+ typeID +"\".")
resData = labels[typeID]["point"]
resType = "Int"
else:
unquote = data.lstrip("\"").rstrip("\"")
if data == unquote:
if "." in data:
resData = float(data)
resType = "Flt"
else:
resData = int(data)
resType = "Int"
else:
resData = unquote
resType = "Str"
return (resData, resType, typeID, typeFlag, resProtected)
unquote = data.lstrip("\"").rstrip("\"")
if data == unquote:
if "." in data:
resType = "Flt"
else:
resType = "Int"
else:
resType = "Str"
return (data, typeID, typeFlag, resType)
# Then the magic happens:
lines = []
with open(sys.argv[1]) as f:
lines = [l.lstrip().rstrip("\n") for l in f.readlines()]
ep = 0
while ep <= len(lines)-1:
line = lines[ep]
if line == "" or line[0] == "#":
ep += 1
continue
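# Split the line on spaces that sit outside single- or double-quoted strings.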
line = [e.lstrip() for e in re.split(''' (?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', line.lstrip())]
opcode = line[0]
args = []
for i in range(1, len(line)):
args.append(line[i].strip())
if opcode == "def":
if len(args) != 1:
Throw("Invalid argument count.")
# Arguments
defName = args[0]
labels[defName] = {}
labels[defName]["point"] = ep+1
ep += 1
continue
# Structs
elif opcode == "struct":
if len(args) != 1:
Throw("Invalid argument count.")
# Arguments
argName = args[0]
argFlag = ParseFlag(argName, False)[2]
if argFlag == "$" or argFlag == "#" or argFlag == "@" or argFlag == "*" or any(char.isdigit() for char in argName):
Throw("Invalid struct name \""+ argName +"\".")
# Function code
if argName in types:
Throw("Struct already exists.")
types[argName] = {}
types[argName]["fields"] = {}
current = argName
ep += 1
continue
elif opcode == "field":
if len(args) != 2:
Throw("Invalid argument count.")
# Arguments
argName = args[0]
argType = args[1]
if any(char.isdigit() for char in argName):
Throw("Invalid field name.")
# Function code
if current is None or current not in types:
Throw("Unknown struct.")
if argType not in validTypes:
Throw("Invalid data type for field.")
if argName in types[current]["fields"]:
Throw("Trying to overwrite an existing field.")
types[current]["fields"][argName] = {}
types[current]["fields"][argName]["data"] = None
types[current]["fields"][argName]["type"] = argType
ep += 1
continue
ep += 1
if "program" not in labels:
DbgThrow("No entrypoint (\"program\" label).")
else:
ep = labels["program"]["point"]
while ep <= len(lines)-1:
line = lines[ep]
if line == "" or line[0] == "#":
ep += 1
continue
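# Split the line on spaces that sit outside single- or double-quoted strings.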
line = [e.lstrip() for e in re.split(''' (?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', line.lstrip())]
opcode = line[0]
args = []
for i in range(1, len(line)):
args.append(line[i].strip())
# Condition handling
execTrue = ifStack[currentIf][0]
execFalse = ifStack[currentIf][1]
execCode = ifStack[currentIf][2]
# print ep, opcode, args, (execTrue, execFalse, execCode)
if execTrue:
if opcode == "else":
ifStack[currentIf][0] = False
ifStack[currentIf][2] = False
elif opcode == "endif":
ifStack[currentIf][0] = False
ifStack[currentIf][2] = True
elif execFalse:
if opcode == "else":
ifStack[currentIf][1] = False
ifStack[currentIf][2] = True
elif opcode == "endif":
ifStack[currentIf][1] = False
ifStack[currentIf][2] = True
if not execCode and opcode != "endif" and opcode != "elif":
ep += 1
continue
# Parse opcode
if opcode == "exit":
sys.exit(0)
# Conditions
elif opcode == "if" or opcode == "elif":
if len(args) < 3:
Throw("Invalid argument count.")
if opcode == "if":
currentIf += 1
item = [False, False, True]
ifStack.append(item)
if opcode == "elif" and ifStack[currentIf][0]:
ifStack[currentIf][2] = False
ep += 1
continue
# Arguments
i = 0
answers = []
sep = []
sepName = 0
done = False
result = []
# Enable protected variable access
safeVariableAccess = True
# Function code
while not done:
argA = ParseFlag(args[i])[0]
argB = ParseFlag(args[i+2])[0]
typeA = ParseFlag(args[i])[1]
typeB = ParseFlag(args[i+2])[1]
condition = args[i+1]
if i+3 != len(args):
sepName = args[i+3]
if sepName not in validMulti:
Throw("Invalid condition(s).")
sep.append(sepName)
if condition not in validConditions:
Throw("Unknown condition \"" + condition + "\".")
if (typeA != "Instance" and typeB != "Instance")\
and (typeA not in validTypes or typeB not in validTypes):
Throw("Invalid data type(s).")
if condition == "==":
if argA == argB:
answers.append(True)
else:
answers.append(False)
elif condition == "!=":
if argA != argB:
answers.append(True)
else:
answers.append(False)
elif condition == ">":
if argA > argB:
answers.append(True)
else:
answers.append(False)
elif condition == ">=":
if argA >= argB:
answers.append(True)
else:
answers.append(False)
elif condition == "<":
if argA < argB:
answers.append(True)
else:
answers.append(False)
elif condition == "<=":
if argA <= argB:
answers.append(True)
else:
answers.append(False)
if i+3 == len(args):
done = True
else:
i += 4
e = 0
edone = False
while not edone and len(answers) > 1:
if args[e+3] == "and":
if answers[e] and answers[e+1]:
result.append(True)
else:
result.append(False)
elif args[e+3] == "or":
if answers[e] or answers[e+1]:
result.append(True)
else:
result.append(False)
if e+1 == len(answers):
edone = True
else:
e += 1
if len(result) > 1:
ifStack[currentIf][0] = all(e is True for e in result)
elif result != []:
ifStack[currentIf][0] = result[0]
else:
ifStack[currentIf][0] = answers[0]
ifStack[currentIf][1] = not ifStack[currentIf][0]
ifStack[currentIf][2] = not ifStack[currentIf][1]
# Disable protected variable access
safeVariableAccess = False
ep += 1
continue
elif opcode == "else" or opcode == "endif":
if len(args) != 0:
Throw("Invalid argument count.")
if opcode == "endif":
currentIf -= 1
ep += 1
continue
# Timing
elif opcode == "wait":
if len(args) != 1:
Throw("Invalid argument count.")
# Arguments
argTime = args[0]
argType = ParseFlag(argTime, False)[3]
argFlag = ParseFlag(argTime, False)[2]
# Function code
if argFlag == "$" or argFlag == "#":
argTime = ParseFlag(argTime)[0]
else:
if any(not char.isdigit() for char in argTime) or argType != "Int":
Throw("Invalid argument type (expects \"Int\".")
argTime = float(int(argTime)/1000)
time.sleep(argTime)
ep += 1
continue
# Calls
elif opcode == "def":
ep += 1
continue
elif opcode == "call" or opcode == "jump":
if len(args) != 1:
Throw("Invalid argument count.")
if opcode == "call":
returnPoint = ep+1
# Arguments
argFlag = ParseFlag(args[0], False)[2]
lblName = ParseFlag(args[0], False)[1]
# Function code
if argFlag == "@" or argFlag == "&" or argFlag == "#":
Throw("Invalid variable type.")
if argFlag == "*":
if not lblName in labels:
Throw("Unknown label \""+ lblName +"\".")
ep = labels[lblName]["point"]
elif argFlag == "$":
dataType = ParseFlag(args[0])[1]
if dataType != "Int" and dataType != "Label":
Throw("Invalid execution point \""+ args[0] +"\".")
ep = ParseFlag(args[0])[0]
else:
dataType = ParseFlag(args[0], False)[3]
if dataType != "Int":
Throw("Invalid execution point \""+ args[0] +"\".")
ep = int(args[0])
if opcode == "call":
currentReturn += 1
ifStack[currentIf] = [False, False, True]
returnStack.append(returnPoint)
continue
elif opcode == "return":
if len(args) != 0:
Throw("Invalid argument count.")
ep = returnStack[currentReturn]
del returnStack[currentReturn]
currentReturn -= 1
continue
# Stack
elif opcode == "push":
if len(args) != 1:
Throw("Invalid argument count.")
# Arguments
argFlag = ParseFlag(args[0], False)[2]
if argFlag == "@":
_typeID = ParseFlag(args[0], False)[1]
tempStack = []
if not _typeID in types:
Throw("Unknown struct \""+ _typeID +"\".")
for instance in instances:
if instances[instance]["instanceOf"] == _typeID:
item = {}
item["data"] = instance
item["type"] = "Str"
item["protected"] = False
tempStack.append(item)
for item in reversed(tempStack):
stack.append(item)
elif argFlag == "%":
_typeID = ParseFlag(args[0], False)[1]
if not _typeID in pointers:
Throw("Unknown pointer \""+ _typeID +"\".")
pointerType = pointers[_typeID]["pointerOf"]
if pointerType == "File":
pointerData = pointers[_typeID]["props"]["data"]["data"]
for _line in reversed(pointerData):
item = {}
item["data"] = _line
item["type"] = "FileStr"
item["protected"] = False
stack.append(item)
else:
argData = ParseFlag(args[0])[0]
argType = ParseFlag(args[0])[1]
if argType == "Arr":
if len(argData) > 0:
arrayName = ParseFlag(args[0])[2]
arrayData = variables[arrayName]["data"]
for entry in reversed(arrayData):
item = {}
item["data"] = entry["data"]
item["type"] = entry["type"]
item["protected"] = False
stack.append(item)
else:
item = {}
item["data"] = argData
item["type"] = argType
item["protected"] = False
stack.append(item)
ep += 1
continue
elif opcode == "pop":
if len(args) > 1:
Throw("Invalid argument count.")
# Arguments
if len(args) == 1:
popName = args[0]
popFlag = ParseFlag(popName, False)[2]
if popFlag == "$" or popFlag == "#" or popFlag == "@" or popFlag == "*" or popFlag == "&" or popFlag == "%" or any(char.isdigit() for char in popName):
Throw("Invalid data type.")
if popName in variables:
if variables[popName]["protected"]:
Throw("Trying to write on a protected variable. ($"+ popName +")")
else:
popName = 0
# Function code:
if popName:
if len(stack) != 0:
variables[popName] = stack.pop()
else:
variables[popName] = variables["EMPTY"]
variables[popName]["protected"] = False
else:
if len(stack) != 0:
stack.pop()
ep += 1
continue
elif opcode == "read":
if len(args) != 1:
Throw("Invalid argument count.")
# Arguments
popName = args[0]
popFlag = ParseFlag(popName, False)[2]
if popFlag == "$" or popFlag == "#" or popFlag == "*" or popFlag == "%" or any(char.isdigit() for char in popName):
Throw("Invalid data type.")
if popName in variables:
if variables[popName]["protected"]:
Throw("Trying to write on a protected variable. ($"+ popName +")")
# Function code:
if len(stack) != 0:
variables[popName] = stack[-1]
else:
variables[popName] = variables["EMPTY"]
variables[popName]["protected"] = False
ep += 1
continue
# FIXME When deleting a struct, delete all of its instances too.
elif opcode == "delete":
if len(args) != 1:
Throw("Invalid argument count.")
# Arguments
argName = args[0]
argFlag = ParseFlag(argName, False)[2]
if argFlag == "$" or argFlag == "#" or argFlag == "*" or argFlag == "%" or any(char.isdigit() for char in argName):
Throw("Invalid struct name \""+ argName +"\".")
# Function code
if argName not in types:
Throw("Trying to delete an unknown struct.")
del(types[argName])
ep += 1
continue
elif opcode == "with":
if len(args) != 1:
Throw("Invalid argument count.")
# Arguments
argFlag = ParseFlag(args[0], False)[2]
if argFlag == "$":
argName = ParseFlag(args[0])[0]
elif argFlag == "&":
argName = ParseFlag(args[0], False)[1]
else:
Throw("Invalid data type.")
# Function code
if not argName in instances:
Throw("Invalid instance \""+ str(argName) +"\".")
current = argName
ep += 1
continue
elif opcode == "end":
current = None
ep += 1
continue
# Variables
# FIXME Exclude special characters from variable names.
elif opcode == "set":
if len(args) != 2:
Throw("Invalid argument count.")
# Arguments
varName = ParseFlag(args[0], False)[0]
varData = ParseFlag(args[1], False)[0]
nameFlag = ParseFlag(args[0], False)[2]
dataFlag = ParseFlag(args[1], False)[2]
dataType = ParseFlag(args[1], False)[1]
# Function code
if nameFlag in ["$", "@", "*", "&", "%"] or any(not char.isalpha() for char in varName[1:]):
Throw("Invalid variable name \""+ varName +"\".")
if dataFlag == "&":
Throw("Invalid data type.")
if dataFlag == "@": # New instance:
varData = varData[1:]
if not varData in types:
Throw("Unknown struct \""+ varData +"\".")
if varName in instances or varName in types:
Throw("Trying to overwrite a struct/instance.")
instances[varName] = {}
instances[varName]["instanceOf"] = varData
instances[varName]["fields"] = {}
for field in types[varData]["fields"]:
instances[varName]["fields"][field] = {}
instances[varName]["fields"][field]["data"] = None
instances[varName]["fields"][field]["type"] = types[varData]["fields"][field]["type"]
instances[varName]["fields"][field]["protected"] = False
current = varName
elif nameFlag == "#": # Set field
if current == None or not current in instances:
Throw("Invalid struct.")
varName = varName[1:]
dataType = ParseFlag(args[1])[1]
varData = ParseFlag(args[1])[0]
if not varName in instances[current]["fields"]:
Throw("Unknown field \""+ varName +"\".")
fieldType = instances[current]["fields"][varName]["type"]
if dataType != fieldType:
Throw("Incompatible data type \""+ dataType +"\" (field expects \""+ fieldType +"\").")
instances[current]["fields"][varName]["data"] = varData
elif dataFlag == "%": # New pointer:
varData = varData[1:]
if not varData in classes:
Throw("Unknown class \""+ varData +"\".")
if varName in pointers or varName in classes:
Throw("Trying to overwrite a class/pointer.")
pointers[varName] = {}
pointers[varName]["pointerOf"] = varData
pointers[varName]["props"] = {}
for prop in classes[varData]["props"]:
pointers[varName]["props"][prop] = {}
pointers[varName]["props"][prop]["data"] = None
pointers[varName]["props"][prop]["type"] = classes[varData]["props"][prop]["type"]
else:
if varName in variables:
if variables[varName]["protected"]:
Throw("Trying to write on a protected variable. ($"+ varName +")")
if varData == "Arr":
variables[varName] = {}
variables[varName]["type"] = "Arr"
variables[varName]["data"] = []
else:
variables[varName] = {}
variables[varName]["type"] = ParseFlag(varData)[1]
varData = ParseFlag(varData)[0]
if variables[varName]["type"] == "Int":
varData = int(varData)
elif variables[varName]["type"] == "Flt":
varData = float(varData)
variables[varName]["data"] = varData
variables[varName]["protected"] = False
ep += 1
continue
elif opcode == "insert":
if len(args) < 2:
Throw("Invalid argument count.")
varName = args[0]
if not varName in variables:
Throw("Unknown variable \""+ varName +"\".")
if variables[varName]["protected"]:
Throw("Trying to write on a protected variable. ($"+ varName +")")
if variables[varName]["type"] != "Arr":
Throw("Variable \"" + varName +"\" is not an array.")
for arg in args[1:]:
# Arguments
argData = ParseFlag(arg)[0]
argType = ParseFlag(arg)[1]
# Function code
item = {}
item["data"] = argData
item["type"] = argType
variables[varName]["data"].append(item)
ep += 1
continue
elif opcode == "seek":
if len(args) != 3:
Throw("Invalid argument count.")
arrayName = args[0]
index = ParseFlag(args[1])[0]
varName = args[2]
if not arrayName in variables:
Throw("Unknown variable \""+ arrayName +"\".")
if varName in variables:
if variables[varName]["protected"]:
Throw("Trying to access a protected variable. ($"+ varName +")")
if variables[arrayName]["protected"]:
Throw("Trying to access a protected variable. ($"+ arrayName +")")
if variables[arrayName]["type"] != "Arr":
Throw("Variable \"" + arrayName +"\" is not an array.")
arrayData = variables[arrayName]["data"]
if index > len(arrayData)-1:
Throw("Index out of range. ("+ str(index) +")")
variables[varName] = {}
variables[varName]["data"] = arrayData[index]["data"]
variables[varName]["type"] = arrayData[index]["type"]
variables[varName]["protected"] = False
ep += 1
continue
elif opcode == 'frseek':
if len(args) != 3:
Throw('Invalid argument count.')
pointerName = args[0]
justName = pointerName[1:]
index = ParseFlag(args[1])[0]
varName = args[2]
if not justName in pointers:
Throw('Unknown variable \''+ pointerName +'\'.')
if varName in variables:
if variables[varName]['protected']:
Throw('Trying to access a protected variable. ($'+ varName +')')
if pointers[justName]['pointerOf'] != 'File':
Throw('Variable "' + pointerName +'" is not a file pointer.')
fileData = pointers[justName]['props']['data']['data']
if index > len(fileData)-1:
Throw('Index out of range. ('+ str(index) +')')
variables[varName] = {}
variables[varName]['data'] = fileData[index]
variables[varName]['type'] = 'FileStr'
variables[varName]['protected'] = False
ep += 1
continue
elif opcode == "size":
if len(args) != 2:
Throw("Invalid argument count.")
arrayName = args[0]
varName = args[1]
justName = arrayName[1:]
if nameFlag in ["@", "*", "&"] or any(not char.isalpha() for char in varName):
Throw("Invalid variable name \""+ varName +"\".")
elif not justName in variables and not justName in pointers:
Throw("Unknown variable \""+ arrayName +"\".")
nameFlag = ParseFlag(arrayName, False)[2]
nameProtected = ParseFlag(arrayName, True)[4]
if varName in variables:
varProtected = ParseFlag("$"+varName, True)[4]
if varProtected:
Throw("Trying to access a protected variable. ("+ varName +")")
if nameProtected:
Throw("Trying to access a protected variable. ("+ arrayName +")")
arrayType = ParseFlag(arrayName, True)[1]
if arrayType == "Arr":
arrayData = variables[justName]["data"]
elif arrayType == "Pointer":
arrayData = pointers[justName]["props"]["data"]["data"]
else:
Throw("Invalid data type.")
variables[varName] = {}
variables[varName]["data"] = len(arrayData)
variables[varName]["type"] = "Int"
variables[varName]["protected"] = False
ep += 1
continue
elif opcode == "bit":
if len(args) != 3:
Throw("Invalid argument count.")
# Arguments
varA = args[0]
varB = args[2]
flagA = ParseFlag(varA, False)[2]
flagB = ParseFlag(varB, False)[2]
typeA = ParseFlag("$"+ varA)[1]
typeB = ParseFlag(varB, False)[3]
if flagB in ["$", "#"]:
typeB = ParseFlag(varB)[1]
if flagA in ["$", "@", "*", "&"] or any(not char.isalpha() for char in varA):
Throw("Invalid variable name \""+ varA +"\".")
if flagB in ["@", "*", "&", "%"]:
Throw("Invalid variable name \""+ varB +"\".")
if flagB != "$" and any(not char.isdigit() for char in varB):
Throw("Invalid argument type.")
if (typeA != "Int" and typeA != "Flt") or (typeB != "Int" and typeB != "Flt"):
Throw("Invalid argument type(s).")
argB = ParseFlag(args[2])[0]
operation = args[1]
# Function code
if not operation in validOperations:
Throw("Unknown operation \""+ operation +"\".")
if flagA == "#": # Set field
if current == None or not current in instances:
Throw("Invalid struct.")
varName = varA[1:]
if not varName in instances[current]["fields"]:
Throw("Unknown field \""+ varName +"\".")
fieldType = instances[current]["fields"][varName]["type"]
if typeB != fieldType:
Throw("Incompatible data type \""+ typeB +"\" (field expects \""+ fieldType +"\").")
if operation == "!":
instances[current]["fields"][varName]["data"] = -argB
elif operation == "+":
instances[current]["fields"][varName]["data"] += argB
elif operation == "-":
instances[current]["fields"][varName]["data"] -= argB
elif operation == "*":
instances[current]["fields"][varName]["data"] *= argB
elif operation == "/":
instances[current]["fields"][varName]["data"] /= argB
elif operation == "&":
instances[current]["fields"][varName]["data"] = instances[current]["fields"][varName]["data"] & argB
elif operation == "|":
instances[current]["fields"][varName]["data"] = instances[current]["fields"][varName]["data"] | argB
elif operation == "~":
instances[current]["fields"][varName]["data"] = ~ argB
elif operation == "^":
instances[current]["fields"][varName]["data"] = instances[current]["fields"][varName]["data"] ^ argB
elif operation == "<<":
instances[current]["fields"][varName]["data"] = instances[current]["fields"][varName]["data"] << argB
elif operation == ">>":
instances[current]["fields"][varName]["data"] = instances[current]["fields"][varName]["data"] >> argB
else:
if not varA in variables:
Throw("Unknown variable.")
if operation == "!":
variables[varA]["data"] = -argB
elif operation == "+":
variables[varA]["data"] += argB
elif operation == "-":
variables[varA]["data"] -= argB
elif operation == "*":
variables[varA]["data"] *= argB
elif operation == "/":
variables[varA]["data"] /= argB
elif operation == "&":
variables[varA]["data"] = variables[varA]["data"] & argB
elif operation == "|":
variables[varA]["data"] = variables[varA]["data"] | argB
elif operation == "~":
variables[varA]["data"] = ~ argB
elif operation == "^":
variables[varA]["data"] = variables[varA]["data"] ^ argB
elif operation == "<<":
variables[varA]["data"] = variables[varA]["data"] << argB
elif operation == ">>":
variables[varA]["data"] = variables[varA]["data"] >> argB
ep += 1
continue
# I/O
elif opcode == "print":
if len(args) == 0:
print
else:
i = 1
for arg in args:
# Arguments
argData = ParseFlag(arg)[0]
argType = ParseFlag(arg)[1]
# Function code
if i == len(args):
if argType == "FileStr":
sys.stdout.write(str(argData))
continue
if argType == "Str":
if len(argData) > 1:
if argData[-1] != ",":
print argData
else:
sys.stdout.write(argData[:-2])
else:
if argData != ",":
print argData
else:
sys.stdout.write(argData[:-1])
else:
print str(argData)
else:
sys.stdout.write(str(argData))
i += 1
ep += 1
continue
elif opcode == "fread":
if len(args) != 2:
Throw("Invalid argument count.")
# Arguments
varName = args[0]
varFlag = ParseFlag(varName, False)[2]
varType = ParseFlag("%"+varName)[0]
fileName = ParseFlag(args[1])[0]
# Function code
if varFlag in ["$", "#", "@", "*", "&", "%"] or any(not char.isalpha() for char in varName):
Throw("Invalid variable name \""+ varName +"\".")
if varType != "<PointerOf File>":
Throw("FREAD requires a <PointerOf File>, not a "+ varType +".")
try:
dataFile = open(fileName)
except:
Throw("Could not open file \""+ fileName +"\".")
pointers[varName]["props"]["name"]["data"] = fileName
pointers[varName]["props"]["data"]["data"] = []
pointers[varName]["props"]["size"]["data"] = 0
for line in dataFile.readlines():
for c in line:
pointers[varName]["props"]["size"]["data"] += 1
# line = line.strip("\r\n")
pointers[varName]["props"]["data"]["data"].append(line)
# print pointers[varName]["props"]["data"]["data"]
# print pointers[varName]
ep += 1
continue
# FIXME Handle wrong inputs.
elif opcode == "input":
if len(args) != 2:
Throw("Invalid argument count.")
# Arguments
varName = args[0]
varFlag = ParseFlag(varName, False)[2]
varType = args[1]
# Function code
if varFlag == "$" or varFlag == "#" or varFlag == "@" or varFlag == "*" or varFlag == "&" or any(char.isdigit() for char in varName):
Throw("Invalid variable name \""+ varName +"\".")
if not varType in validTypes:
Throw("Invalid data type \""+ varType +"\".")
inData = raw_input()
variables[varName] = {}
variables[varName]["type"] = varType
if varType == "Str":
variables[varName]["data"] = str(inData)
elif varType == "Int":
variables[varName]["data"] = int(inData)
else:
variables[varName]["data"] = float(inData)
variables[varName]["protected"] = False
ep += 1
continue
# Default
else:
Throw("Unknown opcode \""+ opcode +"\".")
sys.exit(1)
print "EOF"
``` |
{
"source": "10c8/pixelbot",
"score": 2
} |
#### File: 10c8/pixelbot/PixelBot.py
```python
# Modular bot for Discord servers
#
# author <NAME>.
# version 0.4
# copyright MIT
##
# Imports
import json
import logging
import re
import warnings
from ConfigManager import ConfigManager
from Client import Client
# Main class
class Bot(object):
def __init__(self):
self.client = None
self.cfg = None
self.cmd_regex = re.compile(r'(?P<command>\w+)\s*(?P<args>.*)?')
self.settings = {}
self.data = {}
self.plugins = {}
# Set up logging
logging.basicConfig(filename='pixelbot.log',
format='[%(asctime)-15s] %(message)s',
level=logging.INFO)
logging.info('Pixel Bot initialized.')
# Load configuration
logging.info('Loading configuration file.')
self.cfg = ConfigManager('./config.ini')
# Load settings
logging.info('Loading settings.')
print('Loading settings.')
self.settings = {
'discord': {
'token': self.cfg.get('token', section='discord'),
'mod_roles': self.cfg.get('mod_roles',
section='discord').split(',')
},
'options': {
'prefix': self.cfg.get('prefix', section='options')
},
'channels': {
'welcome': self.cfg.get('welcome', section='channels'),
}
}
# Load bot data
logging.info('Loading data.')
print('Loading data.')
self.data = json.loads(open('./data.json').read())
def start(self):
# Initialize the Discord API
self.client = Client(self)
# Login into Discord
logging.info('Logging in...')
print('Logging in...')
try:
self.client.run(self.settings['discord']['token'])
except Exception as e:
logging.critical('Failed.')
logging.critical(e)
print('Logging in to Discord failed.')
exit(1)
def stop(self):
self.client.logout()
def install_plugins(self):
to_load = dict(self.cfg.items('plugins', raw=True))
for name in to_load:
if to_load[name] != 'true':
continue
# try:
loc = {}
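# A plugin module must bind its entry class to __plugin__ at top level.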
exec(open('./plugins/{}.py'.format(name)).read(), loc)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plugin = loc['__plugin__'](self)
self.plugins[plugin.name.lower()] = plugin
# except Exception as e:
# logging.critical('Failed to load plugin "{}".'.format(name))
# logging.critical(e)
# print('Failed to load plugin "{}".'.format(name))
# exit(1)
def saveData(self):
logging.info('Data save requested, making a backup...')
try:
with open('data.json.bkp', 'w+') as bkp:
bkp.write(open('data.json', 'r').read())
except:
logging.critical('Failed.')
print('Failed to save data backup, aborting save.')
exit(1)
logging.info('Done.')
logging.info('Saving data...')
try:
with open('data.json', 'w+') as data:
data.write(json.dumps(self.data))
logging.info('Done.')
except:
logging.critical('Error.')
print('Error while saving data.')
exit(1)
# Main code
if __name__ == '__main__':
try:
bot = Bot()
bot.install_plugins()
bot.start()
except KeyboardInterrupt:
logging.info("KeyboardInterrupt, disconnecting.")
print('\nKeyboardInterrupt')
bot.stop()
``` |