{
"source": "25077667/cyshdormitory.github.io",
"score": 2
}
#### File: recievedata/recievedata/views.py
```python
from flask import Flask, render_template, request

app = Flask(__name__)

@app.route("/", methods=['GET', 'POST'])
def submit():
    if request.method == 'POST':
        # default to '' so a missing form field doesn't raise a TypeError
        return 'Hello ' + request.values.get('username', '')
    return render_template('post_submit.html')

if __name__ == '__main__':
    app.debug = True
    app.run()
```
{
"source": "2510/glusterfs-freebsd",
"score": 3
}
#### File: 2510/glusterfs-freebsd/make-manifest.py
```python
import argparse
import json
import os
import re
import stat
import subprocess
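# Extract the ABI string (something like "FreeBSD:12:amd64") from `pkg -vv` output.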
def get_abi():
    pkg_process = subprocess.Popen(['pkg', '-vv'], stdout=subprocess.PIPE)
    abi_regex = re.compile(r'ABI\s*=\s*"(.+)"\s*;\s*')
    for line in pkg_process.stdout.readlines():
        match = abi_regex.match(line.decode())
        if match is not None:
            return match.group(1)
    return None
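# Walk base_dir and build the MANIFEST "files" mapping: installed path -> owner,
# group, and permission bits (owner/group fall back to the lstat() values).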
def get_files(base_dir, uid=None, gid=None):
manifest_files = {}
for root, dirs, files in os.walk(base_dir):
rel_root = os.path.relpath(root, base_dir)
        entries = []
        entries.extend(files)
        entries.extend(dirs)
        for name in entries:
path = os.path.join(root, name)
rel_path = os.path.join(rel_root, name)
if rel_path.startswith('./'):
rel_path = rel_path[1:]
elif not rel_path.startswith('/'):
rel_path = '/' + rel_path
path_stat = os.lstat(path)
manifest_files[rel_path] = {
'uname': uid or path_stat.st_uid,
'gname': gid or path_stat.st_gid,
'perm': oct(stat.S_IMODE(path_stat.st_mode))
}
return manifest_files
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generates MANIFEST for pkg.')
parser.add_argument('--source-manifest', help='source MANIFEST file.', default='manifest.source')
parser.add_argument('--root', help='make install\'ed DESTDIR', default='install-root')
parser.add_argument('--uid', help='uid for installed files. (default: root)', default='root')
parser.add_argument('--gid', help='gid for installed files. (default: wheel)', default='wheel')
args = parser.parse_args()
    with open(args.source_manifest, 'r') as f:
manifest = json.load(f)
manifest['abi'] = get_abi()
manifest['files'] = get_files(args.root, args.uid, args.gid)
    print(json.dumps(manifest))
```
{
"source": "251871553/ops_admin",
"score": 2
}
#### File: ops_admin/www/views.py
```python
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.template import loader
from django.http import StreamingHttpResponse
import os
import logging
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
def index(request):
#return HttpResponse("Hello, world. You're at the polls index.")
template = loader.get_template('www/index.html')
context = {
'latest_question_list': 'hello',
}
return HttpResponse(template.render(context, request))
def welcome(request):
#return HttpResponse("Hello, world. You're at the polls index.")
template = loader.get_template('www/welcome.html')
context = {
'latest_question_list': 'hello',
}
return HttpResponse(template.render(context, request))
def login(request):
#return HttpResponse("Hello, world. You're at the polls index.")
template = loader.get_template('www/login.html')
context = {
'latest_question_list': 'hello',
}
return HttpResponse(template.render(context, request))
def demo(request):
#return HttpResponse("Hello, world. You're at the polls index.")
template = loader.get_template('www/mytest.html')
context = {
'latest_question_list': 'hello',
}
return HttpResponse(template.render(context, request))
def dump(request):
template = loader.get_template('www/pod_info.html')
context = {
'latest_question_list': 'hello',
}
return HttpResponse(template.render(context, request))
def files(request):
#return HttpResponse('xxx')
#context = {'file_list': 'abc.prof'}
#print(os.path.isdir('ops_api/upload_files'))
#a=os.listdir('ops_api/upload_files')
file_path = 'ops_api/upload_files/'
file_list = os.listdir(file_path)
#print(c)
file_info = {}
for filename in file_list:
# print(i)
full_path = file_path + filename
filesize = os.stat(full_path).st_size
#print(filename, round(filesize / 1024))
file_info[filename] = round(filesize / 1024)
#print(file_info)
#context = {'file_list': os.listdir('ops_api/upload_files')}
context = {'file_list': file_info}
return render(request, 'www/files.html', context)
def download(request):
file_name=request.POST.get('filename', None)
#print(file_name)
    def file_iterator(file_name, chunk_size=512):
        # open in binary mode so non-text files stream correctly
        with open(file_name, 'rb') as f:
while True:
c = f.read(chunk_size)
if c:
yield c
else:
break
file_path = 'ops_api/upload_files/'
logging.info(file_name)
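    # Note: file_name comes straight from the POST body; a hardened version should
    # sanitize it (e.g. with os.path.basename) to prevent path traversal.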
if file_name:
response = StreamingHttpResponse(file_iterator(file_path+file_name))
response['Content-Type'] = 'application/octet-stream'
        # Content-Disposition supplies the default filename when the user saves the downloaded content
response['Content-Disposition'] = 'attachment;filename="{}"'.format(file_name)
return response
else:
        return HttpResponse('No file selected')
```
{
"source": "251/shaape",
"score": 3
}
#### File: shaape/shaape/arrow.py
```python
from polygon import Polygon
from named import Named
from node import Node
from translatable import Translatable
class Arrow(Polygon, Translatable, Named):
    def __init__(self, position = (0, 0), node_list = None):
        # avoid the shared mutable default-argument pitfall
        Polygon.__init__(self, node_list if node_list is not None else [])
Translatable.__init__(self, position)
Named.__init__(self)
self.__connected_objects = []
self.__pointed_objects = []
self.add_name('_arrow_')
def scale(self, scale):
Translatable.scale(self, scale)
Polygon.scale(self, scale)
return
def direction(self):
raise NotImplementedError
def add_connected_object(self, obj):
self.__connected_objects.append(obj)
def connected_objects(self):
return self.__connected_objects
def add_pointed_object(self, obj):
self.__pointed_objects.append(obj)
def pointed_objects(self):
return self.__pointed_objects
```
#### File: shaape/shaape/drawingbackend.py
```python
from drawable import Drawable
from polygon import Polygon
from opengraph import OpenGraph
from scalable import Scalable
from background import Background
from arrow import Arrow
from text import Text
class DrawingBackend(object):
DEFAULT_SCALE = 1
DEFAULT_PIXELS_PER_UNIT = 20
DEFAULT_SHADOW_TRANSLATION = (2, 2)
def __init__(self, image_scale = DEFAULT_SCALE, image_width = None, image_height = None):
self.__user_canvas_size = [image_width, image_height]
self._canvas_size = [image_width, image_height]
if image_width:
self._canvas_size[0] = self._canvas_size[0] * image_scale
if image_height:
self._canvas_size[1] = self._canvas_size[1] * image_scale
self._scale = image_scale
self.__aspect_ratio = 0.5
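        # the 0.5 horizontal aspect ratio presumably compensates for text cells
        # being roughly twice as tall as they are wide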
self.__pixels_per_unit = (self.DEFAULT_PIXELS_PER_UNIT * self._scale * self.__aspect_ratio, self.DEFAULT_PIXELS_PER_UNIT * self._scale)
self.__global_scale = (self.DEFAULT_PIXELS_PER_UNIT * self._scale * self.__aspect_ratio, self.DEFAULT_PIXELS_PER_UNIT * self._scale)
return
def scale(self):
return self._scale
def shadow_translation(self):
return (DrawingBackend.DEFAULT_SHADOW_TRANSLATION[0] * self.scale(), DrawingBackend.DEFAULT_SHADOW_TRANSLATION[1] * self.scale())
def set_canvas_size(self, width, height):
if not self.__user_canvas_size[0]:
self._canvas_size[0] = width * self._scale
if not self.__user_canvas_size[1]:
self._canvas_size[1] = height * self._scale
def canvas_size(self):
return self._canvas_size
def unit_size(self):
        return self.__pixels_per_unit
def run(self, drawable_objects, filename):
sortable = lambda x: isinstance(x, Drawable)
sortable_objects = filter(lambda x: sortable(x), drawable_objects)
unsortable_objects = filter(lambda x: not sortable(x), drawable_objects)
drawable_objects = sorted(sortable_objects, key=lambda x: x.min()) + unsortable_objects
for drawable_object in drawable_objects:
if isinstance(drawable_object, Background):
if not self.__user_canvas_size[0]:
if self.__user_canvas_size[1]:
scale = self.__user_canvas_size[1] / drawable_object.size()[1] * self.__aspect_ratio
else:
scale = self.__pixels_per_unit[0]
self._canvas_size[0] = drawable_object.size()[0] * scale
if not self.__user_canvas_size[1]:
if self.__user_canvas_size[0]:
scale = self.__user_canvas_size[0] / drawable_object.size()[0] / self.__aspect_ratio
else:
scale = self.__pixels_per_unit[1]
self._canvas_size[1] = drawable_object.size()[1] * scale
self.__global_scale = [self._canvas_size[0] / drawable_object.size()[0], self._canvas_size[1] / drawable_object.size()[1]]
self._scale = self.__global_scale[0] / (self.DEFAULT_PIXELS_PER_UNIT * self.__aspect_ratio)
for drawable_object in drawable_objects:
if isinstance(drawable_object, Scalable):
drawable_object.scale(self.__global_scale)
self.create_canvas()
self.draw_objects(drawable_objects)
self.export_to_file(filename)
def global_scale(self):
return self.__global_scale
def draw_polygon_shadow(self, obj):
raise NotImplementedError
def draw_polygon(self, obj):
raise NotImplementedError
def draw_open_graph(self, obj):
raise NotImplementedError
def draw_open_graph_shadow(self, obj):
raise NotImplementedError
def draw_text(self, obj):
raise NotImplementedError
def draw_text_shadow(self,obj):
raise NotImplementedError
def push_surface(self):
raise NotImplementedError
def pop_surface(self):
raise NotImplementedError
def translate(self, x, y):
raise NotImplementedError
def blur_surface(self):
raise NotImplementedError
def draw_objects(self, drawable_objects):
objects = [o for o in drawable_objects if isinstance(o, Drawable)]
if objects:
max_depth = max(objects, key=lambda o: o.z_order()).z_order()
objects_lists_per_depth = [[] for x in xrange(max_depth + 1)]
else:
objects_lists_per_depth = []
for o in objects:
objects_lists_per_depth[o.z_order()].append(o)
for obj_list in objects_lists_per_depth:
polygons = filter(lambda d: isinstance(d, Polygon) and not isinstance(d, Arrow), obj_list)
text = filter(lambda d: isinstance(d, Text), obj_list)
arrows = filter(lambda d: isinstance(d, Arrow), obj_list)
graphs = filter(lambda d: isinstance(d, OpenGraph), obj_list)
self.push_surface()
self.translate(*self.shadow_translation())
for p in polygons:
if p.style().shadow() == 'on':
self.draw_polygon_shadow(p)
for drawable_object in graphs:
if drawable_object.style().shadow() == 'on':
self.draw_open_graph_shadow(drawable_object)
for drawable_object in text:
if drawable_object.style().shadow() == 'on':
self.draw_text_shadow(drawable_object)
self.blur_surface()
self.pop_surface()
self.push_surface()
for p in polygons:
self.draw_polygon(p)
self.pop_surface()
self.push_surface()
for p in polygons:
self.draw_open_graph(p.frame())
self.pop_surface()
self.push_surface()
for drawable_object in graphs:
self.draw_open_graph(drawable_object)
self.pop_surface()
self.push_surface()
self.translate(*self.shadow_translation())
for drawable_object in arrows:
if drawable_object.style().shadow() == 'on':
self.draw_polygon_shadow(drawable_object)
self.blur_surface()
self.pop_surface()
self.push_surface()
for drawable_object in arrows:
self.draw_polygon(drawable_object)
self.pop_surface()
self.push_surface()
for drawable_object in text:
self.draw_text(drawable_object)
self.pop_surface()
return
```
#### File: shaape/shaape/edge.py
```python
class Edge:
def __init__(self, node1, node2, above = None, below = None, z_order = None):
self.__start = node1
self.__end = node2
self.__above = above
self.__below = below
self.__z_order = z_order
return
def __getitem__(self, index):
if index == 0:
return self.__start
elif index == 1:
return self.__end
else:
raise IndexError
def start(self):
return self.__start
def end(self):
return self.__end
def above(self):
return self.__above
def below(self):
return self.__below
def z_order(self):
return self.__z_order
def __ccw(self, a, b, c):
return (c[1] - a[1]) * (b[0] - a[0]) > (b[1] - a[1]) * (c[0] - a[0])
def intersects(self, edge):
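        # Classic orientation test: the segments intersect iff the endpoints of each
        # segment lie on opposite sides (opposite CCW orientation) of the other.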
return self.__ccw(self[0], edge[0], edge[1]) != self.__ccw(self[1], edge[0], edge[1]) and self.__ccw(self[0], self[1], edge[0]) != self.__ccw(self[0], self[1], edge[1])
```
#### File: shaape/shaape/optionparser.py
```python
from parser import Parser
class OptionParser(Parser):
def __init__(self):
super(OptionParser, self).__init__()
return
```
#### File: shaape/shaape/rotatable.py
```python
class Rotatable(object):
def __init__(self, angle = 0):
self.__angle = angle
def set_angle(self, angle):
self.__angle = angle
def angle(self):
return self.__angle
```
#### File: shaape/shaape/styleparser.py
```python
from parser import Parser
from text import Text
from named import Named
from drawable import Drawable
from arrow import Arrow
from polygon import Polygon
from opengraph import OpenGraph
from style import Style
import re
class StyleParser(Parser):
def __init__(self):
super(StyleParser, self).__init__()
return
def run(self, raw_data, objects):
styles = filter(lambda x: isinstance(x, Style), objects)
styles = sorted(styles, key = lambda x: x.priority())
named_drawables = filter(lambda x: isinstance(x, Drawable) and isinstance(x, Named), objects)
default_style = {
            'fill' : Style([], 'fill', [[1, 1, 1], [0.5, 0.5, 0.5]]),
'frame' : Style([], 'frame', [[0, 0, 0], 'solid', 1]),
'line' : Style([], 'fill', [[0, 0, 0, 1], 'solid', 1]),
'arrow' : Style([], 'fill', [[0, 0, 0]]),
'text' : Style([], 'text', [[0, 0, 0], 'no-shadow'])}
for obj in objects:
if isinstance(obj, Drawable):
if isinstance(obj, Arrow):
obj.set_style(default_style['arrow'])
elif isinstance(obj, Polygon):
obj.set_style(default_style['fill'])
obj.frame().set_style(default_style['frame'])
if 'dotted' in obj.options():
obj.style().set_color(Style.COLORS['empty'])
obj.frame().style().set_type('dotted')
if 'emph' in obj.options():
obj.set_width(obj.get_width() * 2)
elif isinstance(obj, OpenGraph):
obj.set_style(default_style['line'])
if 'dotted' in obj.options():
obj.style().set_type('dotted')
if 'emph' in obj.options():
obj.style().set_width(obj.style().width() * 4)
elif isinstance(obj, Text):
obj.set_style(default_style['text'])
for style in styles:
name_pattern = re.compile(style.name_pattern(), re.UNICODE)
for obj in named_drawables:
for name in obj.names():
if name_pattern.match(name):
if style.target_type() == 'frame' and isinstance(obj, Polygon):
target_obj = obj.frame()
elif style.target_type() == 'text' and isinstance(obj, Text):
target_obj = obj
elif style.target_type() == 'fill' and not isinstance(obj, Text):
target_obj = obj
else:
target_obj = None
                        if target_obj is not None:
if style.priority() > target_obj.style().priority():
target_obj.set_style(style)
arrows = filter(lambda x: isinstance(x, Arrow), objects)
for arrow in arrows:
for obj in arrow.pointed_objects():
if obj.style().priority() > arrow.style().priority():
arrow.set_style(obj.style())
for obj in arrow.connected_objects():
if obj.style().priority() > arrow.style().priority():
arrow.set_style(obj.style())
self._parsed_data = raw_data
self._objects = objects
return
```
#### File: shaape/tests/test_drawable.py
```python
from shaape.drawable import Drawable
from shaape.style import Style
import nose
import unittest
from nose.tools import *
class TestDrawable(unittest.TestCase):
def test_init(self):
drawable = Drawable()
assert type(drawable.style()) == Style
def test_style(self):
drawable = Drawable()
style = Style(['abc', 'def', 'geh'], 'line', ['dotted', 'shadow'])
drawable.set_style(style)
assert drawable.style().options() == style.options()
def test_min(self):
drawable = Drawable()
assert_raises(NotImplementedError, drawable.min)
def test_max(self):
drawable = Drawable()
assert_raises(NotImplementedError, drawable.max)
```
#### File: shaape/tests/test_overlayparser.py
```python
from shaape.overlayparser import OverlayParser
from shaape.node import Node
from shaape.opengraph import OpenGraph
from shaape.polygon import Polygon
import nose
import unittest
from nose.tools import *
class TestOverlayParser(unittest.TestCase):
def test_init(self):
parser = OverlayParser()
        assert parser is not None
def test_cycle_len(self):
parser = OverlayParser()
cycle = [Node(0, 0), Node(4, 0), Node(4, 2), Node(0, 2), Node(0, 0)]
assert parser.cycle_len(cycle) == 12
def test_run(self):
parser = OverlayParser()
parser.run("",[])
parser.run("-",[])
assert len(parser.drawable_objects()) == 1, parser.drawable_objects()
assert type(parser.drawable_objects()[0]) == OpenGraph
parser.run("- -",[])
assert len(parser.drawable_objects()) == 2
assert type(parser.drawable_objects()[0]) == OpenGraph
assert type(parser.drawable_objects()[1]) == OpenGraph
parser.run(["++","++"],[])
assert len(parser.drawable_objects()) == 1, "got " + str(len(parser.drawable_objects())) + " drawable objects " + str(parser.drawable_objects())
assert len([ o for o in parser.drawable_objects() if type(o) == Polygon]) == 1
parser.run(["+--+", "| ++", "| ++", "+--+"],[])
assert len(parser.drawable_objects()) == 2, "got " + str(len(parser.drawable_objects())) + " drawable objects "
assert len([ o for o in parser.drawable_objects() if type(o) == Polygon]) == 2
```
#### File: shaape/tests/test_parser.py
```python
from shaape.parser import Parser
import nose
import unittest
from nose.tools import *
class TestParser(unittest.TestCase):
def test_init(self):
parser = Parser()
        assert parser is not None
assert parser.parsed_data() == []
assert parser.drawable_objects() == []
def test_run(self):
parser = Parser()
assert_raises(NotImplementedError, parser.run, "", [])
```
{
"source": "2525VenturesBV/cfn-tag-provider",
"score": 2
}
#### File: cfn-tag-provider/src/tag_provider.py
```python
import os
import json
import logging
import boto3
from cfn_resource_provider import ResourceProvider
log = logging.getLogger()
log.setLevel(os.environ.get("LOG_LEVEL", "INFO"))
request_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "required": ["ResourceARN", "Tags"],
    "properties": {
        "ResourceARN": {"type": "array", "items": {"type": "string"}},
        "Tags": {"type": "object"},
    },
}
class TagProvider(ResourceProvider):
def __init__(self):
super(TagProvider, self).__init__()
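        # The Resource Groups Tagging API can tag/untag resources of many AWS
        # services in a single call, addressed by ARN.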
self.rg_tagging = boto3.client("resourcegroupstaggingapi")
@property
def resource_arns(self):
return sorted(self.get("ResourceARN"))
@property
def old_resource_arns(self):
return sorted(self.get_old("ResourceARN", self.resource_arns))
@property
def tags(self):
return self.get("Tags")
@property
def old_tags(self):
return self.get_old("Tags", self.tags)
def has_changes(self):
return self.resource_arns != self.old_resource_arns or self.tags != self.old_tags
    def check_errors(self, response):
        if response["FailedResourcesMap"]:
            log.error("response %s", response)
            # FailedResourcesMap is a dict keyed by ARN, so take the first value
            first_failure = next(iter(response["FailedResourcesMap"].values()))
            self.fail(first_failure.get("ErrorMessage"))
            return False
        return True
def apply_tags(self):
response = self.rg_tagging.tag_resources(
ResourceARNList=self.resource_arns, Tags=self.tags
)
self.check_errors(response)
def create(self):
self.apply_tags()
self.physical_resource_id = self.logical_resource_id
def update(self):
if self.has_changes():
self.delete_old()
self.apply_tags()
else:
self.success("no changes")
    def delete_old(self):
        keys = list(self.old_tags.keys())
        if keys:
            # remove the previous tag keys from the previous set of ARNs
            self.rg_tagging.untag_resources(
                ResourceARNList=self.old_resource_arns, TagKeys=keys
            )
    def delete(self):
        keys = list(self.tags.keys())
        if keys:
            # best effort: failures are ignored so stack deletion is not blocked
            self.rg_tagging.untag_resources(
                ResourceARNList=self.resource_arns, TagKeys=keys
            )
provider = TagProvider()
def handler(request, context):
return provider.handle(request, context)
```
{
"source": "2529342549/turtlebot3_m_learning",
"score": 3
}
#### File: turtlebot3_dqn/nodes/environment.py
```python
import rospy
import numpy as np
import math
from math import pi
import time
import Tkinter as tk
from PIL import ImageTk, Image
from geometry_msgs.msg import Twist, Point, Pose
from std_srvs.srv import Empty
np.random.seed(1)
PhotoImage = ImageTk.PhotoImage
UNIT = 50 # pixels
HEIGHT = 11 # grid height
WIDTH = 11 # grid width
class Env(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self.action_space = ['u', 'd', 'l', 'r']
self.n_actions = len(self.action_space)
self.title('Q Learning')
        self.geometry('{0}x{1}'.format(WIDTH * UNIT, HEIGHT * UNIT))
self.shapes = self.load_images()
self.canvas = self._build_canvas()
self.texts = []
self.act = 0
self.pub_cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=5)
self.reset_proxy = rospy.ServiceProxy('gazebo/reset_simulation', Empty)
self.unpause_proxy = rospy.ServiceProxy('gazebo/unpause_physics', Empty)
self.pause_proxy = rospy.ServiceProxy('gazebo/pause_physics', Empty)
def _build_canvas(self):
canvas = tk.Canvas(self, bg='white',
height=HEIGHT * UNIT,
width=WIDTH * UNIT)
# create grids
        for c in range(0, WIDTH * UNIT, UNIT):  # vertical grid lines, one per cell
x0, y0, x1, y1 = c, 0, c, HEIGHT * UNIT
canvas.create_line(x0, y0, x1, y1)
        for r in range(0, HEIGHT * UNIT, UNIT):  # horizontal grid lines, one per cell
x0, y0, x1, y1 = 0, r, HEIGHT * UNIT, r
canvas.create_line(x0, y0, x1, y1)
# add img to canvas
self.rectangle = canvas.create_image(275, 275, image=self.shapes[0])
self.triangle1 = canvas.create_image(175, 175, image=self.shapes[1])
self.triangle2 = canvas.create_image(175, 375, image=self.shapes[1])
self.triangle3 = canvas.create_image(375, 175, image=self.shapes[1])
self.triangle4 = canvas.create_image(375, 375, image=self.shapes[1])
self.circle = canvas.create_image(475, 75, image=self.shapes[2])
# pack all
canvas.pack()
return canvas
def load_images(self):
rectangle = PhotoImage(
Image.open("/home/wangqiang/catkin_ws/src/turtlebot3_machine_learning/turtlebot3_dqn/img/rectangle.png").resize((35, 35)))
triangle = PhotoImage(
Image.open("/home/wangqiang/catkin_ws/src/turtlebot3_machine_learning/turtlebot3_dqn/img/triangle.png").resize((35, 35)))
circle = PhotoImage(
Image.open("/home/wangqiang/catkin_ws/src/turtlebot3_machine_learning/turtlebot3_dqn/img/circle.png").resize((35, 35)))
return rectangle, triangle, circle
def text_value(self, row, col, contents, action, font='Helvetica', size=7,
style='normal', anchor="nw"):
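        # Place each action's Q-value at a different offset within the grid cell
        # so the four values for a state don't overlap when drawn.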
if action == 0:
origin_x, origin_y = 3, 21
elif action == 1:
origin_x, origin_y = 37, 21
elif action == 2:
origin_x, origin_y = 21, 3
else:
origin_x, origin_y = 21, 33
x = origin_y + (UNIT * col)
y = origin_x + (UNIT * row)
font = (font, str(size), style)
text = self.canvas.create_text(x, y, fill="black", text=contents,
font=font, anchor=anchor)
return self.texts.append(text)
def print_value_all(self, q_table):
for i in self.texts:
self.canvas.delete(i)
del self.texts[::]
for i in range(HEIGHT):
for j in range(WIDTH):
for action in range(0, 4):
state = [i, j]
if str(state) in q_table.keys():
temp = q_table[str(state)][action]
self.text_value(j, i, round(temp, 2), action)
def coords_to_state(self, coords):
x = int((coords[0] - 25) / 50)
y = int((coords[1] - 25) / 50)
return [x, y]
def state_to_coords(self, state):
x = int(state[0] * 50 + 25)
y = int(state[1] * 50 + 25)
return [x, y]
def reset(self):
rospy.wait_for_service('gazebo/reset_simulation')
try:
self.reset_proxy()
except (rospy.ServiceException) as e:
print("gazebo/reset_simulation service call failed")
self.update()
time.sleep(0.5)
x, y = self.canvas.coords(self.rectangle)
self.canvas.move(self.rectangle, UNIT / 2 - x + 250, UNIT / 2 - y + 250)
self.render()
# return observation
return self.coords_to_state(self.canvas.coords(self.rectangle))
def step(self, action):
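        # self.act holds the previous action (0=up, 1=down, 2=left, 3=right); each
        # branch first rotates the robot from that old heading to the new one,
        # then drives forward one grid cell.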
state = self.canvas.coords(self.rectangle)
base_action = np.array([0, 0])
self.render()
vel_cmd = Twist()
if action == 0: # up
if state[1] > UNIT:
                if self.act == 0:  # go straight
vel_cmd.linear.x = 0.2
self.pub_cmd_vel.publish(vel_cmd)
                elif self.act == 1:  # turn around
vel_cmd.angular.z = pi/2
self.pub_cmd_vel.publish(vel_cmd)
time.sleep(2)
vel_cmd.linear.x = 0.2
vel_cmd.angular.z = 0.0
self.pub_cmd_vel.publish(vel_cmd)
                elif self.act == 2:  # turn right
vel_cmd.angular.z = -pi/4
self.pub_cmd_vel.publish(vel_cmd)
time.sleep(2)
vel_cmd.linear.x = 0.2
vel_cmd.angular.z = 0.0
self.pub_cmd_vel.publish(vel_cmd)
                else:  # turn left
vel_cmd.angular.z = pi/4
self.pub_cmd_vel.publish(vel_cmd)
time.sleep(2)
vel_cmd.linear.x = 0.2
vel_cmd.angular.z = 0.0
self.pub_cmd_vel.publish(vel_cmd)
self.act = action
base_action[1] -= UNIT
time.sleep(2)
elif action == 1: # down
if state[1] < (HEIGHT - 1) * UNIT:
                if self.act == 0:  # turn around
vel_cmd.angular.z = pi/2
self.pub_cmd_vel.publish(vel_cmd)
time.sleep(2)
vel_cmd.linear.x = 0.2
vel_cmd.angular.z = 0.0
self.pub_cmd_vel.publish(vel_cmd)
                elif self.act == 1:  # go straight
vel_cmd.linear.x = 0.2
self.pub_cmd_vel.publish(vel_cmd)
                elif self.act == 2:  # turn left
vel_cmd.angular.z = pi/4
self.pub_cmd_vel.publish(vel_cmd)
time.sleep(2)
vel_cmd.linear.x = 0.2
vel_cmd.angular.z = 0.0
self.pub_cmd_vel.publish(vel_cmd)
                else:  # turn right
vel_cmd.angular.z = -pi/4
self.pub_cmd_vel.publish(vel_cmd)
time.sleep(2)
vel_cmd.linear.x = 0.2
vel_cmd.angular.z = 0.0
self.pub_cmd_vel.publish(vel_cmd)
self.act = action
base_action[1] += UNIT
time.sleep(2)
elif action == 2: # left
if state[0] > UNIT:
                if self.act == 0:  # turn left
vel_cmd.angular.z = pi/4
self.pub_cmd_vel.publish(vel_cmd)
time.sleep(2)
vel_cmd.linear.x = 0.2
vel_cmd.angular.z = 0.0
self.pub_cmd_vel.publish(vel_cmd)
                elif self.act == 1:  # turn right
vel_cmd.angular.z = -pi/4
self.pub_cmd_vel.publish(vel_cmd)
time.sleep(2)
vel_cmd.linear.x = 0.2
vel_cmd.angular.z = 0.0
self.pub_cmd_vel.publish(vel_cmd)
                elif self.act == 2:  # go straight
vel_cmd.linear.x = 0.2
self.pub_cmd_vel.publish(vel_cmd)
                else:  # turn around
vel_cmd.angular.z = pi/2
self.pub_cmd_vel.publish(vel_cmd)
time.sleep(2)
vel_cmd.linear.x = 0.2
vel_cmd.angular.z = 0.0
self.pub_cmd_vel.publish(vel_cmd)
self.act = action
base_action[0] -= UNIT
time.sleep(2)
elif action == 3: # right
if state[0] < (WIDTH - 1) * UNIT:
                if self.act == 0:  # turn right
vel_cmd.angular.z = -pi/4
self.pub_cmd_vel.publish(vel_cmd)
time.sleep(2)
vel_cmd.linear.x = 0.2
vel_cmd.angular.z = 0.0
self.pub_cmd_vel.publish(vel_cmd)
                elif self.act == 1:  # turn left
vel_cmd.angular.z = pi/4
self.pub_cmd_vel.publish(vel_cmd)
time.sleep(2)
vel_cmd.linear.x = 0.2
vel_cmd.angular.z = 0.0
self.pub_cmd_vel.publish(vel_cmd)
                elif self.act == 2:  # turn around
vel_cmd.angular.z = pi/2
self.pub_cmd_vel.publish(vel_cmd)
time.sleep(2)
vel_cmd.linear.x = 0.2
vel_cmd.angular.z = 0.0
self.pub_cmd_vel.publish(vel_cmd)
                else:  # go straight
vel_cmd.linear.x = 0.2
self.pub_cmd_vel.publish(vel_cmd)
self.act = action
base_action[0] += UNIT
time.sleep(2)
# move agent
self.canvas.move(self.rectangle, base_action[0], base_action[1])
# move rectangle to top level of canvas
self.canvas.tag_raise(self.rectangle)
next_state = self.canvas.coords(self.rectangle)
# reward function
if next_state == self.canvas.coords(self.circle):
reward = 100
done = True
rospy.loginfo("Success!!!")
self.pub_cmd_vel.publish(Twist())
self.act = 0
elif next_state in [self.canvas.coords(self.triangle1),
self.canvas.coords(self.triangle2),
self.canvas.coords(self.triangle3),
self.canvas.coords(self.triangle4)]:
reward = -100
done = True
rospy.loginfo("Collision!!!")
self.pub_cmd_vel.publish(Twist())
self.act = 0
else:
reward = 0
done = False
next_state = self.coords_to_state(next_state)
return next_state, reward, done
def render(self):
time.sleep(0.03)
self.update()
```
#### File: src/turtlebot3_dqn/simulation_environment_real.py
```python
import rospy
import numpy as np
import math
import time
from math import pi
from geometry_msgs.msg import Twist, Point, Pose, PoseWithCovarianceStamped
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from std_srvs.srv import Empty
from std_msgs.msg import String
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from simulation_respawn_real import Respawn
# from nodes.turtlebot3_real_transmission_2 import Sender
# from gazebo_msgs.msg import ModelStates, ModelState
class Env():
def __init__(self, action_size):
self.goal_x = 0
self.goal_y = 0
self.start_x = 0
self.start_y = 0
self.start_orientation = PoseWithCovarianceStamped()
self.heading = 0
self.count = 0
self.action_size = action_size
self.initGoal = True
self.get_goalbox = False
self.position = Pose()
self.position_x, self.position_y = 0, 0
self.pub_cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=1, latch = True)
self.sub_odom = rospy.Subscriber('odom', Odometry, self.getOdometry)
self.respawn_goal = Respawn()
self.action_memory = []
self.time_start = time.time()
self.orientation, self.yaw_init = 0, 0
self.goal_x_map, self.goal_y_map = 0, 0
def getGoalDistace(self):
goal_distance = round(math.hypot(self.goal_x - self.position.x, self.goal_y - self.position.y), 2)
return goal_distance
def getOdometry(self, odom):
self.position = odom.pose.pose.position
self.position_x, self.position_y = self.position.x, self.position.y
orientation = odom.pose.pose.orientation
self.orientation = orientation
orientation_list = [orientation.x, orientation.y, orientation.z, orientation.w]
_, _, yaw = euler_from_quaternion(orientation_list)
# print "odom yaw: ", yaw
goal_angle = math.atan2(self.goal_y - self.position.y , self.goal_x - self.position.x)
heading = goal_angle - yaw
if heading > pi:
heading -= 2 * pi
elif heading < -pi:
heading += 2 * pi
self.heading = round(heading, 2)
def getState(self, scan):
scan_range = []
scan_range2 = []
# print scan.ranges
heading = self.heading
min_range = 0.3
done = False
# no filter
# for i in range(len(scan.ranges)):
# if scan.ranges[i] == float('Inf'):
# scan_range.append(3.5)
# # zero Problem
# # elif np.isnan(scan.ranges[i]):
# # scan_range.append(0)
# elif scan.ranges[i] <= 0.07:
# scan_range.append(3.5)
# else:
# scan_range.append(scan.ranges[i])
# Filter
i = 0
while i <= len(scan.ranges)-1:
# print "length", len(scan_range)
if scan.ranges[i] == float('Inf'):
scan_range.append(3.5)
i += 1
elif scan.ranges[i] == 0:
k = 1
t = 0
if i == 0:
while scan.ranges[k]==0:
k += 1
while t <= k:
scan_range.append(scan.ranges[k])
t += 1
i = k + 1
else:
k = i
m = i
a = scan.ranges[i-1]
while scan.ranges[k]==0:
if k == 359:
while m <= k:
scan_range.append(a)
m += 1
for i in range(len(scan_range)):
if scan_range[i] < 0.12:
scan_range2.append(0.12)
else:
scan_range2.append(scan_range[i])
current_distance = round(math.hypot(self.goal_x - self.position.x, self.goal_y - self.position.y),2)
# if current_distance < 0.2:
if current_distance < 0.15:
vel_cmd = Twist()
self.get_goalbox = True
obstacle_min_range = round(min(scan_range), 2)
obstacle_angle = np.argmin(scan_range)
if min_range > min(scan_range) > 0:
done = True
return scan_range2 + [heading, current_distance, obstacle_min_range, obstacle_angle], done
k += 1
b = scan.ranges[k]
while m < k:
scan_range.append(max(a, b))
m += 1
i = k
else:
scan_range.append(scan.ranges[i])
i += 1
for i in range(len(scan_range)):
if scan_range[i] < 0.12:
scan_range2.append(0.12)
else:
scan_range2.append(scan_range[i])
obstacle_min_range = round(min(scan_range), 2)
obstacle_angle = np.argmin(scan_range)
if min_range > min(scan_range) > 0:
done = True
current_distance = round(math.hypot(self.goal_x - self.position.x, self.goal_y - self.position.y),2)
# if current_distance < 0.2:
if current_distance < 0.15:
vel_cmd = Twist()
self.get_goalbox = True
return scan_range2 + [heading, current_distance, obstacle_min_range, obstacle_angle], done
def setReward(self, state, done, action):
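        # Reward shaping: a per-action heading-alignment term, scaled by a function
        # of the remaining distance to the goal, plus a flat penalty near obstacles.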
yaw_reward = []
obstacle_min_range = state[-2]
current_distance = state[-3]
heading = state[-4]
for i in range(5):
angle = -pi / 4 + heading + (pi / 8 * i) + pi / 2
tr = 1 - 4 * math.fabs(0.5 - math.modf(0.25 + 0.5 * angle % (2 * math.pi) / math.pi)[0])
yaw_reward.append(tr)
distance_rate = 2 ** (current_distance / self.goal_distance)
if obstacle_min_range < 0.5:
ob_reward = -5
else:
ob_reward = 0
reward = ((round(yaw_reward[action] * 5, 2)) * distance_rate) + ob_reward
if done:
rospy.loginfo("Near Collision!!")
reward = -200
# driving backwards last 25 actions ~5 seconds
t = 0
l = len(self.action_memory)
vel_cmd = Twist()
# while t <= 10:
# if len(self.action_memory) > 20:
# max_angular_vel = -1.5
# action = self.action_memory[l-t-1]
# ang_vel = ((-self.action_size + 1)/2 - action) * max_angular_vel * 0.5
# vel_cmd.linear.x = -0.15
# # vel_cmd.angular.z = ang_vel
# vel_cmd.angular.z = 0
# time_start = time.time()
# a=0
# self.pub_cmd_vel.publish(vel_cmd)
# t += 1
# else:
# t = 10
# stand still after collision
vel_cmd.linear.x = 0
vel_cmd.angular.z = 0
time_start = time.time()
a=0
while a < 1:
self.pub_cmd_vel.publish(vel_cmd)
a = time.time() - time_start
if self.get_goalbox:
rospy.loginfo("Goal!!")
print "start_position: ", self.start_x,"/ ", self.start_y
print "odom_position:", self.position.x,"/ " ,self.position.y
print "goal_position: ", self.goal_x,"/ ", self.goal_y
print "action: ", action
print "_______________________________________________________________"
reward = 500
self.get_goalbox = False
done = True
vel_cmd = Twist()
vel_cmd.linear.x = 0
vel_cmd.angular.z = 0
start = 0
start_1 = time.time()
while start - 5 < 0:
self.pub_cmd_vel.publish(vel_cmd)
start = time.time() - start_1
# self.pub_cmd_vel.publish(vel_cmd)
# self.goal_x, self.goal_y = self.respawn_goal.getPosition()
# self.goal_distance = self.getGoalDistace()
return reward, done
def speed(self, state):
# Calculate the data new with a filter
scan_range = []
speed = 0.15
speed_goal = 0
for i in range(len(state)):
if state[i] < 0.30:
scan_range.append(3.5)
else:
scan_range.append(state[i])
scan_range = state
obstacle_min_range = round(min(scan_range), 2)
goal_distance = scan_range[361]
# print obstacle_min_range
if obstacle_min_range >= 1:
speed = 0.15
elif obstacle_min_range < 1 and obstacle_min_range >= 0.3:
speed = 0.15 + ((obstacle_min_range-1)/7)
speed_goal = speed
if goal_distance < 0.5:
speed_goal = 0.15 + (goal_distance - 0.)/8
speed = min([speed, speed_goal])
return speed
def step(self, action):
time1 = time.time()
data = None
while data is None:
try:
data = rospy.wait_for_message('scan', LaserScan, timeout=5)
except:
pass
vel_cmd = Twist()
vel_cmd.linear.x = 0
state, done = self.getState(data)
reward, done = self.setReward(state, done, action)
if not done:
max_angular_vel = 1.5
# max_angular_vel = 0.15
ang_vel = ((self.action_size - 1)/2 - action) * max_angular_vel * 0.5
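            # maps the discrete action index onto an angular velocity symmetric around zero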
vel_cmd = Twist()
vel_cmd.linear.x = self.speed(state)
# vel_cmd.linear.x = 0.15
vel_cmd.angular.z = ang_vel
self.action_memory.append(-1*action)
time_start = time.time()
self.pub_cmd_vel.publish(vel_cmd)
if self.count % 2 == 0:
print "start_position: ", self.start_x,"/ ", self.start_y
print "odom_position:", self.position.x,"/ " ,self.position.y
print "goal_position: ", self.goal_x,"/ ", self.goal_y
print "goal_distance: ", state[-3],"/ obstacle_distance: ", state[-2]
print "Vel_linear: ",vel_cmd.linear.x , "action: ", action
print done
print "_____________________________________________________________"
self.count += 1
return np.asarray(state), reward, done
def reset(self):
# corrdinate receive, transformation
yaw_neu = 0
if self.initGoal:
self.start_x_map, self.start_y_map, start_orientation_2 = self.respawn_goal.getstartPosition()
self.goal_x_map, self.goal_y_map = self.respawn_goal.getPosition()
start_orientation_list = [start_orientation_2.x, start_orientation_2.y, start_orientation_2.z, start_orientation_2.w]
_, _, self.yaw_init = euler_from_quaternion(start_orientation_list)
self.initGoal = False
# self.goal_x, self.goal_y = self.goal_x_map, self.goal_y_map
else:
self.start_x_map, self.start_y_map = self.goal_x_map, self.goal_y_map
self.goal_x_map, self.goal_y_map = self.respawn_goal.getPosition()
orientation = self.orientation
orientation_list = [orientation.x, orientation.y, orientation.z, orientation.w]
_, _, yaw_neu = euler_from_quaternion(orientation_list)
print "yaw_neu:", yaw_neu
# self.goal_x_map, self.goal_y_map = self.goal_x, self.goal_y
print "Wait 3 sec"
time.sleep(3)
# in map coordinates
# diff_x = self.goal_x - self.start_x + self.position
# diff_y = self.goal_y - self.start_y + self.position
diff_x = self.goal_x_map - self.start_x_map
diff_y = self.goal_y_map - self.start_y_map
print "diff_x: ", diff_x
print "diff_y: ", diff_y
print "yaw_neu: ", yaw_neu
# yaw = yaw_neu + self.yaw_init
# print "yaw: ",yaw
# Transformation
yaw = self.yaw_init
self.goal_x = math.cos(yaw)*diff_x + math.sin(yaw)*diff_y + self.position_x
self.goal_y = -1*math.sin(yaw)*diff_x + math.cos(yaw)*diff_y + self.position_y
self.goal_distance = self.getGoalDistace()
data = None
while data is None:
try:
data = rospy.wait_for_message('scan', LaserScan, timeout=5)
except:
pass
self.goal_distance = self.getGoalDistace()
state, done = self.getState(data)
return np.asarray(state)
```
{
"source": "2535463841/fluent-python",
"score": 2
}
#### File: fpstackutils/openstack/client.py
```python
import os
from cinderclient import client as cinder_client
import glanceclient
from keystoneauth1.identity import v3
from keystoneauth1.session import Session
from keystoneclient.v3 import client
from neutronclient.v2_0 import client as neutron_client
import novaclient
from novaclient import client as nova_client
from fp_lib.common import exceptions as fp_exc
from fp_lib.common import log
LOG = log.getLogger(__name__)
NOVA_API_VERSION = "2.49"
nova_extensions = [ext for ext in
nova_client.discover_extensions(NOVA_API_VERSION)
if ext.name in ("assisted_volume_snapshots",
"list_extensions",
"server_external_events")]
class Keystone(object):
def __init__(self, *args, **kwargs):
self.auth = v3.Password(*args, **kwargs)
self.session = Session(auth=self.auth)
self.keystone = client.Client(session=self.session)
def get_or_create_domain(self, name):
domains = self.keystone.domains.list(name=name)
return domains[0] if domains else self.keystone.domains.create(name)
def get_or_create_role(self, name, domain_name=None):
domain = None
if domain_name:
domain = self.get_or_create_domain(domain_name)
roles = self.keystone.roles.list(name=name, domain=domain)
return roles[0] if roles else self.keystone.roles.create(
name, domain=domain)
def get_or_create_project(self, name, domain_name, **kwargs):
domain = self.get_or_create_domain(domain_name)
projects = self.keystone.projects.list(name=name, domain=domain)
return projects[0] if projects else self.keystone.projects.create(
name, domain, **kwargs)
    def get_or_create_user(self, name, domain_name, project_name, **kwargs):
        domain = self.get_or_create_domain(domain_name)
        project = self.get_or_create_project(project_name, domain_name)
role_name = kwargs.pop('role_name', None)
users = self.keystone.users.list(name=name, domain=domain)
user = users[0] if users else self.keystone.users.create(
name, domain=domain, project=project, **kwargs
)
if role_name:
role = self.get_or_create_role(role_name)
self.keystone.roles.grant(role, user=user, project=project)
return user
def get_quota(self):
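        # Note: self.nova is created by the OpenstackClient subclass, so this
        # only works when called on an OpenstackClient instance.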
project_id = self.session.get_project_id()
return self.nova.quotas.get(project_id)
class OpenstackClient(Keystone):
V3_AUTH_KWARGS = ['username', 'password', 'project_name',
'user_domain_name', 'project_domain_name']
def __init__(self, *args, **kwargs):
super(OpenstackClient, self).__init__(*args, **kwargs)
self.neutron = neutron_client.Client(session=self.session)
self.nova = nova_client.Client(NOVA_API_VERSION,
session=self.session,
extensions=nova_extensions)
self.glance = glanceclient.Client('2', session=self.session)
self.cinder = cinder_client.Client('2', session=self.session)
@classmethod
def get_auth_info_from_env(cls):
if 'OS_AUTH_URL' not in os.environ:
raise fp_exc.EnvIsNone('OS_AUTH_URL')
auth_url = os.getenv('OS_AUTH_URL')
auth_kwargs = {}
for auth_arg in cls.V3_AUTH_KWARGS:
env = 'OS_{}'.format(auth_arg.upper())
value = os.getenv(env)
if not value:
raise fp_exc.EnvIsNone(env)
auth_kwargs[auth_arg] = value
return auth_url, auth_kwargs
@classmethod
def create_instance(cls):
auth_url, auth_kwargs = cls.get_auth_info_from_env()
LOG.debug('auth info: %s', auth_kwargs)
return OpenstackClient(auth_url, **auth_kwargs)
    def attach_interface(self, vm_id, net_id=None, port_id=None):
        # novaclient's interface_attach takes the server as its first argument
        return self.nova.servers.interface_attach(vm_id, port_id, net_id, None)
def detach_interface(self, vm_id, port_id):
return self.nova.servers.interface_detach(vm_id, port_id)
def list_interface(self, vm_id):
return self.nova.servers.interface_list(vm_id)
def attach_volume(self, vm_id, volume_id):
return self.nova.volumes.create_server_volume(vm_id, volume_id)
def detach_volume(self, vm_id, volume_id):
return self.nova.volumes.delete_server_volume(vm_id, volume_id)
def create_volume(self, name, size_gb=None):
size = size_gb or 1
return self.cinder.volumes.create(size, name=name)
def get_volume(self, volume_id):
return self.cinder.volumes.get(volume_id)
def delete_volume(self, volume_id):
return self.cinder.volumes.delete(volume_id)
def get_vm_actions(self, vm):
actions = {}
for action in self.nova.instance_action.list(vm.id):
actions.setdefault(action.action, [])
vm_action = self.nova.instance_action.get(vm.id,
action.request_id)
for event in vm_action.events:
actions[action.action].append(event)
return actions
def get_vm_events(self, vm):
action_events = []
for action in self.nova.instance_action.list(vm.id):
vm_action = self.nova.instance_action.get(vm.id,
action.request_id)
events = sorted(vm_action.events,
key=lambda x: x.get('start_time'))
action_events.append((action.action, events))
return action_events
def force_down(self, service, force_down):
if self.nova.api_version >= novaclient.api_versions.APIVersion('2.53'):
self.nova.services.force_down(service.id, force_down)
else:
self.nova.services.force_down(service.host, service.binary,
force_down=force_down)
```
#### File: fp_lib/common/confparser.py
```python
import collections
from six.moves import configparser
class ConfigParserWrapper(object):
def __init__(self):
self._parser = configparser.ConfigParser()
self._file = None
self._defaults = None
def defaults(self):
if self._defaults is None:
self._defaults = self._parser.defaults()
return self._defaults
def read(self, file):
self._file = file
if isinstance(file, str):
self._parser.read(file)
else:
self._parser.readfp(file)
def sections(self):
return self._parser.sections()
def options(self, section, ignore_default=False):
if section == 'DEFAULT':
return self._parser.defaults()
options = collections.OrderedDict()
for option in self._parser.options(section):
value = self._parser.get(section, option)
if ignore_default and value == self.defaults().get(option):
continue
options[option] = self._parser.get(section, option)
return options
def get(self, option, section='DEFAULT'):
options = self.options(section)
if option not in options:
raise configparser.NoOptionError(option, section)
return options.get(option)
def set(self, option, value, section='DEFAULT'):
self._parser.set(section, option, value)
with open(self._file, 'w') as fp:
self._parser.write(fp)
```
#### File: fp_lib/common/log.py
```python
import logging
import logging.config
from logging import handlers
_DEFAULT_LEVEL = logging.INFO
_DEFAULT_FORMAT = '%(asctime)s %(levelname)s %(name)s:%(lineno)s %(message)s'
_DEFAULT_FILE = None
_DEFAULT_MAX_BYTES = 0
_DEFAULT_BACKUP_COUNT = 1
_LOGGER = set([])
def disable_debug():
global _DEFAULT_LEVEL
_DEFAULT_LEVEL = logging.INFO
def enable_debug():
global _DEFAULT_LEVEL
_DEFAULT_LEVEL = logging.DEBUG
for name in _LOGGER:
logger = logging.getLogger(name)
logger.setLevel(_DEFAULT_LEVEL)
def set_default(level=None, filename=None, max_mb=None, backup_count=None):
global _DEFAULT_LEVEL
global _DEFAULT_FILE, _DEFAULT_MAX_BYTES, _DEFAULT_BACKUP_COUNT
if level:
_DEFAULT_LEVEL = level
if filename:
_DEFAULT_FILE = filename
if max_mb:
_DEFAULT_MAX_BYTES = 1024 * 1024 * max_mb
if backup_count:
_DEFAULT_BACKUP_COUNT = backup_count
for name in _LOGGER:
logger = logging.getLogger(name)
logger.setLevel(_DEFAULT_LEVEL)
if not logger.handlers:
logger.addHandler(get_handler())
else:
logger.handlers[0] = get_handler()
def load_config(config_file):
logging.config.fileConfig(config_file)
def get_handler(file_name=None, format=None):
file_name = file_name or _DEFAULT_FILE
if file_name:
handler = handlers.RotatingFileHandler(
file_name, mode='a',
maxBytes=_DEFAULT_MAX_BYTES,
backupCount=_DEFAULT_BACKUP_COUNT)
else:
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter(format or _DEFAULT_FORMAT))
return handler
def getLogger(name, file_name=None, format=None):
"""
>>> set_default(filename='test.log', level=logging.DEBUG)
>>> LOG = getLogger(__name__)
>>> LOG.debug('debug')
>>> LOG.info('info' * 100)
>>> LOG.error('error')
"""
global _LOGGER
_LOGGER.add(name)
logger = logging.getLogger(name)
logger.setLevel(_DEFAULT_LEVEL)
if not logger.handlers:
handler = get_handler(file_name=file_name, format=format)
logger.addHandler(handler)
return logger
```
#### File: fp_lib/common/progressbar.py
```python
from __future__ import print_function
import time
import threading
try:
from tqdm import tqdm
is_support_tqdm = True
except ImportError:
is_support_tqdm = False
from fp_lib import date
from . import log
LOG = log.getLogger(__name__)
class ProgressNoop(object):
    def __init__(self, total=None, **kwargs):
self.total = total
def update(self, size):
pass
def close(self):
pass
def set_description(self, *args, **kargs):
pass
class ProgressWithPrint(ProgressNoop):
progress_format = '{} {:3}% [{:100}]\r'
def __init__(self, total, **kwargs):
super(ProgressWithPrint, self).__init__(total)
self.progress = {'completed': 0, 'total': self.total}
self.last_time = time.time()
self.lock = threading.Lock()
def update(self, size):
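        # throttled: redraw the progress line at most once every two seconds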
self.lock.acquire()
self.progress['completed'] += size
if time.time() - self.last_time >= 2:
self._print_progress()
self.last_time = time.time()
self.lock.release()
def close(self):
self._print_progress()
def _print_progress(self):
        percent = int(self.progress['completed'] * 100 / self.progress['total'])
        print(self.progress_format.format(
            date.parse_timestamp2str(time.time()), percent, '#' * percent))
class ProgressWithTqdm(ProgressNoop):
def __init__(self, *args, **kwargs):
self.pbar = tqdm(*args, **kwargs)
def update(self, size):
self.pbar.update(size)
def close(self):
self.pbar.clear()
self.pbar.close()
def set_description(self, *args, **kwargs):
self.pbar.set_description(*args, **kwargs)
def factory(total):
if is_support_tqdm:
return ProgressWithTqdm(total=total)
else:
LOG.warning('tqdm is not installed, use ProgressWithPrint')
return ProgressWithPrint(total=total)
```
#### File: downloader/urllib/driver.py
```python
import io
import os
import re
import urllib3
import bs4
from fp_lib.common import log
from fp_lib.common import progressbar
from fp_lib.downloader import driver
LOG = log.getLogger(__name__)
FILE_NAME_MAX_SIZE = 50
def find_links(url, link_regex=None, headers=None):
"""
>>> links = find_links('http://www.baidu.com',
    ... link_regex=r'.*\.(jpg|png)$')
"""
httpclient = urllib3.PoolManager(headers=headers)
resp = httpclient.request('GET', url)
if resp.status != 200:
raise Exception('get web page failed, %s' % resp.data)
html = bs4.BeautifulSoup(resp.data, features="html.parser")
img_links = []
if link_regex:
regex_obj = re.compile(link_regex)
else:
regex_obj = None
for link in html.find_all(name='a'):
if not link.get('href'):
continue
if regex_obj and not regex_obj.match(link.get('href')):
continue
img_links.append(link.get('href'))
return img_links
class Urllib3Driver(driver.BaseDownloadDriver):
def __init__(self, headers=None, **kwargs):
super(Urllib3Driver, self).__init__(**kwargs)
self.headers = headers
self.filename_length = 1
self.http = urllib3.PoolManager(num_pools=self.workers,
headers=self.headers,
timeout=self.timeout)
def download_urls(self, url_list):
self.filename_length = 1
for url in url_list:
file_name = os.path.basename(url)
if len(file_name) > self.filename_length:
self.filename_length = len(file_name)
self.filename_length = min(self.filename_length, FILE_NAME_MAX_SIZE)
if not os.path.exists(self.download_dir):
os.makedirs(self.download_dir)
super(Urllib3Driver, self).download_urls(url_list)
def download(self, url):
file_name = os.path.basename(url)
resp = self.http.request('GET', url, preload_content=False)
if self.progress:
            pbar = progressbar.factory(int(resp.headers.get('Content-Length', 0) or 0))
desc_template = '{{:{}}}'.format(self.filename_length)
pbar.set_description(desc_template.format(file_name))
else:
pbar = progressbar.ProgressNoop()
save_path = os.path.join(self.download_dir, file_name)
with open(save_path, 'wb') as f:
for data in resp.stream(io.DEFAULT_BUFFER_SIZE):
f.write(data)
pbar.update(len(data))
pbar.close()
return file_name
```
#### File: fp-lib/fp_lib/location.py
```python
import locale
import pytz
_COUNTRY = locale.getdefaultlocale()[0].split('_')[-1]
def set_country(country):
global _COUNTRY
if country not in pytz.country_names:
        raise ValueError('country %s does not exist.' % country)
else:
_COUNTRY = country
def get_country():
"""use module pytz to get country"""
global _COUNTRY
assert _COUNTRY is not None
return pytz.country_names.get(_COUNTRY)
def get_country_timezones(country=None):
"""Get timezones by country code
>>> get_country_timezones(country='CN')
['Asia/Shanghai', 'Asia/Urumqi']
"""
return pytz.country_timezones[country or _COUNTRY]
```
#### File: fp-lib/fp_lib/net.py
```python
from collections import namedtuple
from concurrent import futures
import re
import socket
from fp_lib import executor
from fp_lib import system
ScanResult = namedtuple('ScanResult', 'host port connectable')
def port_scan(host, port_start=0, port_end=65535, threads=1, callback=None):
"""scan host ports between [port_start, port_end]
>>> port_scan('localhost',
... port_start=8001,
... port_end=8002,
... threads=3,
... callback=lambda future : print(future.done()))
True
True
"""
def _connect(port):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.connect((host, port))
return ScanResult(host, port, True)
except Exception:
return ScanResult(host, port, False)
finally:
server.close()
with futures.ThreadPoolExecutor(threads) as executor:
for port in range(port_start, port_end + 1):
if callback:
executor.submit(_connect, port).add_done_callback(callback)
else:
executor.submit(_connect, port)
def ping(host):
if system.OS.is_linux():
result = executor.LinuxExecutor.execute(['ping', '-w', '3', host])
else:
result = executor.LinuxExecutor.execute(['ping', '-n', '3', host])
return result.status == 0
def get_internal_ip():
"""Get the internal network IP address
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
return s.getsockname()[0]
def get_ip_addresses(v4=True, v6=False):
addr_types = [socket.AddressFamily.AF_INET] if v4 else []
if v6:
addr_types.append(socket.AddressFamily.AF_INET6)
address_list = []
for addr in socket.getaddrinfo(socket.gethostname(), None):
if addr[0] in addr_types:
address_list.append(addr[4][0])
return address_list
def split_host_port(address, default_host=None, default_port=None):
"""Split address to host port
The format of address like:
host1, host1:8888, 1.1.1.1, 1.1.1.1:8888, :8888
    >>> split_host_port('host1:8888')
    ('host1', 8888)
    >>> split_host_port('host1')
    ('host1', None)
    >>> split_host_port(':8888')
    (None, 8888)
"""
if not address:
return default_host, default_port
host, port = re.match(r'([^:]+)*:*(\d+)*', address).groups()
return host or default_host, default_port if port is None else int(port)
```
#### File: fp_lib/server/cmdserver.py
```python
from __future__ import print_function
import json
import subprocess
from gevent import pywsgi
from fp_lib.system import OS
from fp_lib import net
from fp_lib.common import log
LOG = log.getLogger(__name__)
DEFAULT_METHOD_STATUS = {
    'GET': '200 OK',
    'POST': '201 Created',
    'PUT': '201 Updated',
    'DELETE': '204 No Content'
}
class BaseWsgiApp(object):
def __init__(self):
self.environ = None
@property
def method(self):
return self.environ['REQUEST_METHOD']
def get_wsgi_input_data(self):
wsgi_input = self.environ.get('wsgi.input')
return wsgi_input.read(
int(self.environ.get('CONTENT_LENGTH', 0) or 0)
)
def __call__(self, environ, start_response):
self.environ = environ
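        # dispatch to the matching do_GET/do_POST/do_PUT/do_DELETE method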
        if self.method not in DEFAULT_METHOD_STATUS:
            start_response('404 Not Found', [])
            return ['method {} is not supported by this server'.format(self.method).encode()]
func = getattr(self, 'do_{}'.format(self.method), None)
resp = func(environ, start_response)
response_headers = [('Content-Type', 'application/json')]
start_response(resp[0], response_headers)
return [resp[1].encode()]
def do_GET(self, environ, start_response):
resp = self.get()
if isinstance(resp, str):
            return DEFAULT_METHOD_STATUS['GET'], resp
else:
return resp
def do_POST(self, environ, start_response):
req_data = self.get_wsgi_input_data()
resp = self.post(req_data)
if isinstance(resp, str):
            return DEFAULT_METHOD_STATUS['POST'], resp
else:
return resp
    def do_PUT(self, environ, start_response):
        req_data = self.get_wsgi_input_data()
        return self.put(req_data)
def do_DELETE(self, environ, start_response):
return self.delete()
def get(self):
return '200 OK', 'Hello, world'
def post(self, data):
return '404 Not Found', 'POST is NotImplemented'
def put(self, data):
return '404 Not Found', 'PUT is NotImplemented'
def delete(self):
return '404 Not Found', 'DELETE is NotImplemented'
class CmdServer(BaseWsgiApp):
def get(self):
uname = OS.uname()
resp_body = {'system': uname.system,
'version': uname.version,
'machine': uname.machine,
'processor': uname.processor,
'release': uname.release
}
return '200 OK', json.dumps(resp_body)
def post(self, data):
try:
req_body = json.loads(data)
except Exception:
resp_body = {'error': 'request body is invalid'}
            return '400 Bad Request', json.dumps(resp_body)
cmd = req_body.get('cmd')
status, output = subprocess.getstatusoutput(cmd)
return json.dumps({'output': output, 'status': status})
def main():
import sys
    address = sys.argv[1] if len(sys.argv) >= 2 else None
host, port = net.split_host_port(address,
default_host=net.get_internal_ip(),
default_port=8888)
log.enable_debug()
LOG.info('start server at %s:%s', host, port)
server = pywsgi.WSGIServer((host, port), CmdServer())
server.serve_forever()
if __name__ == '__main__':
main()
print(net.get_ip_addresses())
```
#### File: fp_utils/base/fs.py
```python
from __future__ import print_function
import time
import os
from fp_lib.common import cliparser
from fp_lib.common import log
from fp_lib import fs
LOG = log.getLogger(__name__)
class PyTac(cliparser.CliBase):
NAME = 'py-tac'
ARGUMENTS = [
cliparser.Argument('file', help='file'),
cliparser.Argument('-c', '--chunk', type=int, default=None,
help='The chunk size to read, default is None'),
cliparser.Argument('-n', '--nums', type=int, default=None,
help='Print Last N lines')]
def __call__(self, args):
start_time = time.time()
nums = args.nums
if nums is not None and nums <= 0:
LOG.error('The value of --nums NUM must >= 1')
return
with fs.open_backwards(args.file, chunk_size=args.chunk) as fp:
for line in fp:
print(line, end='')
if nums is not None:
nums -= 1
if nums <= 0:
break
LOG.debug('file is closed: %s', fp.closed)
LOG.debug('Used Time: %.2f seconds', time.time() - start_time)
class PyZip(cliparser.CliBase):
NAME = 'zip'
ARGUMENTS = [
cliparser.Argument('dir', help='the path of dir'),
cliparser.Argument('--no-root', action='store_true',
help='zip the child of dir'),
cliparser.Argument('--no-path', action='store_true',
help='save path to zip file'),
]
def __call__(self, args):
try:
fs.zip_files(args.dir, zip_root=not args.no_root,
zip_path=not args.no_path,
verbose=args.verbose)
        except FileNotFoundError:
            LOG.error('%s does not exist', args.dir)
except Exception as e:
LOG.error(e)
```
{
"source": "254195430/test",
"score": 3
}
#### File: app/cloud-native-app/app.py
```python
from flask import Flask, flash, render_template, request, jsonify, redirect, session, escape
from flask import abort
import bcrypt
from flask_cors import CORS, cross_origin
from flask import make_response, url_for
import json
import random
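# Note: this appears to import the app's local requests.py helper (a Requests
# class), not the third-party "requests" HTTP library.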
from requests import Requests
from pymongo import MongoClient
from flask_pymongo import PyMongo  # flask.ext.* import paths were removed in Flask 1.0
from time import gmtime, strftime
from flask_mongoalchemy import MongoAlchemy
# Object creation
app = Flask(__name__)
app.config.from_object(__name__)
app.secret_key = '<some secret key>'
CORS(app)
app.config['MONGOALCHEMY_DATABASE'] = 'app'
app.config['MONGOALCHEMY_CONNECTION_STRING'] = 'mongodb://localhost:27017/'
connection = MongoClient("mongodb://localhost:27017/")
db = MongoAlchemy()
mongo = PyMongo(app)
# Initialize Database
def create_mongodatabase():
try:
dbnames = connection.database_names()
if 'app' not in dbnames:
db_api = connection.app.apirelease
db_api.insert( {
"buildtime": "2017-01-01 10:00:00",
"links": "/api/v1/users",
"methods": "get, post, put, delete",
"version": "v1"
})
db_api.insert( {
"buildtime": "2017-02-11 10:00:00",
"links": "api/v2/tweets",
"methods": "get, post",
"version": "2017-01-10 10:00:00"
})
print ("Database Initialize completed!")
else:
print ("Database already Initialized!")
except:
print ("Database creation failed!!")
# API Routes
@app.route('/')
def home():
if not session.get('logged_in'):
return render_template('login.html')
else:
return render_template('index.html', session = session['logged_in'])
@app.route('/index')
def index():
return render_template('index.html', session = session['logged_in'])
@app.route('/login', methods=['POST'])
def do_admin_login():
users = mongo.db.users
api_list=[]
login_user = users.find({'username': request.form['username']})
for i in login_user:
api_list.append(i)
print (api_list)
if api_list != []:
#print (api_list[0]['password'].decode('utf-8'), bcrypt.hashpw(request.form['password'].encode('utf-8'), api_list[0]['password']).decode('utf-8'))
if api_list[0]['password'].decode('utf-8') == bcrypt.hashpw(request.form['password'].encode('utf-8'), api_list[0]['password']).decode('utf-8'):
session['logged_in'] = api_list[0]['username']
return redirect(url_for('index'))
        return 'Invalid username/password!'
else:
flash("Invalid Authentication")
return 'Invalid User!'
@app.route('/signup', methods=['GET', 'POST'])
def signup():
if request.method=='POST':
users = mongo.db.users
api_list=[]
existing_user = users.find({'$or':[{"username":request.form['username']} ,{"email":request.form['email']}]})
for i in existing_user:
# print (str(i))
api_list.append(str(i))
# print (api_list)
if api_list == []:
users.insert({
"email": (request.form['email']).lower(),
"id": random.randint(1,1000),
"name": request.form['name'],
"password": bcrypt.hashpw(request.form['pass'].encode('utf-8'), bcrypt.gensalt()),
"username": request.form['username']
})
session['username'] = request.form['username']
return redirect(url_for('home'))
return 'That user already exists'
else :
return render_template('signup.html')
@app.route("/logout")
def logout():
session['logged_in'] = False
return redirect(url_for('home'))
@app.route('/profile', methods=['GET', 'POST'])
def profile():
if request.method=='POST':
users = mongo.db.users
api_list=[]
existing_users = users.find({"username":session['username']})
for i in existing_users:
# print (str(i))
api_list.append(str(i))
user = {}
print (api_list)
if api_list != []:
print (request.form['email'])
user['email']=(request.form['email']).lower()
user['name']= request.form['name']
            user['password'] = bcrypt.hashpw(request.form['pass'].encode('utf-8'), bcrypt.gensalt())
users.update({'username':session['username']},{'$set': user} )
else:
return 'User not found!'
return redirect(url_for('index'))
if request.method=='GET':
users = mongo.db.users
user=[]
print (session['username'])
existing_user = users.find({"username":session['username']})
for i in existing_user:
user.append(i)
        return render_template('profile.html', name=user[0]['name'], username=user[0]['username'], password=user[0]['password'], email=user[0]['email'])
@app.route("/api/v1/info")
def home_index():
api_list=[]
db = connection.app.apirelease
for row in db.find():
print (row)
# api_list['buildtime']. str(row['buildtime'])
# api_list['version'] = str(row['version'])
# api_list['links'] = str(row['links'])
# api_list['methods'] = str(row['methods'])
api_list.append(str(row))
print (api_list)
return json.dumps(api_list), 200
@app.route('/api/v1/users', methods=['GET'])
def get_users():
return Requests.list_users()
@app.route('/api/v1/users/<int:user_id>', methods=['GET'])
def get_user(user_id):
return Requests.list_user(user_id)
@app.route('/api/v1/users', methods=['POST'])
def create_user():
if not request.json or not 'username' in request.json or not 'email' in request.json or not 'password' in request.json:
abort(400)
user = {
'username': request.json['username'],
'email': request.json['email'],
'name': request.json.get('name',""),
'password': request.json['password'],
'id': random.randint(1,1000)
}
return jsonify({'status': Requests.add_user(user)}), 201
@app.route('/api/v1/users', methods=['DELETE'])
def delete_user():
if not request.json or not 'username' in request.json:
abort(400)
user=request.json['username']
return jsonify({'status': Requests.del_user(user)}), 200
@app.route('/api/v1/users/<int:user_id>', methods=['PUT'])
def update_user(user_id):
user = {}
user['id']=user_id
key_list = request.json.keys()
for i in key_list:
user[i] = request.json[i]
return jsonify({'status': Requests.upd_user(user)}), 200
@app.route('/api/v2/tweets', methods=['GET'])
def get_tweets():
return Requests.list_tweets()
@app.route('/api/v2/tweets', methods=['POST'])
def add_tweets():
user_tweet = {}
if not request.json or not 'username' in request.json or not 'body' in request.json:
abort(400)
user_tweet['tweetedby'] = request.json['username']
user_tweet['body'] = request.json['body']
user_tweet['timestamp']=strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
user_tweet['id'] = random.randint(1,1000)
return jsonify({'status': Requests.add_tweet(user_tweet)}), 201
@app.route('/api/v2/tweets/<string:tweetedby>', methods=['GET'])
def get_tweet(tweetedby):
return Requests.list_tweet(tweetedby)
# Error handling
@app.errorhandler(404)
def resource_not_found(error):
return make_response(jsonify({'error': 'Resource not found!'}), 404)
@app.errorhandler(409)
def user_found(error):
return make_response(jsonify({'error': 'Conflict! Record exist'}), 409)
@app.errorhandler(400)
def invalid_request(error):
return make_response(jsonify({'error': 'Bad Request'}), 400)
# Main Function
if __name__ == '__main__':
create_mongodatabase()
app.run(host='0.0.0.0', port=5000, debug=True)
``` |
{
"source": "254Davidhashisoma/blog",
"score": 3
} |
#### File: blog/tests/test_comment.py
```python
import unittest
from app.models import Post, User, Comment
class TestPost(unittest.TestCase):
def setUp(self):
        self.user_Hashisoma = User(first_name = "David",
                                last_name = "Hashisoma",
                                username = "@Hashi",
                                password = "<PASSWORD>",
                                email = "<EMAIL>")
self.new_post = Post(post_title = "Test Title",
post_content = "This is a great move. I love blogging!",
user_id = self.user_Hashisoma.id)
self.new_comment = Comment(comment = "Great one!",
post_id = self.new_post.id,
                                user_id = self.user_Hashisoma.id)
def test_instance(self):
self.assertTrue(isinstance(self.user_Hashisoma, User))
self.assertTrue(isinstance(self.new_post, Post))
self.assertTrue(isinstance(self.new_comment, Comment))
    #test posting change quotes
``` |
{
"source": "2556-AD/Open-Atom",
"score": 2
} |
#### File: Open-Atom/openatom/neutron.py
```python
from openatom.UNIVERSAL_CONSTANTS import *
from openatom.particle import ParticulateMatter
class Neutron(ParticulateMatter):
def __init__(
self,
charge = NEUTRON_CHARGE,
mass = NEUTRON_MASS,
velocity = VAL_TENDS_TO_ZERO,
xCoordinate = 0,
yCoordinate = 0,
zCoordinate = 0
):
super().__init__(
charge,
mass,
velocity,
xCoordinate,
yCoordinate,
zCoordinate
)
```
#### File: Open-Atom/openatom/open-atom.py
```python
class electron:
def __init__(self):
self.charge = -1.6022e-19
self.mass = 9.109e-31
class proton:
def __init__(self):
self.charge = 1.6022e-19
self.mass = 1.672e-27
class neutron:
def __init__(self):
self.charge = 0
self.mass = 1.675e-27
class atom:
def __init__(self, protonCount, neutronCount, electronCount):
self.protons = [proton() for p in range(protonCount)]
self.electrons = [electron() for e in range(electronCount)]
self.neutrons = [neutron() for n in range(neutronCount)]
# def probeAtom(self):
# print(self.protons)
# temp = electronCount 5,3
# counter =1 2
# shell = [2,3]
# while temp>0:
# as = 2**counter 2,4
# if(temp>=as):
# shell.append(as)
# else:
# shell.append(temp)
# temp -= as
# counter += 1
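# A runnable sketch of the shell-filling idea outlined in the comments above
# (an assumption not in the original: each shell n holds up to 2 * n**2
# electrons, filled in order):
def electron_shells(electron_count):
    shells = []
    n = 1
    remaining = electron_count
    while remaining > 0:
        capacity = 2 * n ** 2
        shells.append(min(capacity, remaining))
        remaining -= capacity
        n += 1
    return shells  # e.g. electron_shells(11) -> [2, 8, 1]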
```
#### File: Open-Atom/openatom/principal_quantum_number.py
```python
from openatom.UNIVERSAL_CONSTANTS import *
from openatom.azimuthal_quantum_number import AzimuthalQNum
class PrincipalQNum():
def __init__(self, shellIdx):
self.label = self.assignShellLabel(shellIdx)
self.principalQuantumNumVal = shellIdx + 1
self.azimuthalArray = []
self.azimuthalArray = [AzimuthalQNum(len(self.azimuthalArray)) for i in range(self.principalQuantumNumVal)]
# shellArray.append(PrincipalQNum(len(shellArray)))
def assignShellLabel(self, shellIdx):
shellMap = {
0 : 'K',
1 : 'L',
2 : 'M',
3 : 'N',
4 : 'O',
5 : 'P'
}
return shellMap[shellIdx]
``` |
{
"source": "255/anagramatic",
"score": 4
} |
#### File: 255/anagramatic/filter.py
```python
import string
letters = set(string.ascii_lowercase)
banned_words = {
'fer',
'hep',
'len',
'lin',
'fahd',
'robt',
'rte',
'gte',
'feb',
'mar',
'apr',
'jun',
'jul',
'aug',
'sep',
'sept',
'nov',
'dec',
}
min_length = 4
valid_two_letter = {
'a', 'i',
'ab', 'ad', 'ah', 'ai', 'am', 'an', 'as', 'at', 'aw', 'ax',
'be', 'by',
'do',
'eh', 'em', 'en', 'er', 'ex',
'go',
'ha', 'hi', 'ho',
'if', 'in', 'is', 'it',
    'ma', 'me', 'my',
'no',
'of', 'oh', 'on', 'or', 'ow', 'ox', 'oy',
'pa', 'pi',
'so',
'to',
'uh', 'um', 'up', 'us',
'we', 'wo',
'ye', 'yo',
}
vowels = {'a', 'e', 'i', 'o', 'u', 'y'}
def valid_short_word(word):
if len(word) <= 2:
return word in valid_two_letter
# check for at least one vowel for words longer than 2 letters
for letter in word:
if letter in vowels:
return True
else:
return False
def filter(file):
"""
Read and filter all of the whitespace-separated words in a file.
Duplicate words, words with 'special' characters, words lacking vowels, and many two-letter 'words' are removed.
The word list is output in lowercase in alphabetical order, one word per line.
"""
valid_words = set()
for word in file.read().split():
word = word.lower()
if word in banned_words:
continue
if len(word) < min_length and not valid_short_word(word):
continue
for char in word:
if char not in letters:
break
else:
valid_words.add(word)
return sorted(valid_words)
if __name__ == '__main__':
import sys
for word in filter(sys.stdin):
print(word)
``` |
{
"source": "256481788jianghao/complaint12306",
"score": 3
} |
#### File: 256481788jianghao/complaint12306/emailTool.py
```python
import smtplib
import email.mime.multipart
import email.mime.text
import time
import datetime
class emailTool:
def __init__(self,cfg):
self.config = cfg
def send(self, content):
smtp=smtplib.SMTP()
smtp.connect(self.config['Host'],'25')
smtp.login(self.config['From'],self.config['PW'])
msg=email.mime.multipart.MIMEMultipart()
msg['from'] = self.config['From']
msg['to'] = self.config['To']
msg['subject'] = self.config['subject']
#content = self.config['content']
txt=email.mime.text.MIMEText(content)
msg.attach(txt)
        smtp.sendmail(self.config['From'], self.config['To'], msg.as_string())
smtp.quit()
print("send mail at "+str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
def sendDelay(self, dtime, content):
self.send(content)
time.sleep(dtime)
def sendcyc(self, pro_time, pro_num, content):
        pro_cnt = 0
mail_cnt = 0
while pro_cnt < pro_num:
send_num = pro_cnt + 1
sleeptime = int(pro_time/send_num)
while send_num > 0:
print("pro_cnt="+str(pro_cnt)+" at "+str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
#time.sleep(sleeptime)
mail_cnt += 1
content_inner2 = ''
if mail_cnt >=2:
content_inner2 = '邮件内容与前面的几封相同,在没有得到问题的答复之前,我会一直如此询问下去.'
content_inner = "\n这是关于此问题的第"+str(mail_cnt)+"封邮件.\n"+content_inner2+"\n\n"+content
try:
self.sendDelay(sleeptime,content_inner)
#send_num -= 1
except:
print("send mail except")
finally:
send_num -= 1
pro_cnt +=1
``` |
{
"source": "256481788jianghao/share_test",
"score": 3
} |
#### File: 256481788jianghao/share_test/main.py
```python
import pandas as pd
import ToolModule as tm
#import datetime
import FilterDataModule as fd
print('初始化开始')
g_all_data = fd.all_data
#g_hs300_data = dataBase.get_hs300_data() # hs300 historical data
print('初始化结束')
"""
#get a stock's listing date and the number of days since listing
def getToMarketDateAndDays(code):
date_num = share_list_in_market[share_list_in_market.code == code].timeToMarket.iloc[0]
date_str = tm.numToDate(date_num)
dayObjNow = datetime.datetime.now()
dayObjx = datetime.datetime.strptime(date_str,'%Y-%m-%d')
diff = dayObjNow - dayObjx
return [date_str,diff.days]
"""
#得到某一天的数据
def getShareDataByDate(code,date):
return g_all_data[(g_all_data.code == code) & (g_all_data.date == date)]
#get every stock's data for a given day, dropping stocks with no data that day
def getAllSharesDataByDate(date):
print("get all shares data of "+date)
return g_all_data[g_all_data.date == date]
#get the mean and median turnover rate for a day, plus the number of stocks with trade data
def getTurnover(all_shares):
#all_shares = getAllSharesDataByDate(date)
if not all_shares.empty:
tmp = all_shares.sort_values(by='turnover')
#print(tmp)
else:
return None
num = len(tmp)
mean_data = tmp.mean()
#print(type(tmp.turnover))
mid_turnover = tmp.turnover.iloc[int(num/2)]
mean_turnover = mean_data.turnover
return [mean_turnover,mid_turnover,num]
#get stocks whose price changed by more than P over the last N days
def getShareListNP(startDate,endDate,P):
def p_change_sum(x):
p_change = x.p_change+100
ans = 1
for p in p_change:
ans = ans * p
return ans
def filterDate(date):
dateNum = tm.dateToNum(date)
return dateNum >= startDate and dateNum <= endDate
lastNData = g_all_data[g_all_data.date.apply(filterDate)]
p_change_data = lastNData.groupby('code').apply(p_change_sum)
p_change_code = p_change_data[p_change_data > P].index
def filterCode(code):
return code in p_change_code
ansData = lastNData[lastNData.code.apply(filterCode)]
return ansData
condition = 'turnover'
ans1 = getShareListNP(20170512,20170516,102)
ans2 = getShareListNP(20170509,20170511,102)
code1 = set(list(ans1.code))
code2 = set(list(ans2.code))
code3 = code1 & code2
print(len(code3)/len(code2))
print(len(code3))
ans4 = ans2[ans2.code.apply(lambda x:x in code3)]
print(ans4[condition].mean())
print(ans4[condition].median())
print(ans4[condition].var())
print("=====================================================================")
ans1 = getShareListNP(20160512,20160516,102)
ans2 = getShareListNP(20160509,20160511,102)
code1 = set(list(ans1.code))
code2 = set(list(ans2.code))
code3 = code1 & code2
print(len(code3)/len(code2))
print(len(code3))
ans4 = ans2[ans2.code.apply(lambda x:x in code3)]
print(ans4[condition].mean())
print(ans4[condition].median())
print(ans4[condition].var())
print("=====================================================================")
ans1 = g_all_data
#ans2 = getShareListNP(20160509,20160511,101)
code1 = set(list(ans1.code))
#code2 = set(list(ans2.code))
#code3 = code1 & code2
#print(len(code3)/len(code2))
print(len(code1))
#ans4 = ans2[ans2.code.apply(lambda x:x in code3)].sort_values(by='daysToMarket')
print(ans1[condition].mean())
print(ans1[condition].median())
print(ans1[condition].var())
"""
#compute the mean and median of the daily turnover rate over the past 30 trading days
mid_turnover_list = []
mean_turnover_list = []
num_list = []
days_list = []
for day in days30:
info = getTurnover(getAllSharesDataByDate(day))
if info == None:
continue
days_list.append(day)
mid_turnover_list.append(info[1])
mean_turnover_list.append(info[0])
num_list.append(info[2])
ans = pd.DataFrame({"day":days_list,'mean_turnover':mean_turnover_list,'mid_turnover':mid_turnover_list,'num':num_list})
print(ans)
"""
``` |
{
"source": "256481788jianghao/stock_task",
"score": 3
} |
#### File: 256481788jianghao/stock_task/FunctionMgr.py
```python
import pandas as pd
import sqlite3 as sql
class FunctionMgr:
def __init__(self,sql_con):
self.sql_con = sql_con
pass
'''
    Get stocks whose turnover rate exceeds the given threshold
'''
def GetTurnoverRateList(self,rate,start_date,end_date):
data = pd.read_sql_query('select ts_code,trade_date,turnover_rate,turnover_rate_f from daily_basic where trade_date <='+str(end_date)+' and trade_date >='+str(start_date)+' and turnover_rate_f >='+str(rate),self.sql_con)
return data
'''
    Rank stocks by mean turnover rate over a date range
'''
def GetTurnoverRateMeanSortList(self,start_date,end_date):
data = self.GetTurnoverRateList(1, start_date, end_date)
group = data.groupby(by = 'ts_code')
def func(item):
tmp = dict()
tmp['mean_rate_f'] = item.turnover_rate_f.mean()
tmp['mean_rate'] = item.turnover_rate.mean()
tmp['mean_count'] = len(item)
return pd.Series(tmp)
ans = group.apply(func)
return (ans)
'''
    Look up concepts by stock code
'''
def GetConceptByCode(self,code):
name_data = pd.read_sql_query('select * from concept_info',self.sql_con)
detail_data = pd.read_sql_query('select * from concept_detail where ts_code = \''+str(code)+'\'', self.sql_con)
name_list = []
for item in detail_data.id:
subdata = name_data[name_data.code == item]
#print(subdata.name.iloc[0])
name_list.append(subdata.name.iloc[0])
#print(name_list)
detail_data['concept_name'] = name_list
return detail_data
'''
    Rank concepts by frequency across a list of codes
'''
def GetConceptSortList(self,code_list):
ans_dict=dict()
for stock_code in code_list:
data = self.GetConceptByCode(stock_code)
if not data.empty:
for name in data.concept_name:
if name in ans_dict.keys():
ans_dict[name] = ans_dict[name] + 1
else:
ans_dict[name] = 1
ans_frame = pd.DataFrame()
ans_frame['name'] = ans_dict.keys()
ans_frame['value'] = ans_dict.values()
return ans_frame.sort_values(by='value',ascending=False)
'''
    Get daily data within a date range
'''
def GetDaily(self,start_date,end_date):
data = pd.read_sql_query('select * from daily where trade_date >= '+str(start_date)+' and trade_date <= '+str(end_date),self.sql_con)
if data.empty:
print('GetDaily is empty ['+str(start_date)+'->'+str(end_date)+']')
return data
'''
    Get the cumulative daily price change within a date range
'''
def GetPctChangeSumList(self,start_date,end_date):
data = self.GetDaily(start_date, end_date)
if data.empty:
print('GetPctChangeSumList data is empty')
group = data.groupby(by='ts_code')
def func(item):
tmp = dict()
p_all = 1
for p in item['pct_change']:
p_all = p_all*(1+p/100)
tmp['sum_pct_change'] = (p_all-1)*100
tmp['sum_count'] = len(item)
return pd.Series(tmp)
ans = group.apply(func)
return ans
'''
    Get the cumulative price change and mean turnover rate within a date range
'''
def GetSumPChangeAndMeanTurnoverRateList(self,start_date,end_date):
sum_pctchange_data = self.GetPctChangeSumList(start_date, end_date)
mean_turnover_data = self.GetTurnoverRateMeanSortList(start_date, end_date)
mean_data = pd.merge(left=sum_pctchange_data,right=mean_turnover_data,left_index=True,right_index=True)
return mean_data
'''
    Get a concept's cumulative price change and mean turnover rate within a date range
'''
def GetConceptSumPChangeAndMeanTurnoverRateList(self,concept_id,start_date,end_date):
concept_detail_all_data = pd.read_sql_query('select * from concept_detail where id = \''+str(concept_id)+'\'', self.sql_con)
concept_data = concept_detail_all_data[concept_detail_all_data.id == concept_id].set_index('ts_code')
mean_data = self.GetSumPChangeAndMeanTurnoverRateList(start_date, end_date)
merge_data = pd.merge(left=mean_data,right=concept_data,left_index=True,right_index=True)
#print(sum_pctchange_data)
return merge_data
'''
    Get the relation between turnover rate and price movement, up to a given date
'''
def GetPctChangeAndTurnoverRateRelationList(self,end_date,al=5,bs=5,bl=10):
date_all_len = al+bl
date_list = pd.read_sql_query('select * from trade_cal where is_open = 1 and cal_date <="'+str(end_date)+'"',self.sql_con)
date_list_all = date_list[-date_all_len:]
pdate = date_list_all.cal_date.iloc[-al]
sdate_bl = date_list_all.cal_date.iloc[0]
sdate_bs = date_list_all.cal_date.iloc[bs]
data_turnover = pd.read_sql_query('select ts_code,trade_date,turnover_rate,turnover_rate_f from daily_basic where trade_date <='+str(end_date)+' and trade_date >='+str(sdate_bl), self.sql_con)
data_daily = pd.read_sql_query('select trade_date,ts_code,pct_change from daily where trade_date >= '+str(sdate_bl)+' and trade_date <= '+str(end_date),self.sql_con)
merge_data = pd.merge(left=data_daily, left_on=['ts_code','trade_date'],right=data_turnover, right_on=['ts_code','trade_date'])
def sum_pct_change(data):
ans_sum = 1
for item in data:
ans_sum = ans_sum * (1+item/100)
return ans_sum
merge_date_group = merge_data.groupby('ts_code')
def g_func(items):
tmp= dict()
data_bl = items[items.trade_date < pdate]
data_bs = items[(items.trade_date <pdate) & (items.trade_date >= sdate_bs)]
data_al = items[items.trade_date >= pdate]
tmp['mean_turnover_f_bl'+str(bl)] = data_bl.turnover_rate_f.mean()
tmp['mean_turnover_f_bs'+str(bs)] = data_bs.turnover_rate_f.mean()
tmp['sum_pchaneg_bl'+str(bl)] = sum_pct_change(data_bl['pct_change'])
tmp['sum_pchaneg_al'+str(al)] = sum_pct_change(data_al['pct_change'])
tmp['data_len'] = len(items)
return pd.Series(tmp)
ans_data = merge_date_group.apply(g_func)
sub_ans_data = ans_data[ans_data.data_len >= date_all_len]
sub_ans_data['t_rate'] = sub_ans_data['mean_turnover_f_bs'+str(bs)]/sub_ans_data['mean_turnover_f_bl'+str(bl)]
sort_data = sub_ans_data.sort_values(by='sum_pchaneg_al'+str(al),ascending=False)
print(sort_data[(sort_data.t_rate > 1.3) & (sort_data.sum_pchaneg_al5 < 1)])
if __name__ == '__main__':
pd.set_option('max_columns', 100)
with sql.connect('stock.db') as con:
mgr = FunctionMgr(con)
mgr.GetPctChangeAndTurnoverRateRelationList(20190401)
#data_concept_mean = mgr.GetSumPChangeAndMeanTurnoverRateList( 20190401, 20190403)
#data_mean = data_concept_mean.sort_values(by='sum_pct_change',ascending=False)
#print(mgr.GetConceptSortList(data_mean[0:21].index))
#data = mgr.GetTurnoverRateMeanSortList(20190120,20190329)
#data_sort = data.sort_values(by='mean_rate_f',ascending=False)
#concept_data = mgr.GetConceptSortList(data_sort[0:21].index)
#print(concept_data)
```
#### File: stock_task/QTPg/StockBasicModle.py
```python
from PyQt5 import QtGui
import sqlite3 as sql
import readdb as rdb
import pandas as pd
class StockBasicModle(object):
'''
classdocs
'''
def __init__(self, tableview):
sql_con = sql.connect('stock.db')
self.stock_basic_data = rdb.read_stock_basic(sql_con)
self.subData = self.stock_basic_data
self.concept_info = rdb.read_concept_info(sql_con)
self.concept_detail = rdb.read_concept_detail(sql_con)
sql_con.close()
stock_basic_len = len(self.stock_basic_data)
self.model=QtGui.QStandardItemModel(stock_basic_len,2)
self.UpdateFilter()
tableview.setModel(self.model)
def Init_Concept_ComboBox(self,ui_comboBox):
length = len(self.concept_info)
ui_comboBox.addItem('all')
for row in range(length):
concept_item = self.concept_info.iloc[row]
#print(type(concept_item['name']))
ui_comboBox.addItem(concept_item['name'])
def ResetStockBasic(self):
self.subData = self.stock_basic_data
def FilterByHk(self,startTime):
print('FilterByHk s='+startTime)
sql_con = sql.connect('stock.db')
data_hk = rdb.read_hk_hold_by_date(sql_con, startTime, startTime)
if type(data_hk) == pd.DataFrame and type(self.subData) == pd.DataFrame:
data_merge = pd.merge(left=data_hk,right=self.subData,on=['ts_code','name'])
self.subData = data_merge
print(self.subData)
else:
pass
sql_con.close()
def FilterByConcept(self,concept_name):
print('FilterByConcept '+concept_name)
if concept_name == 'all':
return
concept_list = self.concept_detail[self.concept_detail['concept_name'] == concept_name]
#print(concept_list)
if len(concept_list) >0 and type(self.subData) == pd.DataFrame:
self.subData = self.subData[self.subData.ts_code.isin(concept_list.ts_code)]
def FilterByIndustry(self,patten):
if type(self.subData) == pd.DataFrame:
print('FilterByIndustry '+patten)
if len(patten) > 0:
def subfun(item):
if item is not None:
return item.find(patten) >= 0
else:
return False
self.subData = self.subData[self.subData.industry.apply(subfun)]
def FilterByName(self,patten):
if type(self.subData) == pd.DataFrame:
print('FilterByName '+patten)
if len(patten) > 0:
def subfun(item):
return item.find(patten) >= 0
self.subData = self.subData[self.subData.name.apply(subfun)]
else:
pass
def UpdateFilter(self,hasKechuangban=True,listdate='20200101'):
if type(self.subData) == pd.DataFrame:
self.model.clear()
self.model.setHorizontalHeaderLabels(['代码','名称','行业'])
if hasKechuangban:
pass
else:
self.subData = self.subData[self.subData.market != '科创板']
self.subData = self.subData[self.subData.list_date <= listdate]
stock_basic_len = len(self.subData)
#print(self.subData)
#print(stock_basic_len)
for row in range(stock_basic_len):
ts_code = self.subData.ts_code.iloc[row]
name = self.subData.name.iloc[row]
industry = self.subData.industry.iloc[row]
item_tscode=QtGui.QStandardItem(ts_code)
item_name=QtGui.QStandardItem(name)
item_industry=QtGui.QStandardItem(industry)
self.model.setItem(row,0,item_tscode)
self.model.setItem(row,1,item_name)
self.model.setItem(row,2,item_industry)
def GetData(self,row):
return self.subData.iloc[row]
```
#### File: 256481788jianghao/stock_task/read4.py
```python
import math
import porting as pt
import pandas as pd
import sqlite3 as sql
import readdb as rdb
import datetime
import matplotlib.pyplot as plt
import scipy.stats as sci_ss
pd.set_option('max_columns', 100)
sql_con = sql.connect('stock.db')
cursor = sql_con.cursor()
start_date = '20190101'
end_date = '20191113'
now_date = datetime.datetime.now().strftime('%Y%m%d')
try:
data_stock_basic = rdb.read_stock_basic(sql_con)
data_stock_basic = data_stock_basic[data_stock_basic.market != '科创板']
data_stock_basic = data_stock_basic[data_stock_basic.list_date < '20190501']
print(len(data_stock_basic))
data_dailybasic = rdb.read_daily_basic_by_date(sql_con, start_date, end_date)
print(len(data_dailybasic))
data_daily = rdb.read_daily_by_date(sql_con, start_date, end_date)
print(len(data_daily))
data_merge = data_dailybasic.merge(data_daily,on=['trade_date','ts_code'])
data_merge = data_merge.merge(data_stock_basic,on='ts_code')
print(len(data_merge))
data_merge_group = data_merge.groupby(by='ts_code')
def lam_fun(item):
item['id_index'] = range(0,len(item))
pItem = item[(item['pct_change'] > 3)]
sum_num = len(pItem)
collect_num = 0
for index in pItem.id_index:
subitem = item[(item.id_index > index) & (item.id_index <= index+5)]
if len(subitem) == 0:
sum_num = sum_num -1
continue
curItem = item[item.id_index == index]
#print(curItem)
diff_p = (subitem.high - curItem.high.iloc[0])*100/curItem.high.iloc[0]
#print(diff_p)
if len(diff_p[diff_p > 2]) > 1:
collect_num = collect_num + 1
#prod_ans = (subitem['pct_change']/100+1).cumprod()
#print(len(prod_ans[prod_ans > 1]))
#if len(prod_ans[prod_ans > 1]) > 1:
# collect_num = collect_num + 1
return pd.Series({'len':len(item),'sum_n':sum_num,'collect_n':collect_num})
data_ans = data_merge_group.apply(lam_fun)
all_sum = data_ans.sum_n.sum()
all_collect = data_ans.collect_n.sum()
print("sum="+str(all_sum)+'c='+str(all_collect)+" p="+str(all_collect/all_sum))
data_list = []
for i in range(0,all_sum):
if i < all_collect:
data_list.append(1)
else:
data_list.append(0)
stats_ans = sci_ss.ttest_1samp(data_list, 0.5)
print('=========stats_ans======')
print(stats_ans)
except Exception as e:
print("ex:"+str(e))
finally:
print("end execute")
cursor.close()
sql_con.close()
```
#### File: stock_task/UIForm/StockBasicForm.py
```python
from tkinter import ttk
class StockBasicFrom(object):
'''
classdocs
'''
def __init__(self, root):
        self.tree=ttk.Treeview(root)  # the table widget
self.tree["columns"]=("姓名","年龄","身高")
self.tree.column("姓名")
self.tree.column("年龄")
self.tree.column("身高")
self.tree.heading("姓名",text="姓名-name") #显示表头
self.tree.heading("年龄",text="年龄-age")
self.tree.heading("身高",text="身高-tall")
self.tree.insert("",0,text="line1" ,values=("1","2","3")) #插入数据,
self.tree.insert("",1,text="line1" ,values=("1","2","3"))
self.tree.insert("",2,text="line1" ,values=("1","2","3"))
self.tree.insert("",3,text="line1" ,values=("1","2","3"))
def pack(self):
self.tree.pack()
def grid(self):
self.tree.grid()
```
#### File: stock_task/UIForm/UpdataForm.py
```python
import tkinter as tk
import tkinter.messagebox
import update as up
import threading
class UpdataForm(tk.Tk):
'''
classdocs
'''
def __init__(self):
super().__init__()
self.title('更新数据库界面')
label1=tk.Label(self,text='开始时间:')
label2=tk.Label(self,text='结束时间:')
self.entry1 = tk.Entry(self)
self.entry2 = tk.Entry(self)
button = tk.Button(self,text='开 始',command=self._update)
label1.grid(row=0,column=0)
label2.grid(row=1,column=0)
self.entry1.grid(row=0,column=1)
self.entry2.grid(row=1,column=1)
button.grid(row=2,columnspan=2)
def _update(self):
stime = self.entry1.get()
etime = self.entry2.get()
if len(stime) == 0 or len(etime) == 0:
tk.messagebox.askokcancel('温馨提示', "stime etime is null")
return
t = threading.Thread(target=self._updateThreadFun,args=(stime,etime))
t.daemon = True
t.start()
def _updateThreadFun(self,starttime,endtime):
print('update s='+starttime+' e='+endtime)
up.UpdateFunction(starttime, endtime)
if __name__ == '__main__':
form = UpdataForm()
form.mainloop()
``` |
{
"source": "2567910/tmm",
"score": 2
} |
#### File: apps/translation_management_tool/signals.py
```python
from django.db.models.signals import post_save, post_delete, m2m_changed
from django.dispatch import receiver
from tmm.apps.translation_management_tool.models import Translation, TranslationKey, Project, Language
import logging
LOGGER = logging.getLogger(__name__)
# If a new translation key is added, create a new translation for each language
@receiver(post_save, sender=TranslationKey)
def create_translations_for_langs (sender, instance, created, **kwargs):
LOGGER.info('In create_translations_for_langs signal')
if created:
for language in instance.project.languages.all():
Translation.objects.create(language=language, key=instance)
# If a translation key is deleted, delete all translations for that key
@receiver(post_delete, sender=TranslationKey)
def delete_translations_for_langs (sender, instance, **kwargs):
LOGGER.info('delete_translations_for_langs')
for language in instance.project.languages.all():
data_to_be_deleted = Translation.objects.filter(key = instance, language=language)
data_to_be_deleted.delete()
# If a new language is added to a project then we need to create a translation for each translation key
@receiver(m2m_changed, sender=Project.languages.through)
def create_translations_for_new_lang (sender, instance, pk_set, action, **kwargs):
LOGGER.info('create_translations_for_new_lang')
for changed_id in pk_set:
all_keys_in_project = TranslationKey.objects.filter(project = instance)
changed_lang = Language.objects.get(id = changed_id)
for key in all_keys_in_project:
if action == "pre_remove":
                translation_to_be_removed = Translation.objects.get(key = key, language = changed_lang)
                translation_to_be_removed.delete()
if action == "pre_add":
Translation.objects.create(language= changed_lang, key = key)
```
#### File: translation_management_tool/tests/test_api.py
```python
from django.test import TestCase
import logging
from django.urls import reverse
from tmm.apps.translation_management_tool.models import Language, Project, Translation, TranslationKey
LOGGER = logging.getLogger(__name__)
class ApiTest(TestCase):
fixtures = ['project.json']
TEST_I18N = {
"title": "Test-Titel",
"common": {
"actions": {
"save": "Speichern"
},
"ok": "OK"
}
}
def test_json_output(self):
project = Project.objects.first()
self.assertIsNotNone(project)
lang_de = Language.objects.get(code='de')
key_title = TranslationKey.objects.create(key='title', project=project)
self.assertEqual(Translation.objects.count(), 2, Translation.objects.values())
Translation.objects.filter(key=key_title, language=lang_de).update(value='Test-Titel')
key_save = TranslationKey.objects.create(key='common.actions.save', project=project)
Translation.objects.filter(key=key_save, language=lang_de).update(value='Speichern')
key_ok = TranslationKey.objects.create(key='common.ok', project=project)
Translation.objects.filter(key=key_ok, language=lang_de).update(value='OK')
response = self.client.get(reverse('i18next_json', kwargs={'project': project.name, 'lang': 'de'}))
self.assertEqual(response.status_code, 200)
data = response.json()
LOGGER.debug('>>>>>>>>>> expected %s', self.TEST_I18N)
LOGGER.debug('>>>>>>>>>> actual %s', data)
self.assertDictEqual(data, self.TEST_I18N)
``` |
{
"source": "2581676612/python",
"score": 4
} |
#### File: python/algorithms/algorithm_sorting.py
```python
def bubble_sort():
    """
    Bubble sort:
    n elements, n-1 rounds;
    each round makes n-i comparisons and floats the largest value to the end
    """
    L = [9, 8, 7, 6, 5, 4, 3, 2, 1]
    n = len(L)
    for i in range(1, n):  # n-1 rounds
        # print n - i
        for j in range(n - i):  # round i makes n-i comparisons, selecting the maximum
            if L[j] > L[j + 1]:
                L[j], L[j + 1] = L[j + 1], L[j]
    print L
def insertion_sort():
    """
    Insertion sort:
    original sequence: [2,3,1,34,5,6,11,7,8]
    indices start at 0:
    pass 1: take index 1 and compare it against [:1]
    pass 2: take index 2 and compare it against [:2]
    ...
    pass n-1: take index n-1 (by now the last element) and compare it against [:n-1]
    n-1 passes in total
    """
    L = [9, 8, 7, 5, 6, 4, 3, 2, 1]
    n = len(L)
    for i in range(n - 1):
        for j in range(i + 1):  # indices start at 0, so pass i works on element i+1
            # print i+1, '-', j
            if L[i + 1] < L[j]: L[i + 1], L[j] = L[j], L[i + 1]
    print L
def selection_sort():
    """
    Selection sort:
    each pass finds the minimum of the remainder and moves it to the front
    pass 1: put the minimum at index 0
    pass 2: put the minimum of the rest at index 1
    ...
    pass n-1
    """
    L = [5, 4, 3, 2, 1, 0, -77]
    n = len(L)
    for i in range(n - 1):
        for j in range(i + 1, n):
            # print i, '-', j
            if L[i] > L[j]: L[i], L[j] = L[j], L[i]
    print L
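# Minimal demo entry point (added for illustration; the original module
# defined the three sorts without calling them):
if __name__ == '__main__':
    bubble_sort()
    insertion_sort()
    selection_sort()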
```
#### File: mysql/mysqldb_/study_mysqldb.py
```python
import MySQLdb
import logging
from contextlib import closing
"""
# # 1. create the database connection
# conn = MySQLdb.connect(host='localhost', port=3306, user='root',
#                        passwd='<PASSWORD>', db='iaasms_dev', )
#
# # 2. create a cursor
# cur = conn.cursor(MySQLdb.cursors.DictCursor)
#
# # 3. through the cursor, execute() runs raw SQL statements against the data
# sql = '''
# SELECT a.name AS snapshot_name, b.name AS volume_name
# FROM snapshot a INNER JOIN volume b
# ON a.volume_id=b.volume_id
# '''
# count = cur.execute(sql)  # returns the total row count
# # result = cur.fetchmany(count)  # returns the requested number of rows
# result = cur.fetchall()
# # 4. close the cursor
# cur.close()
#
# # 5. commit the transaction; without this the data is never really inserted.
# conn.commit()
#
# # 6. close the connection
# conn.close()
#
# # ************ print ***********
# print result
# insert several records at once; the return value is the number of affected rows.
# sql="insert into student values(%s,%s,%s,%s)"
# cur.executemany(sql,[
# ('3','Tom','1 year 1 class','6'),
# ('3','Jack','2 year 1 class','7'),
# ('3','rick','2 year 2 class','7'),
# ])
# ******************* close conn ***************************
from contextlib import closing
import MySQLdb
''' At the beginning you open a DB connection. Particular moment when
you open connection depends from your approach:
- it can be inside the same function where you work with cursors
- in the class constructor
- etc
'''
db = MySQLdb.connect("host", "user", "pass", "database")
with closing(db.cursor()) as cur:
cur.execute("somestuff")
results = cur.fetchall()
# do stuff with results
cur.execute("insert operation")
# call commit if you do INSERT, UPDATE or DELETE operations
db.commit()
cur.execute("someotherstuff")
results2 = cur.fetchone()
# do stuff with results2
# at some point when you decided that you do not need
# the open connection anymore you close it
db.close()
"""
# create a logger named "MySQL"
logger = logging.getLogger('MySQL')
# set the logger's level to DEBUG
logger.setLevel(logging.DEBUG)
# create a StreamHandler that writes log records to the console
handler = logging.StreamHandler()
formatter = logging.Formatter('[%(asctime)s] %(name)s:%(levelname)s: %(message)s')
handler.setFormatter(formatter)
# attach the handler to the logger
logger.addHandler(handler)
class _Closing(closing):
def __exit__(self, *exc_info):
if self.thing:
self.thing.close()
class MySQLUtils(object):
def __init__(self, *args, **kwargs):
"""
:param args:
:param kwargs:
"""
for k, v in kwargs.iteritems():
setattr(self, k, v)
self.__args = args
self.__kwargs = kwargs
self.__connection = None
self.__cursor = None
def __enter__(self):
"""
        Open resources; supports the with statement.
:return: MySQLUtils instance
"""
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
        Release resources; supports the with statement.
:param exc_type:
:param exc_val:
:param exc_tb:
:return:
"""
self.close()
if exc_tb:
            # By default return None; returning None or False lets the caller
            # catch the exception itself (recommended). Returning True means
            # the exception is handled here and the caller keeps running.
logger.error('[%s]%s' % (exc_type, exc_val))
def open(self):
"""
        Open the connection.
:return:
"""
if self.__connection:
raise MySQLdb.MySQLError("connection already connected.")
self.__connection = MySQLdb.connect(*self.__args, **self.__kwargs)
if self.__cursor:
raise MySQLdb.MySQLError("cursor already opened.")
self.__cursor = self.__connection.cursor(MySQLdb.cursors.DictCursor)
logger.info("connection opened.")
def close(self):
"""
        Close the connection.
:return:
"""
with _Closing(self.__cursor) as _:
pass
with _Closing(self.__connection) as _:
pass
self.__cursor = None
self.__connection = None
logger.info("connection close success.")
def __execute(self, sql, commit=False):
"""
        Execute SQL.
:param sql:
:param commit:
:return:tuple result or row numbers
"""
if not (self.__connection and self.__cursor):
raise MySQLdb.MySQLError("connection already closed.")
        count = self.__cursor.execute(sql)  # total number of rows
result = self.__cursor.fetchall()
self.__connection.commit() if commit else None
return count if commit else result
def select(self, sql, formatter_func=None):
"""
        Query helper.
        :param sql:
        :param formatter_func: optional formatter applied to each row
:return:
"""
if formatter_func:
return map(formatter_func, self.__execute(sql))
return self.__execute(sql)
def save_or_update(self, sql):
"""
        Insert or update.
:param sql:
:return:row numbers
"""
return self.__execute(sql, True)
def delete(self, sql):
"""
        Delete rows.
:param sql:
:return: row numbers
"""
return self.__execute(sql, True)
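    def select_params(self, sql, args=None):
        """
        A hedged sketch, not in the original class: pass `args` through to
        cursor.execute() so MySQLdb escapes the values itself, instead of
        formatting SQL strings by hand.
        """
        if not (self.__connection and self.__cursor):
            raise MySQLdb.MySQLError("connection already closed.")
        self.__cursor.execute(sql, args)
        return self.__cursor.fetchall()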
if __name__ == '__main__':
mu = MySQLUtils(host='localhost', port=3306, user='root',
passwd='<PASSWORD>', db='iaasms_dev')
import datetime
def formatter_datetime(dic):
for k, v in dic.iteritems():
if isinstance(v, datetime.datetime):
dic[k] = str(v)
return dic
# 1. try-finally
# try:
# mu.open()
    #     # raise Exception('test exception')
# print mu.select('SELECT * FROM flavor', formatter_datetime)
# print mu.delete('DELETE FROM flavor WHERE id=42')
# finally:
# mu.close()
# 2. with
with mu as mu:
mu.close()
        # raise Exception('test exception')
print mu.select('SELECT * FROM flavor', formatter_datetime)
print mu.delete('DELETE FROM flavor WHERE id=42')
print getattr(mu, 'host'), getattr(mu, 'port'), getattr(mu, 'no', None)
```
#### File: mysql/sqlalchemy_/study_sqlalchemy.py
```python
import time
from sqlalchemy import (
Table, Column, MetaData, create_engine)
from sqlalchemy.engine.result import ResultProxy
from sqlalchemy.sql.sqltypes import (
Unicode, INTEGER)
url = 'mysql+mysqldb://{user}:{pwd}@{host}:{port}/{db_name}?charset=utf8'.format(
user='root',
pwd='<PASSWORD>',
host='localhost',
port='3306',
db_name='iaasms'
)
# pool_recycle=3600 sets the connection-recycle timeout
engine = create_engine(url)
table = Table(
'tom_test', MetaData(),
Column('id', INTEGER, primary_key=True),
Column('start_time', INTEGER, index=False),
Column('last_time', INTEGER, nullable=False),
Column('count', INTEGER, nullable=False),
Column('region', Unicode(20, _warn_on_bytestring=False))
)
# create the table if it does not already exist
table.create(engine, True)
def _formatter_data(res):
"""
    Extract rows from a sqlalchemy.engine.result.ResultProxy.
    res.cursor._rows  # the row data
    res._metadata.keys or res.cursor.description  # the table's column names
:param res:
:return: list
"""
assert isinstance(res, ResultProxy)
assert res.returns_rows
rows = []
for _row in res.cursor._rows:
row = {}
for index, column in enumerate(res._metadata.keys):
row[column] = _row[index]
rows.append(row)
return rows
def _execute_success(res):
"""
    Modification status of a sqlalchemy.engine.result.ResultProxy.
    res.returns_rows  # whether rows were returned
    res.rowcount      # 1 on success, 0 on failure
:param res:
:return: boolean
"""
assert isinstance(res, ResultProxy)
return res.rowcount > 0
def insert():
    # insert a row
    # sqlalchemy.exc.IntegrityError is raised on a primary-key conflict
sql = table.insert().values(**{
'id': 2,
'start_time': time.time(),
'last_time': time.time(),
'count': 1,
'region': 'test'
})
res = engine.execute(sql)
print _execute_success(res)
def select():
    # query rows
sql = table.select().where(table.c.id == 2)
res = engine.execute(sql)
print _formatter_data(res)
def update():
    # update a row
sql = table.update().where(table.c.id == 1).values(count=9)
res = engine.execute(sql)
print _execute_success(res)
def delete():
sql = table.delete().where(table.c.id == 2)
res = engine.execute(sql)
print _execute_success(res)
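# Minimal demo (an assumption: the MySQL instance configured above is
# reachable; insert() will fail with IntegrityError if id=2 already exists):
if __name__ == '__main__':
    insert()
    select()
    update()
    delete()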
```
#### File: crawlers/spider/writer.py
```python
import sys
reload(sys)
sys.setdefaultencoding('utf8')
class FileWriter(object):
def __init__(self, file_name=None):
self.file_name = file_name or 'data.txt'
self._data = []
def load_data(self, data):
if not data:
return
self._data.append(data)
def writer(self):
f = open(self.file_name, 'wb+')
[f.write('%s\n\n' % d) for d in self._data]
f.close()
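# Usage sketch (hypothetical file name):
# fw = FileWriter('out.txt')
# fw.load_data('first record')
# fw.load_data('second record')
# fw.writer()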
```
#### File: study_numpy/numpy_functions/np_dot.py
```python
import numpy as np
"""
>>> import numpy as np
Examples
--------
>>> np.random.rand(3,2)
array([[ 0.14022471, 0.96360618], #random
[ 0.37601032, 0.25528411], #random
[ 0.49313049, 0.94909878]]) #random
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
"""
# ############################### 1-D ###############################
"""
Both operands have the same number of elements:
"""
print np.dot(3, 4) # 3*4 -> 12
print np.dot([1, 2, 3], [4, 5, 6]) # 1 * 4 + 2 * 5 + 3 * 6 -> 32
"""
参数列表不同(短的参数元素个数只能为1,且不能为列表[]类型):
如:
>>> np.dot([1, 2, 3], [4, 5])
ValueError: shapes (3,) and (2,) not aligned: 3 (dim 0) != 2 (dim 0)
>>> np.dot([1, 2, 3], [4])
ValueError: shapes (3,) and (1,) not aligned: 3 (dim 0) != 1 (dim 0)
>>> np.dot([1, 2, 3], 4)
[ 4 8 12]
"""
print np.dot([1, 2, 3], 4) # [1*4,2*4,3*4] -> [ 4 8 12]
# ############################### 二维 ###############################
"""
参数个数相同:
计算过程:
第一轮:
1. A中取第一个元素[x1, y1]
B中取各个元素中的第一个值[m1, m2]
矩阵相乘-> x1*m1+y1*m2
2. A中取第一个元素[x1, y1]
B中取各个元素中的第二个值[n1, n2]
矩阵相乘-> x1*n1+y1*n2
--> [[ 77 110]]
第二轮:
1. A中取第二个元素[x2, y2]
B中取各个元素中的第一个值[m1, m2]
矩阵相乘-> x2*m1+y2*m2
2. A中取第二个元素[x2, y2]
B中取各个元素中的第二个值[n1, n2]
矩阵相乘-> x2*n1+y2*n2
--> [[ 77 110] [165 242]]
"""
x1, y1 = 1, 2
x2, y2 = 3, 4
m1, n1 = 11, 22
m2, n2 = 33, 44
A = [[x1, y1], [x2, y2]]  # rows
B = [[m1, n1], [m2, n2]]  # columns
print np.dot(A, B)
# [[ 77 110]
# [165 242]]
print 'checking the computation by hand:'
print x1 * m1 + y1 * m2, x1 * n1 + y1 * n2 # 77 110
print x2 * m1 + y2 * m2, x2 * n1 + y2 * n2 # 165 242
def my_dot_w2(a, b):
    # check whether both operands are lists
    if isinstance(a, list) and isinstance(b, list):
        assert len(a) == len(b)
        l1, l2 = a, b
        result = []
        if isinstance(l1[0], list):  # multi-dimensional case
            size = len(l1)
            for index, value in enumerate(l1):
                start, cell = 0, []
                while start < size:
                    cell.append(my_dot_w2(value, map(lambda x: x[start], l2)))
                    start += 1
                result.append(cell)
            return result
        else:  # one-dimensional case
            return sum(map(lambda j: l1[j] * l2[j], xrange(len(l1))))
    # number-with-list cases below: scale every element
    elif isinstance(a, list) and isinstance(b, int):
        return map(lambda x: x * b, a)
    elif isinstance(b, list) and isinstance(a, int):
        return map(lambda x: x * a, b)
    # both plain numbers
    elif isinstance(a, int) and isinstance(b, int):
        return a * b
    # anything else is rejected
    else:
        raise Exception('params must be "list or int"!')
print '**' * 50
print my_dot_w2([1, 2], 3) # 1*3,2*3 = [3, 6]
print np.dot([1, 2], 3)
print my_dot_w2(3, [1, 2]) # 3*1,3*2 = [3, 6]
print np.dot(3, [1, 2])
print my_dot_w2([1, 2], [3, 4]) # 1*3+2*4 = 11
print np.dot([1, 2], [3, 4])
print my_dot_w2(A, B)
print np.dot(A, B)
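# Sanity check added for illustration: the hand-rolled version should agree
# with NumPy on the 2-D case above.
assert np.array_equal(np.array(my_dot_w2(A, B)), np.dot(A, B))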
```
#### File: oslo_/config/config_test.py
```python
from oslo_config import cfg
from oslo_config import types
# port constraint: valid TCP port range
PortType = types.Integer(1, 65535)
# several options grouped into one schema
default_opts = [
cfg.StrOpt('bind_host',
default='0.0.0.0',
help='IP address to listen on.'),
    cfg.Opt('bind_port',  # only the generic Opt type accepts PortType
type=PortType,
default=9292,
help='Port number to listen on.')
]
# a single-option schema
default_opt = cfg.ListOpt('enabled_api',
default=['ec2', 'api_compute'],
help='List of APIs to enable by default.')
# command-line options
cli_opts = [
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output'),
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output'),
]
# define the rabbit_group group
rabbit_group = cfg.OptGroup(
name='RABBIT',
title='RABBIT options'
)
# an option inside the group; usually prefixed with the group name (not required)
rabbit_opt = cfg.BoolOpt('ssl',
default=False,
help='use ssl for connection')
# several options belonging to the group
rabbit_opts = [
cfg.StrOpt('host',
default='localhost',
help='IP/hostname to listen on.'),
cfg.IntOpt('port',
default=5672,
help='Port number to listen on.')
]
def register_default_opts(conf):
"""
    Register options in the default group
"""
conf.register_opt(default_opt)
conf.register_opts(default_opts)
def register_rabbit_group(conf):
"""
    Register the rabbit group and its options
"""
    # the group must be registered before its members!
    conf.register_group(rabbit_group)
    # register the multi-option schema; the group must be given explicitly
    conf.register_opts(rabbit_opts, rabbit_group)
    # register the single-option schema, also naming the group
conf.register_opt(rabbit_opt, rabbit_group)
def register_cli_opts(conf):
"""
    Register the CLI options
:param conf:
:return:
"""
conf.register_cli_opts(cli_opts)
def get_bind_host(conf):
"""
    Read the bind_host option
"""
return getattr(conf, 'bind_host', None)
def get_bind_port(conf):
"""
    Read the bind_port option
"""
return conf.bind_port
def get_rabbit_username(conf):
"""
    Present in the config file but never registered in code, so it cannot be resolved
"""
return conf.RABBIT.username
if __name__ == '__main__':
    # create the config object
config = cfg.CONF
    # register the default options
register_default_opts(config)
register_rabbit_group(config)
register_cli_opts(config)
    # load the config file
config(default_config_files=['config.conf'])
print 'host:', get_bind_host(config)
# list_all_sections
for section in config.list_all_sections():
print section
print config.RABBIT
print config.RABBIT.host
print get_rabbit_username(config)
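    # The registered CLI flags can also be parsed explicitly, e.g. (a sketch):
    # config(['--verbose', '--debug'], default_config_files=['config.conf'])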
```
#### File: i18n/i18n_app/_i18n.py
```python
import oslo_i18n
DOMAIN = "i18n_app"
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# The contextual translation function using the name "_C"
# requires oslo.i18n >=2.1.0
_C = _translators.contextual_form
# The plural translation function using the name "_P"
# requires oslo.i18n >=2.1.0
_P = _translators.plural_form
# Translators for log levels.
#
# NOTE(dhellmann): This is not needed for new projects as of the
# Pike series.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def get_available_languages():
"""
    Return the list of languages translations are currently available for.
    # all language packs live under /usr/local/lib/python2.7/dist-packages/babel/locale-data/
:return:
"""
return oslo_i18n.get_available_languages(DOMAIN)
def translate(msg, user_locale='zh_CN'):
"""
翻译"msg"为指定的语言,默认"en_US"
:param msg: the object to translate
:param user_locale: the locale to translate the message to, if None the
default system locale will be used
'en_US' 'zh_CN'
:returns: the translated object in unicode, or the original object if
it could not be translated
"""
return oslo_i18n.translate(msg, user_locale)
def enable_lazy(enable=True):
return oslo_i18n.enable_lazy(enable)
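# Usage sketch (an assumption: a compiled message catalog for the
# "i18n_app" domain is installed; otherwise translate() returns the
# original text unchanged):
# print(translate(_('hello world'), user_locale='zh_CN'))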
```
#### File: services/registry_discovery/service01.py
```python
import rpyc
from rpyc.utils.server import ThreadedServer
class MyService(rpyc.Service):
def on_connect(self):
pass
def on_disconnect(self):
pass
@classmethod
def exposed_get_answer(cls):
return 66
@classmethod
def get_question(cls):
return "what is the airspeed velocity of an unladen swallow?"
if __name__ == "__main__":
t = ThreadedServer(MyService, port=18861)
print """
service start ok! port {port}
""".format(port=18861)
t.start()
```
#### File: examples/executors/process_pool.py
```python
from datetime import datetime
import os
from apscheduler.schedulers.blocking import BlockingScheduler
def tick():
print('Tick! The time is: %s' % datetime.now())
if __name__ == '__main__':
scheduler = BlockingScheduler()
scheduler.add_executor('processpool')
scheduler.add_job(tick, 'interval', seconds=3)
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
pass
```
#### File: examples/jobstores/job_store.py
```python
from datetime import datetime, timedelta
import sys
import os
from apscheduler.schedulers.blocking import BlockingScheduler
def alarm(time):
print('Alarm! This alarm was scheduled at %s.' % time)
if __name__ == '__main__':
scheduler = BlockingScheduler()
# ################ mongodb
scheduler.add_jobstore('mongodb', collection='example_jobs')
if len(sys.argv) > 1 and sys.argv[1] == '--clear':
scheduler.remove_all_jobs()
alarm_time = datetime.now() + timedelta(seconds=10)
scheduler.add_job(alarm, 'date', run_date=alarm_time, args=[datetime.now()])
print('To clear the alarms, run this example with the --clear argument.')
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
pass
```
#### File: python/standard_library/study_httplib.py
```python
import httplib
import urllib
def request_get(host, port, url, params=None, headers=None, timeout=5):
status, http_clint, data = None, None, None
try:
        http_clint = httplib.HTTPConnection(host, port, timeout=timeout)
        if params:
            url = url + '?' + urllib.urlencode(params)
        http_clint.request('GET', url, headers=headers or {})
response = http_clint.getresponse()
status = response.status
data = response.read()
except Exception, e:
print e
finally:
if http_clint:
http_clint.close()
return status, data
def request_post(host, port, url, body=None, headers=None, timeout=5):
status, http_clint, data = None, None, None
try:
http_clint = httplib.HTTPConnection(host, port, timeout=timeout)
http_clint.request('POST', url, body, headers)
response = http_clint.getresponse()
status = response.status
data = response.read()
except Exception, e:
print 'http post error :{0}'.format(e)
finally:
if http_clint:
http_clint.close()
return status, data
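# Usage sketch (hypothetical host and path):
# status, body = request_get('example.com', 80, '/search', params={'q': 'test'})
# print status, body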
```
#### File: python/standard_library/study_logging.py
```python
import logging
import sys
FORMAT_STR = "[%(asctime)s] %(name)s:%(levelname)s: %(message)s"
# logger = logging.getLogger("django")
# logger.debug(logging.DEBUG)  # relies on django hot reloading
def config1():
"""
**********************Config 1**********************
"""
    # config 1.
    # Set the default level to DEBUG and configure the log format.
    # Note: basicConfig has one big drawback -- calling it adds a handler
    # to the root logger, so when your program runs together with
    # third-party modules that also use logging, it changes those modules'
    # logger behaviour as well. This follows from logger inheritance.
logging.basicConfig(
level=logging.DEBUG,
format="[%(asctime)s] %(name)s:%(levelname)s: %(message)s"
)
    # emit some log records
logging.debug('debug')
logging.info('info')
logging.warn('warn')
logging.error('error')
logging.critical('critical')
def config2():
"""
********************Config 2************************
"""
    # # config 2
    # use a logger named app_name
    logger = logging.getLogger('app_name')
    # set the logger's level to DEBUG
    logger.setLevel(logging.DEBUG)
    # create a StreamHandler that writes log records to the console
    handler = logging.StreamHandler()
    formatter = logging.Formatter('[%(asctime)s] %(name)s:%(levelname)s: %(message)s')
    handler.setFormatter(formatter)
    # attach the handler to the logger
    logger.addHandler(handler)
logger.debug('debug message')
logger.info('hello world')
def config3():
"""
config3 输出到文件
"""
    # get a logger instance; with no name this returns the root logger
    logger = logging.getLogger("AppName")
    # define the logger's output format
    formatter = logging.Formatter(FORMAT_STR)
    # file logging
    file_handler = logging.FileHandler("test.log")
    file_handler.setFormatter(formatter)  # setFormatter sets the output format
    # console logging
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.formatter = formatter  # the formatter can also be assigned directly
    # attach the handlers; custom handlers can route output elsewhere
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    # set the minimum level that will be emitted; the default is WARN
    logger.setLevel(logging.INFO)
    # emit records at the various levels
logger.debug('this is debug info')
logger.info('this is information')
logger.warn('this is warning message')
logger.error('this is error message')
logger.fatal('this is fatal message, it is same as logger.critical')
logger.critical('this is critical message')
if __name__ == '__main__':
    config3()
```
#### File: standard_library/threads/rethread.py
```python
import threading
import time
class ReThread(threading.Thread):
def __init__(self, *args, **kwargs):
super(ReThread, self).__init__(*args, **kwargs)
        self.__flag = threading.Event()  # flag used to pause the thread
        self.__flag.set()  # start unpaused (True)
        self.__running = threading.Event()  # flag used to stop the thread
        self.__running.set()  # start running (True)
@property
def is_running(self):
"""
获取运行标志
:return: True/False
"""
return self.__running.isSet()
    def run(self):
        """
        Loop while the running flag is set; check self.is_running for the
        flag. A minimal sketch fills the body so the template is runnable;
        subclasses should override this with real work.
        """
        while self.__running.isSet():
            self.__flag.wait()  # blocks while paused (flag cleared)
            time.sleep(0.1)  # placeholder workload
def stop(self):
"""
        Clear the flag to False so the thread pauses.
"""
self.__flag.clear()
def resume(self):
"""
        Set the flag to True so the thread resumes.
"""
self.__flag.set()
def exit(self):
"""
        Set the pause flag to True (unblock the loop)
        and the running flag to False (end the loop).
"""
self.__flag.set()
self.__running.clear()
if __name__=="__main__":
rt= ReThread()
rt.start()
print '111'
# rt.join()
``` |
{
"source": "2585157341/RFBNet-master_Chinese_note",
"score": 2
} |
#### File: layers/functions/detection.py
```python
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Function
from torch.autograd import Variable
from utils.box_utils import decode
# Invoked from test_RFB.py: the constructed detector combines the net output `out`
# (split into loc + conf in forward; given RFBNet's structure these come from
# fully-convolutional feature maps -- see the multibox function). conf holds the
# predicted class scores, and loc holds the offsets of the anchors (a.k.a. prior
# boxes) predefined in position, scale and aspect ratio; combining anchors with
# the loc offsets yields the final predictions.
# NMS and top-k thresholding are post-processing steps, not part of Detect.
class Detect(Function):
"""At test time, Detect is the final layer of SSD. Decode location preds,
apply non-maximum suppression to location predictions based on conf
scores and threshold to a top_k number of output predictions for both
confidence score and locations.
"""
def __init__(self, num_classes, bkg_label, cfg):
self.num_classes = num_classes
self.background_label = bkg_label
self.variance = cfg['variance']
def forward(self, predictions, prior):
"""
Args:
loc_data: (tensor) Loc preds from loc layers
                Shape: [batch,num_priors*4] (4 box coordinates)
conf_data: (tensor) Shape: Conf preds from conf layers
Shape: [batch*num_priors,num_classes]
prior_data: (tensor) Prior boxes and variances from priorbox layers
Shape: [1,num_priors,4]
"""
        loc, conf = predictions  # the (loc, conf) pair produced by the net invoked in test_RFB.py
        loc_data = loc.data  # see multibox_loss.py for the output shapes; loc: (batch_size, num_priors, 4)
        conf_data = conf.data  # conf: (batch_size, num_priors, num_classes)
        prior_data = prior.data  # priors: (num_priors, 4)
        num = loc_data.size(0)  # batch_size
        self.num_priors = prior_data.size(0)  # number of predefined anchors, i.e. all anchors on every detection branch's feature map (as in SSD)
        self.boxes = torch.zeros(1, self.num_priors, 4)  # for batch_size = 1; 4 holds the bbox (x1, y1, x2, y2) coordinates
        self.scores = torch.zeros(1, self.num_priors, self.num_classes)  # per-bbox class scores, e.g. VOC 20 + 1 classes, batch size = 1
if loc_data.is_cuda:
self.boxes = self.boxes.cuda()
self.scores = self.scores.cuda()
        if num == 1:
            # size batch x num_classes x num_priors
            conf_preds = conf_data.unsqueeze(0)  # batch size = 1; add a dimension to normalize the shape
        else:
            conf_preds = conf_data.view(num, self.num_priors,
                                        self.num_classes)  # reshape because the batch holds several images
            self.boxes.expand_(num, self.num_priors, 4)  # expand to num when batch_size > 1
            self.scores.expand_(num, self.num_priors, self.num_classes)
# Decode predictions into bboxes.
for i in range(num): # for each detected image
# Decode locations from predictions using priors to undo the encoding we did for offset regression
            # decode() lives in utils/box_utils.py
decoded_boxes = decode(loc_data[i], prior_data, self.variance)
conf_scores = conf_preds[i].clone()
self.boxes[i] = decoded_boxes
self.scores[i] = conf_scores
return self.boxes, self.scores
``` |
{
"source": "2594306528/semi-supervised-for-captcha",
"score": 3
} |
#### File: 2594306528/semi-supervised-for-captcha/models.py
```python
import torch.nn as nn
from layers import CNN, Encoder, RNNAttnDecoder
class CNNSeq2Seq(nn.Module):
def __init__(self, vocab_size):
super(CNNSeq2Seq, self).__init__()
self.backbone = CNN()
self.encoder = Encoder()
self.decoder = RNNAttnDecoder(vocab_size=vocab_size)
def forward(self, x, y, is_training):
out = self.backbone(x)
encoder_outputs, encoder_hidden = self.encoder(out)
vocab_out = self.decoder(y, encoder_outputs, encoder_hidden, is_training)
return vocab_out
def forward_2(self, x, max_len):
out = self.backbone(x)
encoder_outputs, encoder_hidden = self.encoder(out)
vocab_out = self.decoder.forward_2(encoder_outputs, encoder_hidden, max_len)
return vocab_out
```
#### File: 2594306528/semi-supervised-for-captcha/util.py
```python
import torch.nn as nn
from timm.loss import LabelSmoothingCrossEntropy
import torch.nn.functional as F
class Seq2SeqLoss(nn.Module):
def __init__(self):
super(Seq2SeqLoss, self).__init__()
self.criterion = LabelSmoothingCrossEntropy()
def forward(self, outputs, y):
"""
outputs: [batch_size, max_len-1, vocab_size]
y: [batch_size, max_len]
"""
max_len = y.size(1)
return sum([self.criterion(outputs[:, i, :], y[:, i + 1]) for i in range(max_len - 1)]) / (max_len - 1)
class ClassLoss(nn.Module):
def __init__(self):
super(ClassLoss, self).__init__()
self.criterion = nn.CrossEntropyLoss(ignore_index=-1)
def forward(self, outputs, y):
"""
outputs: [batch_size, max_len-1, vocab_size]
y: [batch_size, max_len]
"""
max_len = y.size(1)
return sum([self.criterion(outputs[:, i, :], y[:, i + 1]) for i in range(max_len - 1)])
class ConsistentLoss(nn.Module):
def __init__(self):
super(ConsistentLoss, self).__init__()
def forward(self, outputs, outputs_ema):
"""
outputs: [batch_size, max_len-1, vocab_size]
outputs_ema: [batch_size, max_len-1, vocab_size]
"""
batch_size = outputs.size(0)
max_len = outputs.size(1) + 1
num_classes = outputs.size(2)
loss = 0
for i in range(max_len-1):
input_logits = outputs[:, i, :]
target_logits = outputs_ema[:, i, :]
input_softmax = F.softmax(input_logits, dim=1)
target_softmax = F.softmax(target_logits, dim=1)
loss += F.mse_loss(input_softmax, target_softmax, size_average=False) / num_classes
return loss / batch_size
def compute_seq_acc(outputs, y, batch_size, max_len):
"""
outputs: [batch_size, max_len-1, vocab_size]
y: [batch_size, max_len]
"""
num_eq = (y[:, 1:].data == outputs.max(2)[1]).sum(dim=1)
accuracy_clevel = num_eq.sum() / batch_size / (max_len - 1)
accuracy_all = (num_eq == max_len - 1).sum() / batch_size
return accuracy_clevel, accuracy_all
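# Illustrative check with dummy tensors (hypothetical shapes): 2 sequences,
# 3 decoding steps over a vocabulary of 5; y includes the start token at index 0.
# import torch
# outputs = torch.randn(2, 3, 5)
# y = torch.randint(0, 5, (2, 4))
# clevel, all_correct = compute_seq_acc(outputs, y, batch_size=2, max_len=4)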
``` |
{
"source": "25A0/MustacheDonkeyPants",
"score": 3
} |
#### File: MustacheDonkeyPants/tests/test_mdpants.py
```python
import pytest
from mdpants import mdpants
import argparse
import binascii
# Test that parse_args fails when mandatory elements are missing
def test_fails_without_arguments():
with pytest.raises(SystemExit):
mdpants.parse_args([])
def test_just_random_succeeds():
assert mdpants.parse_args(['-R'])
def test_single_file_succeeds():
assert mdpants.parse_args(['existing_file'])
def test_random_and_file_fails():
with pytest.raises(SystemExit):
assert mdpants.parse_args(['-R', 'existing_file'])
def test_infile_and_binary_fails():
with pytest.raises(SystemExit):
assert mdpants.parse_args(['--in existing_file', '--bin existing_file'])
def test_infile_needs_argument():
with pytest.raises(SystemExit):
assert mdpants.parse_args(['--in'])
def test_binfile_needs_argument():
with pytest.raises(SystemExit):
assert mdpants.parse_args(['--bin'])
def test_word_count():
assert mdpants.len_wordlist('tests/lists/wordlist.txt') == 50
def test_word_count_sparse():
assert mdpants.len_wordlist('tests/lists/sparsewordlist.txt') == 50
def test_word_count_non_ascii():
assert mdpants.len_wordlist('tests/lists/emoticons.txt') == 18
def test_accept_lines_with_not_only_whitespace():
assert mdpants.accept_line(' foo \n')
assert mdpants.accept_line('foo\n')
assert mdpants.accept_line('\t\tfoo\t\n')
def test_seed_depends_on_file_content():
seed1 = mdpants.get_hash_seed('tests/lists/existing_file')
seed2 = mdpants.get_hash_seed('tests/lists/another_existing_file')
assert not seed1 == seed2
def test_seed_deterministic():
seed1 = mdpants.get_hash_seed('tests/lists/existing_file')
seed2 = mdpants.get_hash_seed('tests/lists/existing_file')
assert seed1 == seed2
seed3 = binascii.unhexlify('0cf9180a764aba863a67b6d72f0918bc131c6772642cb2dce5a34f0a702f9470ddc2bf125c12198b1995c233c34b4afd346c54a2334c350a948a51b6e8b4e6b6')
assert seed3 == seed1
def test_seed_from_binary_file():
assert mdpants.get_hash_seed('tests/lists/binary')
def test_random_seed_not_trivially_broken():
seed1 = mdpants.get_prng_seed()
seed2 = mdpants.get_prng_seed()
assert not seed1 == seed2
def test_extract_words():
count = mdpants.len_wordlist('tests/lists/wordlist.txt')
words = mdpants.fetch_words('tests/lists/wordlist.txt',
[0.0/count, 1.0/count, 2.0/count], 'text')
assert words == ['Aa', 'Aaa', 'Aah']
def test_extract_sparse_words():
count = mdpants.len_wordlist('tests/lists/sparsewordlist.txt')
words = mdpants.fetch_words('tests/lists/sparsewordlist.txt',
[0.0/count, 4.0/count, 22.0/count], 'text')
assert words == ['Aa', 'Aahing', 'Aasvogels']
def test_extract_sparse_non_ascii():
count = mdpants.len_wordlist('tests/lists/emoticons.txt')
words = mdpants.fetch_words('tests/lists/emoticons.txt',
[0.0/count, 8.0/count, 17.0/count], 'text')
assert words == [u'😚', u'😢', u'😫']
``` |
{
"source": "25shmeckles/scripts",
"score": 3
} |
#### File: scripts/bkp/tcplib.py
```python
import sys
import socket
import random
ver = '0.1'
## Helper functions
def getMyIP():
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
sock.connect(("8.8.8.8", 80))
myIP = sock.getsockname()[0]
return myIP
def getAddress(args):
if len(args):
ip = args[0]
port = args[1]
else:
ip, port = getRndDefault()
return ip, port
def getRndDefault():
r = [line.split() for line in defaults.split('\n') if line != '']
return random.sample(r, 1)[0]
## Globals
defaults = \
'''
172.16.17.32 9999
'''
## Commands
def help(doc):
print(doc)
def send(ip, port, data, myIP):
received = None
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
# Connect to server and send data
sock.connect((ip, int(port)))
sock.sendall(bytes(f'{myIP} {data}\n', 'utf-8'))
# Receive data from the server and shut down
received = str(sock.recv(1024), 'utf-8')
print(f'Sent: {data}')
print(f'Received: {received}')
def sendfile(ip, port, filepath, tag, myIP):
if '/' in filepath:
filename = filepath.split('/')[-1]
elif '\\' in filepath:
filename = filepath.split('\\')[-1]
else:
filename = filepath
file_content = ''
with open(filepath, 'r') as f:
for line in f:
file_content += f'_*_{line}' #_*_ as line break
data = f'{tag} {filename} {file_content}'
send(ip, port, data, myIP)
def pushme(ip, port, myIP):
tag = 'me'
for filepath in ['tcplib.py','tcp-cli.py','tcp-srv.py']:
sendfile(ip, port, filepath, tag, myIP)
def test(ip, port, myIP):
data = 'connection test'
send(ip, port, data, myIP)
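# Example session, assuming a matching tcp-srv.py listener is running on one of
# the default addresses above:
# myIP = getMyIP()
# ip, port = getAddress(sys.argv[1:])
# test(ip, port, myIP)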
```
#### File: 25shmeckles/scripts/canvas_demo.py
```python
import tkinter as tk
import random
# --- functions ---
def create_mines(how_many, canvas):
bubbles = []
w = canvas.winfo_reqwidth()
h = canvas.winfo_reqheight()
for __ in range(how_many):
x = random.randint(0, w)
y = random.randint(0, h)
r = random.randint(5, 10)
mine = canvas.create_oval(x-r, y-r, x+r, y+r)
bubbles.append([mine, r])
return bubbles
def moves_mines(canvas, bubbles):
h = canvas.winfo_reqheight()
for mine, r in bubbles:
#canvas.move(mine, 0, -1)
# get position
x1, y1, x2, y2 = canvas.coords(mine)
# change
y1 -= 1
y2 -= 1
# if top then move to the bottom
if y2 <= 0:
y1 = h
y2 = y1 + 2*r
# set position
canvas.coords(mine, x1, y1, x2, y2)
root.after(REFRESH_RATE, moves_mines, canvas, bubbles)
if __name__ == '__main__':
root = tk.Tk()
canvas = tk.Canvas(root, width=800, height=600)
canvas.pack()
REFRESH_RATE = 10 #ms
bubbles = create_mines(50, canvas)
root.after(REFRESH_RATE, moves_mines, canvas, bubbles)
root.mainloop()
```
#### File: 25shmeckles/scripts/Ensembl_Client.py
```python
import sys
import urllib.error
import urllib.parse
import urllib.request
import json
import time
class EnsemblRestClient(object):
def __init__(self, server='http://rest.ensembl.org', reqs_per_sec=15):
self.server = server
self.reqs_per_sec = reqs_per_sec
self.req_count = 0
self.last_req = 0
def perform_rest_action(self, endpoint, hdrs=None, params=None):
if hdrs is None:
hdrs = {}
if 'Content-Type' not in hdrs:
hdrs['Content-Type'] = 'application/json'
if params:
            endpoint += '?' + urllib.parse.urlencode(params)
data = None
# check if we need to rate limit ourselves
if self.req_count >= self.reqs_per_sec:
delta = time.time() - self.last_req
if delta < 1:
time.sleep(1 - delta)
self.last_req = time.time()
self.req_count = 0
try:
            request = urllib.request.Request(self.server + endpoint, headers=hdrs)
            response = urllib.request.urlopen(request)
content = response.read()
if content:
data = json.loads(content)
self.req_count += 1
        except urllib.error.HTTPError as e:
# check if we are being rate limited by the server
if e.code == 429:
if 'Retry-After' in e.headers:
retry = e.headers['Retry-After']
time.sleep(float(retry))
self.perform_rest_action(endpoint, hdrs, params)
else:
sys.stderr.write('Request failed for {0}: Status code: {1.code} Reason: {1.reason}\n'.format(endpoint, e))
return data
def get_variants(self, species, symbol):
genes = self.perform_rest_action(
'/xrefs/symbol/{0}/{1}'.format(species, symbol),
params={'object_type': 'gene'}
)
if genes:
stable_id = genes[0]['id']
variants = self.perform_rest_action(
'/overlap/id/{0}'.format(stable_id),
params={'feature': 'variation'}
)
return variants
return None
def run(species, symbol):
client = EnsemblRestClient()
variants = client.get_variants(species, symbol)
if variants:
for v in variants:
            print('{seq_region_name}:{start}-{end}:{strand} ==> {id} ({consequence_type})'.format(**v))
if __name__ == '__main__':
if len(sys.argv) == 3:
species, symbol = sys.argv[1:]
else:
species, symbol = 'human', 'BRAF'
run(species, symbol)
```
#### File: 25shmeckles/scripts/FormatInput_2.py
```python
infile = '/home/amarcozz/Documents/Projects/Fusion Genes/datasets/ISCA_UCSC_missing.txt'
outfile = '/home/amarcozz/Documents/Projects/Fusion Genes/datasets/dataset_4b.txt'
def find(string, char):
    '''Looks for a character in a string and returns its indexes.'''
return [index for index, letter in enumerate(string) if letter == char]
def format_Tags_Vals(infile, outfile):
with open(infile) as inf:
lines = inf.readlines()
with open(outfile, 'w') as outf:
startline = 1840
list_of_dict = []
for line in lines:
tab_index = find(line, '\t')
tags_string = line[:tab_index[0]].rstrip(',').rstrip('\n')
data_string = line[tab_index[0]+1:].rstrip(',').rstrip('\n')
# divides each strings in list of strings. The comma is the separator.
tags_indexes = find(tags_string, ",")
data_indexes = find(data_string, ",")
            print(tags_indexes)
            print(data_indexes)
tags_list = []
data_list = []
n = 0
for index in tags_indexes:
tags_list.append(tags_string[n:index])
n = index + 1
tags_list.append(tags_string[n:])
            print(tags_list)
n = 0
for index in data_indexes:
data_list.append(data_string[n:index].replace('%2C','').replace('.',''))
n = index + 1
data_list.append(data_string[n:])
            print(data_list)
            # creates a dictionary for each line with Tag:Value pairs
vars()['dict'+ str(startline)] = {}
for n in range(len(tags_list)):
vars()['dict'+ str(startline)].update({tags_list[n]:data_list[n]})
# organizes the dictionaries in a list of dict.
list_of_dict.append(vars()['dict'+ str(startline)])
# next line tracker
startline += 1
        print(len(list_of_dict))
        print(startline - 1840)
for dic in list_of_dict:
if dic['var_type'] == 'copy_number_loss':
orientation = 'TH'
elif dic['var_type'] == 'copy_number_gain':
orientation = 'HT'
else:
orientation = 'None'
newline = dic['ID'] + '\t' + dic['Parent'] + '\t' + dic['samples'] + '\t' + dic['var_origin'] + '\t' + dic['Start_range'] + '\t' + dic['End_range'] + '\t' + dic['var_type'] + '\t' + orientation
            print(newline)
outf.write(newline + '\n')
format_Tags_Vals(infile, outfile)
``` |
{
"source": "2600box/harvest",
"score": 3
} |
#### File: management/commands/create_harvest_superuser.py
```python
import sys
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.db import IntegrityError
from Harvest.utils import get_logger
logger = get_logger(__name__)
class Command(BaseCommand):
help = "Create a superuser with a given username and password"
_type_map = {'int': int, 'float': float}
def add_arguments(self, parser):
parser.add_argument('--exists-ok', default=False, action='store_true')
parser.add_argument('username')
parser.add_argument('password')
def handle(self, *args, **options):
try:
User.objects.create_superuser(
username=options['username'],
email=None,
password=options['password'],
)
logger.info('User {} created.', options['username'])
except IntegrityError:
if not options['exists_ok']:
print('User {} already exists!'.format(options['username']))
sys.exit(1)
```
#### File: harvest/Harvest/path_utils.py
```python
import os
import shutil
def list_abs_files(path):
results = []
for root, dirs, files in os.walk(path):
for file in files:
results.append(os.path.join(root, file))
results.sort(key=lambda f: (f.count('/'), f))
return results
def list_rel_files(path):
return [os.path.relpath(f, path) for f in list_abs_files(path)]
def list_src_dst_files(src_path, dst_path):
return [
(os.path.join(src_path, rel_file), os.path.join(dst_path, rel_file))
for rel_file in list_rel_files(src_path)
]
def copytree_into(src_path, dst_path):
for src_file, dst_file in list_src_dst_files(src_path, dst_path):
os.makedirs(os.path.dirname(dst_file), exist_ok=True)
shutil.copy2(src_file, dst_file)
def strip_invalid_path_characters(path):
return ''.join(c for c in path if c not in r'\/:*?"<>|')
```
#### File: plugins/bibliotik/compression.py
```python
import zlib
BIBLIOTIK_ZDICT = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0" />
<link rel="shortcut icon" href="/static/favicon.ico" />
<script src="/static/jquery-1.7.2.min.js"></script>
<script src="/static/jquery.confirm-1.2.js"></script>
<script src="/static/formerrorlistscroll.js"></script>
<script src="/static/lightbox/jquery.lightbox-0.5.min.js"></script>
<script src="/static/jquery.lazy-1.7.4.min.js"></script>
<script src="/static/imagesloaded-4.1.min.js"></script>
<script src="/static/jquery.qtip-3.0.3.min.js"></script>
<script src="/static/disableOnSubmit.js"></script>
<script src='/static/simplemde-1.11.2.min.js'></script>
<link rel='stylesheet' href='/static/simplemde-1.11.2.min.css'>
<link rel='stylesheet' href='/static/components/font-awesome/font-awesome-built.css'>
<link rel="stylesheet" type="text/css" media="screen" href="/static/lightbox/jquery.lightbox-0.5.css" />
<link rel="stylesheet" type="text/css" media="screen" href="/static/jquery.qtip-3.0.3.min.css" />
<link rel="stylesheet" type="text/css" media="screen" href="/static/default.css?" />
<link rel="stylesheet" type="text/css" media="screen" href="/static/css_garden/makeup.css?" />
<title>Bibliotik / Torrents / </title>
<script type="text/javascript">
document.head.parentElement.className += ' have-js'; // flag JS availability
function brokenImage(img) {
img.src="/static/icons/NI_Broken.png";
}
$(function() {
$('a[rel*=lightbox]').lightBox({
imageLoading: '/static/lightbox/lightbox-ico-loading.gif',
imageBtnClose: '/static/lightbox/lightbox-btn-close.gif',
imageBtnPrev: '/static/lightbox/lightbox-btn-prev.gif',
imageBtnNext: '/static/lightbox/lightbox-btn-next.gif',
imageBlank: '/static/lightbox/lightbox-blank.gif'
});
$('form').disableOnSubmit();
$('time').attr('title', function() {
return new Date($(this).attr('datetime')).toLocaleString();
});
// Rewrite legacy links as relative links
$('a[href*="bibliotik.org"]').attr('href', function(idx, href){
return (href||$(this).attr('href')).replace(/^https?:\/\/(www\.)?bibliotik.org/i, '');
});
});
</script>
</head>
<body>
<div id="superwrap">
<div id="headerwrap">
<div id="pre_header">
<ul id="pre_header_status">
<li><a href=""></a> ()</li>
<li><a href="/settings">Settings</a></li>
<li><a href="/logout?authkey=">Log Out</a></li>
<li>Up: </li>
<li>Down: </li>
<li> Ratio:
,</li>
<li>Required: 0.0 (Exempt!)</li>
</ul>
<ul id="pre_header_nav">
<li><a href="/invites">Invites</a>
(4)
</li>
<li> <a href="/conversations">Conversations</a>
</li>
<li><a href="/notifications">Notifications</a></li>
<li><a href="/bookmarks">Bookmarks</a></li>
<li><a href="/uploads">Uploads</a></li>
</ul>
</div>
<div id="header">
<div id="header_top">
<a href="/"><span id="header_logo"></span></a>
<span id="header_notifications">
</span>
</div>
<div id="header_nav">
<ul id="header_nav_links">
<li><a href="/">Home</a></li>
<li><a href="/torrents/">Torrents</a></li>
<li><a href="/requests/">Requests</a></li>
<li><a href="/collections/">Collections</a></li>
<li><a href="/forums/">Forums</a></li>
<li><a href="/rules/">Rules</a></li>
<li><a href="/help/">Help</a></li>
<li><a href="/log/">Log</a></li>
<li><a href="/upload/">Upload</a></li>
</ul>
<span id="header_nav_search">
<form action="/torrents/" method="get">
<input type="text" size="35" name="search" id="search_header" value="> Search torrents" onblur="if (this.value == '') this.value = '> Search torrents';" onfocus="if (this.value == '> Search torrents') this.value = '';" />
<input type="submit" value="" style="display:none;" />
</form>
</span>
</div>
</div>
</div>
<div id="body">
<div id="torrent-" class="torrent retail ebooks-category epub-format english-language">
<h1 id="title"><img src="/static/icons/Ebooks.png" style="vertical-align:text-top" title="Ebooks" />
</h1>
<div id="sidebar">
<a rel="lightbox" href=""><img src='' width="220" /></a>
<ul>
<li class="details_peers">
<strong>7</strong> <a href="/torrents//peers">
Peers
</a>
</li>
<li class="details_snatches">
<strong>5</strong> <a href="/torrents//snatches">
Snatches
</a>
</li>
</ul>
<form action="" method="post" accept-charset="UTF-8"><input type="hidden" name="addTags" /><input type="hidden" name="authkey" value="" /><table><tr valign="top"><td class="Flabel"><label for="TagsField">Add tag: </label></td><td class="Ffield"><input type="text" name="TagsField" id ="TagsField" maxlength="1024" size ="10" value="" /></td></tr><tr><td colspan="2"><input type="submit" value="Add tag!" /></td></tr></table></form>
</div>
<div id="main">
<div id="detailsbox">
<p id="creatorlist">
By <a class="authorLink" href="/creators/"></a></p>
<p id="published">
Published by <a class="publisherLink" href="/publishers/"></a> in (<span id="torrentISBN"></span>)</p>
<p id="details_content_info">
English, pages</p>
<p id="details_tags">
Tags: <span class="taglist"><a class="tagLink" href="/tags/26">fiction</a></span> </p>
<p id="details_file_info">
Retail EPUB,
MB,
file(s)
<a><label for=file-toggler class=toggling data-show="(show)" data-hide="(hide)">(show)</label></a>
</p>
<p id="details_upload_info">
Uploaded by <a href="/users/"></a> <time datetime=""> minutes, seconds ago</time>
</p>
<p id="details_activity_info">
seeders,
leecher,
snatches,
<a href="/torrents/#comments">0</a>
comments,
0 bookmarks
</p>
<p id="details_ratio_info">
<em>If you download this torrent, your ratio will be .</em>
</p>
<p id="details_links">
<a href="/torrents//download" title="Download"><img src="/static/icons/download.png" /></a>
<span class="doBookmark" title="Add Bookmark"> </span>
<a href="/torrents//report" title="Report"><img src="/static/icons/report.png" /></a>
<a href="/torrents//wiki" title="Description"><img src="/static/icons/wiki.png" /></a>
<a href="/torrents//images" title="Images"><img src="/static/icons/image.png" /></a>
</p>
</div>
<input type=checkbox id=file-toggler class=toggling>
<div id="files" class="table_div toggling">
<h2>Files</h2>
<table cellspacing="0" style="width:auto">
<thead>
<tr>
<th>Filename</th>
<th>Size</th>
</tr>
</thead>
<tbody>
<tr>
<td>/td>
<td> MB</td>
</tr>
</tbody>
</table>
</div>
<div id="description">
<h2>Description</h2>
<div class="markdown">
</div>
</div>
</div>
<div id="comments">
<script type="text/javascript">
$(function() {
$('.quoteComment').live("click", function() {
var commentId = $(this).attr("id").split("-")[1];
$.ajax({
method: "get",
url: "/torrents//comments/" + commentId + "/get",
beforeSend: function() {
$("#doQuoteComment-" + commentId).html('<img src="/static/icons/loading.gif" />');
},
complete: function() {
$("#doQuoteComment-" + commentId).html('<a class="quoteComment" id="quote-' + commentId + '">Quote</a>');
},
success: function(text) {
var temp = $("#CommentField").val();
if (temp.length && !temp.endsWith('\n')) temp += '\n';
if (temp.length && !temp.endsWith('\n\n')) temp += '\n';
temp += text + '\n\n';
$("#CommentField").val(temp);
$("#CommentField").change();
window.location.hash = "#commentHeader";
},
error: function() {
$("#doQuoteComment-" + commentId).html('<strong>Connection failed!</strong>');
}
});
});
});
</script>
<h2>Comments</h2>
<p>No comments found.</p>
</div>
<h4 id="commentHeader">Add comment</h4>
<form action="" method="post" accept-charset="UTF-8"><input type="hidden" name="addComment" /><input type="hidden" name="authkey" value="" /><table><tr><td class="Flabel"><label for="CommentField">Comment: </label></td><td class="Ffield"><textarea name="CommentField" id="CommentField" rows="15" cols="90"></textarea></td></tr><tr><td colspan="2"><input type="submit" value="Add comment!" /></td></tr></table></form></div>
</div> <!-- End #body -->
<div id="footer">
<div id="footer_nav">
<div id="more">
<h4>More:</h4>
<ul>
<li><a href="/users">Users</a></li>
<li><a href="/tags">Tags</a></li>
<li><a href="/creators">Creators</a></li>
<li><a href="/publishers">Publishers</a></li>
</ul>
</div>
<div id="rules">
<h4>Rules:</h4>
<ul>
<li><a href="/rules">Main</a></li>
<li><a href="/rules/uploading">Uploading</a></li>
<li><a href="/rules/retail">Retail</a></li>
<li><a href="/rules/trumping">Trumping</a></li>
<li><a href="/rules/naming-conventions">Naming</a></li>
<li><a href="/rules/requests">Requests</a></li>
<li><a href="/rules/tagging">Tagging</a></li>
<li><a href="/rules/ratio">Ratio</a></li>
</ul>
</div>
<div id="help">
<h4>Help:</h4>
<ul>
<li><a href="/tutorials">Tutorials</a></li>
<li><a href="/help/contact">Contact</a></li>
<li><a href="/help/classes">User Classes</a></li>
<li><a href="/help/searching">Searching</a></li>
<li><a href="/help/editor">Text Formatting</a></li>
<li><a href="/help/clients">Allowed Clients</a></li>
<li><a href="/help/ports">Blacklisted Ports</a></li>
</ul>
</div>
</div>
</div> <!-- End #footer -->
</div> <!-- End #superwrap -->
<script src='/static/store-2.0.3.everything.min.js'></script><script src='/static/toggle-display.js?'></script><script type='text/javascript'>var bookmarksAuthKey = "";</script><script src='/static/do-bookmarks.js?'></script><script src='/static/cover-hover.js?'></script><script src='/static/wysiwyg-editor.js?'></script></body>
</html>'''.encode()
def bibliotik_compress_html(html):
obj = zlib.compressobj(level=9, zdict=BIBLIOTIK_ZDICT)
return b'b\x01' + obj.compress(html.encode()) + obj.flush()
def bibliotik_decompress_html(data):
if data[:2] == b'<!':
return data.decode()
if data[:2] == b'b\x01':
obj = zlib.decompressobj(zdict=BIBLIOTIK_ZDICT)
decompressed_data = obj.decompress(data[2:]) + obj.flush()
return decompressed_data.decode()
else:
raise Exception('Unknown/invalid Bibliotik compression header')
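# Round-trip sketch: the preset dictionary front-loads the boilerplate shared by
# every Bibliotik page, so a single page compresses far smaller than with plain
# zlib. (`html` here stands for any fetched torrent-page string.)
# blob = bibliotik_compress_html(html)
# assert bibliotik_decompress_html(blob) == html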
```
#### File: plugins/bibliotik/views.py
```python
import pickle
from http import cookiejar
from rest_framework.generics import RetrieveUpdateDestroyAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from Harvest.cookie_utils import cookie_to_dict, cookie_from_dict
from Harvest.utils import TransactionAPIView, CORSBrowserExtensionView, get_logger
from plugins.bibliotik.client import BibliotikClient
from plugins.bibliotik.models import BibliotikClientConfig
from plugins.bibliotik.serializers import BibliotikClientConfigSerializer
logger = get_logger(__name__)
class Config(TransactionAPIView, RetrieveUpdateDestroyAPIView):
"""Configure the Bibliotik integration plugin."""
serializer_class = BibliotikClientConfigSerializer
def get_object(self):
try:
return BibliotikClientConfig.objects.select_for_update().get()
except BibliotikClientConfig.DoesNotExist:
return BibliotikClientConfig(
is_server_side_login_enabled=True,
)
# After a username/password update, set last_login_failed to False
def perform_update(self, serializer):
serializer.instance.last_login_failed = False
serializer.instance.clear_login_data()
super().perform_update(serializer)
class ConnectionTest(APIView):
"""Test the connection of the plugin to the user's Bibliotik account."""
def post(self, request):
try:
client = BibliotikClient()
client.get_index()
return Response({'success': True})
except Exception as ex:
return Response({'success': False, 'detail': str(ex)})
class ClearLoginData(TransactionAPIView, APIView):
def post(self, request):
config = BibliotikClientConfig.objects.select_for_update().get()
config.last_login_failed = False
config.clear_login_data()
config.save()
return Response({'success': True})
class Cookies(CORSBrowserExtensionView, APIView):
def _get_response_from_config(self, config):
if config and config.cookies:
return Response({
'cookies': [cookie_to_dict(c) for c in pickle.loads(config.cookies)],
})
return Response({'cookies': []})
def get(self, request):
config = BibliotikClientConfig.objects.first()
return self._get_response_from_config(config)
def put(self, request):
input_jar = cookiejar.CookieJar()
for cookie_data in request.data['cookies']:
input_jar.set_cookie(cookie_from_dict(cookie_data))
client = BibliotikClient()
# Try to reconcile offered vs. stored cookies.
config = client.accept_cookies_if_ok(input_jar)
# If we're left without cookies, try to login server-side, if enabled
if not config.cookies and config.is_server_side_login_enabled:
logger.debug('No working cookies found, trying server-side login.')
try:
client.get_index()
config.refresh_from_db()
logger.debug('Server-side login is working.')
            except Exception:
# Swallow exceptions, so we don't fail the client if we can't login.
logger.debug('Server-side login is also not working.')
return self._get_response_from_config(config)
```
#### File: management/commands/run_scheduler.py
```python
from django.core.management.base import BaseCommand
from Harvest.utils import get_logger
from task_queue.scheduler import QueueScheduler
logger = get_logger(__name__)
class Command(BaseCommand):
help = "Run the queue consumer"
def handle(self, *args, **options):
QueueScheduler().run()
```
#### File: harvest/task_queue/task_queue.py
```python
import pickle
from functools import partial, wraps
from django.db import close_old_connections
from task_queue.models import AsyncTask
def db_decorator(fn):
@wraps(fn)
def inner(*args, **kwargs):
try:
return fn(*args, **kwargs)
finally:
close_old_connections()
return inner
class AsyncTaskInfo:
def __init__(self, handler):
self.handler = db_decorator(handler)
self.handler_str = handler.__module__ + '.' + handler.__name__
class PeriodicTaskInfo(AsyncTaskInfo):
def __init__(self, handler, interval_seconds):
super().__init__(handler)
self.interval_seconds = interval_seconds
class _TaskQueue:
def __init__(self):
self.async_tasks = {}
self.periodic_tasks = {}
def async_task(self):
def decorator(fn):
task_info = AsyncTaskInfo(fn)
self.async_tasks[task_info.handler_str] = task_info
fn.delay = partial(self._execute_async, task_info)
return fn
return decorator
def periodic_task(self, interval_seconds):
def decorator(fn):
task_info = PeriodicTaskInfo(fn, interval_seconds)
self.periodic_tasks[task_info.handler_str] = task_info
fn.delay = partial(self._execute_async, task_info)
return fn
return decorator
def _execute_async(self, task_info, *args, **kwargs):
AsyncTask.objects.create(
handler=task_info.handler_str,
args_pickle=pickle.dumps({
'args': args,
'kwargs': kwargs,
})
)
TaskQueue = _TaskQueue()
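# Usage sketch (send_email is a hypothetical task; any module-level function works):
# @TaskQueue.async_task()
# def send_email(address):
#     ...
# send_email.delay('user@example.com')  # persists an AsyncTask row for the consumer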
```
#### File: harvest/torrents/alcazar_event_processor.py
```python
import time
from itertools import chain
from Harvest.utils import get_logger
from torrents.alcazar_client import update_torrent_from_alcazar, \
create_or_update_torrent_from_alcazar
from torrents.models import Torrent, Realm, TorrentInfo
from torrents.signals import torrent_removed
logger = get_logger(__name__)
class AlcazarEventProcessor:
@classmethod
def _process_removed_events(cls, realm, removed_info_hashes):
removed_torrents_qs = Torrent.objects.filter(realm=realm, info_hash__in=removed_info_hashes)
removed_info_hashes = list(removed_torrents_qs.values_list('info_hash', flat=True))
logger.debug('Matched {} Torrent objects for deletion.'.format(len(removed_info_hashes)))
removed_torrents_qs.delete()
for removed_info_hash in removed_info_hashes:
torrent_removed.send_robust(cls, realm=realm, info_hash=removed_info_hash)
@classmethod
def _process_added_torrents(cls, realm, added_torrent_states):
# Short-circuit to avoid any queries
if not added_torrent_states:
return
info_hashes = [state['info_hash'] for state in added_torrent_states]
torrent_info_ids = {
item[0]: item[1] for item in
TorrentInfo.objects.filter(
realm=realm,
info_hash__in=info_hashes,
is_deleted=False,
).values_list('info_hash', 'id')
}
for added_state in added_torrent_states:
create_or_update_torrent_from_alcazar(
realm=realm,
torrent_info_id=torrent_info_ids.get(added_state['info_hash']),
torrent_state=added_state,
)
@classmethod
def _process_events(cls, realm, events):
cls._process_removed_events(realm, events['removed'])
updated_info_hashes = [state['info_hash'] for state in chain(events['added'], events['updated'])]
existing_torrents = {
t.info_hash: t for t in Torrent.objects.filter(realm=realm, info_hash__in=updated_info_hashes)}
added_torrents_states = []
logger.debug('Matched {} Torrent objects for updating.', len(existing_torrents))
num_updated = 0
for updated_state in chain(events['added'], events['updated']):
torrent = existing_torrents.get(updated_state['info_hash'])
if not torrent:
added_torrents_states.append(updated_state)
else:
if update_torrent_from_alcazar(torrent, updated_state):
num_updated += 1
logger.debug('Actually updated {} in DB.', num_updated)
logger.debug('Matched {} new states for adding.', len(added_torrents_states))
cls._process_added_torrents(realm, added_torrents_states)
@classmethod
def _process(cls, events):
realms = {realm.name: realm for realm in Realm.objects.all()}
for realm_name, batch in events.items():
realm = realms.get(realm_name)
if not realm:
realm, _ = Realm.objects.get_or_create(name=realm_name)
logger.debug('Processing events for realm {}.', realm_name)
cls._process_events(realm, batch)
@classmethod
def process(cls, events):
start = time.time()
logger.debug('Processing events.')
retries_remaining = 3
while True:
try:
cls._process(events)
break
except Exception:
if retries_remaining > 0:
logger.warning('Exception during alcazar event processing. Retrying.')
retries_remaining -= 1
else:
logger.exception('Exhausted event processing retries.')
raise
logger.debug('Completed alcazar update in {:.3f}.', time.time() - start)
```
#### File: torrents/migrations/0022_torrentinfo_raw_response_binary.py
```python
from django.db import migrations, models, transaction
from Harvest.utils import qs_chunks
def encode_all_raw_responses(apps, schema_editor):
TorrentInfo = apps.get_model('torrents', 'TorrentInfo')
for batch in qs_chunks(TorrentInfo.objects.all(), 1000):
with transaction.atomic():
for ti in batch:
ti.raw_response_binary = ti.raw_response.encode()
ti.save(update_fields=('raw_response_binary',))
class Migration(migrations.Migration):
dependencies = [
('torrents', '0021_auto_20190322_1030'),
]
operations = [
migrations.AddField(
model_name='torrentinfo',
name='raw_response_binary',
field=models.BinaryField(default=b''),
preserve_default=False,
),
migrations.RunPython(encode_all_raw_responses, migrations.RunPython.noop, elidable=True),
]
```
#### File: harvest/torrents/serializers.py
```python
from rest_framework import serializers
from torrents.models import AlcazarClientConfig, Realm, Torrent, TorrentInfo, DownloadLocation
from trackers.registry import TrackerRegistry
class AlcazarClientConfigSerializer(serializers.ModelSerializer):
class Meta:
model = AlcazarClientConfig
fields = ('base_url', 'token', 'unify_single_file_torrents')
class RealmSerializer(serializers.ModelSerializer):
class Meta:
model = Realm
fields = ('id', 'name')
class TorrentInfoSerializer(serializers.ModelSerializer):
metadata = serializers.SerializerMethodField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.metadata_serializers_by_realm_id = None
def _ensure_metadata_serializers_by_realm_id(self):
if self.metadata_serializers_by_realm_id is not None:
return self.metadata_serializers_by_realm_id
realms_by_name = {r.name: r for r in Realm.objects.all()}
self.metadata_serializers_by_realm_id = {}
for tracker in TrackerRegistry.get_plugins():
            realm = realms_by_name.get(tracker.name)
if not realm:
continue
serializer_class = tracker.torrent_info_metadata_serializer_class
if serializer_class is None:
continue
serializer = serializer_class()
serializer.bind('metadata', self)
self.metadata_serializers_by_realm_id[realm.id] = serializer
def get_metadata(self, obj):
if not self.context.get('serialize_metadata', True):
return None
self._ensure_metadata_serializers_by_realm_id()
metadata_serializer = self.metadata_serializers_by_realm_id.get(obj.realm_id)
if metadata_serializer:
return metadata_serializer.to_representation(obj)
class Meta:
model = TorrentInfo
fields = ('info_hash', 'tracker_id', 'fetched_datetime', 'metadata')
class TorrentSerializer(serializers.ModelSerializer):
realm = serializers.IntegerField(source='realm_id')
torrent_info = TorrentInfoSerializer()
@classmethod
def get_context_from_request_data(cls, data):
return {
'serialize_metadata': bool(int(data.get('serialize_metadata', '1')))
}
class Meta:
model = Torrent
fields = '__all__'
class DownloadLocationSerializer(serializers.ModelSerializer):
realm = serializers.PrimaryKeyRelatedField(queryset=Realm.objects.all())
class Meta:
model = DownloadLocation
fields = '__all__'
```
#### File: harvest/trackers/registry.py
```python
from rest_framework.exceptions import APIException
class PluginMissingException(APIException):
status_code = 400
def __init__(self, name, action):
super().__init__('Missing plugin {} to perform {}.'.format(name, action))
class DuplicatePluginException(Exception):
def __init__(self, name, *args, **kwargs):
super().__init__('Trying to register a duplicate plugin {}.'.format(name), *args, **kwargs)
class InvalidPluginException(Exception):
def __init__(self, name, *args, **kwargs):
        super().__init__('Trying to register an invalid plugin {}.'.format(name), *args, **kwargs)
class PluginRegistry:
def __init__(self):
self._plugins = {}
def register(self, plugin):
if plugin.name in self._plugins:
raise DuplicatePluginException(plugin.name)
self._plugins[plugin.name] = plugin
def get_plugin(self, name, action=None):
plugin = self._plugins.get(name)
if plugin is None:
raise PluginMissingException(name, action)
return plugin
def get_plugins(self):
return list(self._plugins.values())
TrackerRegistry = PluginRegistry()
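# Usage sketch (MyTracker is hypothetical; real tracker plugins elsewhere in the
# codebase register themselves the same way):
# class MyTracker:
#     name = 'mytracker'
# TrackerRegistry.register(MyTracker())
# plugin = TrackerRegistry.get_plugin('mytracker', action='fetch torrent')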
```
#### File: harvest/upload_studio/apps.py
```python
from django.apps import AppConfig
from torrents import signals
from upload_studio.executor_registry import ExecutorRegistry
class UploadStudioConfig(AppConfig):
name = 'upload_studio'
def ready(self):
from .executors import (manual_edit, lame_transcode, create_torrent_file, finish_upload, sox_process,
fix_filename_track_numbers)
ExecutorRegistry.register_executor(manual_edit.ManualEditExecutor)
ExecutorRegistry.register_executor(lame_transcode.LAMETranscoderExecutor)
ExecutorRegistry.register_executor(create_torrent_file.CreateTorrentFileExecutor)
ExecutorRegistry.register_executor(finish_upload.FinishUploadExecutor)
ExecutorRegistry.register_executor(sox_process.SoxProcessExecutor)
ExecutorRegistry.register_executor(fix_filename_track_numbers.FixFilenameTrackNumbers)
from .receivers import on_torrent_finished
signals.torrent_finished.connect(on_torrent_finished)
```
#### File: upload_studio/executors/create_torrent_file.py
```python
import os
import subprocess
from subprocess import CalledProcessError
import bencode
from Harvest.path_utils import list_abs_files
from Harvest.utils import get_logger
from trackers.utils import TorrentFileInfo
from upload_studio.step_executor import StepExecutor
logger = get_logger(__name__)
BAD_FILES = {'thumbs.db'}
class CreateTorrentFileExecutor(StepExecutor):
name = 'create_torrent_files'
description = 'Creates a .torrent file.'
def __init__(self, *args, announce, extra_info_keys=None, **kwargs):
super().__init__(*args, **kwargs)
self.announce = announce
self.extra_info_keys = extra_info_keys
@property
def torrent_file_path(self):
return os.path.join(self.step.get_area_path('torrent_file'), self.metadata.torrent_name + '.torrent')
def check_prerequisites(self):
try:
self.mktorrent_version = subprocess.check_output(['mktorrent', '--help']).decode().split('\n')[0]
except FileNotFoundError:
self.raise_error('mktorrent not found in path. Make sure mktorrent is installed.')
def clean_temp_hidden_files(self):
for file in list_abs_files(self.step.data_path):
            if os.path.basename(file).startswith('.') or os.path.basename(file).lower() in BAD_FILES:
logger.info('{} removing bad file {}.', self.project, file)
os.remove(file)
def create_torrent(self):
os.makedirs(os.path.dirname(self.torrent_file_path), exist_ok=True)
args = [
'mktorrent',
'-a', self.announce,
'-p',
'-n', self.metadata.torrent_name,
'-o', self.torrent_file_path,
self.step.data_path
]
logger.info('{} creating .torrent file with command: {}', self.project, args)
try:
subprocess.check_output(args, encoding='utf-8', stderr=subprocess.STDOUT)
except CalledProcessError as exc:
raise Exception('mktorrent failed with code {}: {}'.format(exc.returncode, exc.stdout.strip()))
def add_extra_info_keys(self):
if not self.extra_info_keys:
return
logger.info('{} adding extra info keys {}.', self.project, self.extra_info_keys)
with open(self.torrent_file_path, 'rb') as f:
meta_info = bencode.bdecode(f.read())
meta_info['info'].update(self.extra_info_keys)
with open(self.torrent_file_path, 'wb') as f:
f.write(bencode.bencode(meta_info))
def record_additional_metadata(self):
torrent_file_info = TorrentFileInfo.from_file(self.torrent_file_path)
self.metadata.torrent_info_hash = torrent_file_info.info_hash
self.metadata.processing_steps.append('Generate .torrent file with info hash {}.'.format(
self.metadata.torrent_info_hash))
def handle_run(self):
self.check_prerequisites()
self.copy_prev_step_files()
self.clean_temp_hidden_files()
self.create_torrent()
self.add_extra_info_keys()
self.record_additional_metadata()
```
#### File: harvest/upload_studio/tasks.py
```python
from task_queue.task_queue import TaskQueue
from upload_studio.steps_runner import StepsRunner
@TaskQueue.async_task()
def project_run_all(project_id):
runner = StepsRunner(project_id)
runner.run_all()
@TaskQueue.async_task()
def project_run_one(project_id):
runner = StepsRunner(project_id)
runner.run_one()
```
#### File: harvest/upload_studio/utils.py
```python
import subprocess
def execute_subprocess_chain(chain):
processes = []
p_stdin = None
for args in chain:
p_stdout = None if args is chain[-1] else subprocess.PIPE
p = subprocess.Popen(args, stdin=p_stdin, stdout=p_stdout)
processes.append(p)
p_stdin = p.stdout
for p in reversed(processes):
p.communicate()
for p in processes:
if p.returncode != 0:
raise Exception('Subprocess returned non-zero.')
def pprint_subprocess_chain(chain):
return ' | '.join(' '.join(args) for args in chain)
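# Example: a two-stage pipeline, roughly `flac -dcs in.flac | lame - out.mp3`
# (hypothetical filenames; each element of chain is one argv list):
# chain = [['flac', '-dcs', 'in.flac'], ['lame', '-', 'out.mp3']]
# print(pprint_subprocess_chain(chain))
# execute_subprocess_chain(chain)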
```
#### File: harvest/upload_studio/views.py
```python
from django.db import OperationalError, transaction
from rest_framework import status
from rest_framework.exceptions import APIException
from rest_framework.generics import RetrieveDestroyAPIView, GenericAPIView
from rest_framework.response import Response
from Harvest.utils import TransactionAPIView, CORSBrowserExtensionView
from monitoring.models import LogEntry
from upload_studio.executor_registry import ExecutorRegistry
from upload_studio.models import Project, ProjectStepWarning, ProjectStep
from upload_studio.serializers import ProjectShallowSerializer, ProjectDeepSerializer
from upload_studio.tasks import project_run_all, project_run_one
class Projects(CORSBrowserExtensionView, GenericAPIView):
queryset = Project.objects.all()
def filter_queryset(self, queryset):
source_tracker_id = self.request.query_params.get('source_tracker_id')
if source_tracker_id:
queryset = queryset.filter(source_torrent__torrent_info__tracker_id=source_tracker_id)
project_type = self.request.query_params.get('project_type')
if project_type:
queryset = queryset.filter(project_type=project_type)
return queryset
def get(self, request):
queryset = self.filter_queryset(self.get_queryset())
active_qs = queryset.filter(is_finished=False).order_by('-created_datetime')
history_qs = queryset.filter(is_finished=True).order_by('-finished_datetime')[:50]
return Response({
'active': ProjectShallowSerializer(active_qs, many=True).data,
'history': ProjectShallowSerializer(history_qs, many=True).data,
})
class ProjectView(RetrieveDestroyAPIView):
queryset = Project.objects.all()
serializer_class = ProjectDeepSerializer
@transaction.atomic
def perform_destroy(self, instance):
instance = self.queryset.select_for_update().get(id=instance.id)
instance.delete_all_data()
instance.delete()
class ProjectMutatorView(TransactionAPIView):
def perform_work(self, request, **kwargs):
raise NotImplementedError()
def post(self, request, pk, **kwargs):
try:
self.project = Project.objects.select_for_update(nowait=True).get(id=pk)
except OperationalError:
raise APIException(
'Unable to perform action on project while it is running.',
code=status.HTTP_400_BAD_REQUEST,
)
self.perform_work(request, **kwargs)
# Get a fresh copy for serialization
self.project = Project.objects.get(id=pk)
return Response(ProjectDeepSerializer(self.project).data)
class ProjectResetToStep(ProjectMutatorView):
def perform_work(self, request, **kwargs):
step_index = int(request.data['step'])
self.project.reset(step_index)
class ProjectRunAll(ProjectMutatorView):
def perform_work(self, request, **kwargs):
project_run_all.delay(self.project.id)
class ProjectRunOne(ProjectMutatorView):
def perform_work(self, request, **kwargs):
project_run_one.delay(self.project.id)
class ProjectFinish(ProjectMutatorView):
def perform_work(self, request, **kwargs):
LogEntry.info('Manually finished upload studio {}.'.format(self.project))
self.project.finish()
class ProjectInsertStep(ProjectMutatorView):
def perform_work(self, request, **kwargs):
index = request.data['index']
executor_name = request.data['executor_name']
# Ensure the executor exists
ExecutorRegistry.get_executor(executor_name)
self.project.insert_step(index, ProjectStep(
executor_name=executor_name,
))
class WarningAck(ProjectMutatorView):
def perform_work(self, request, **kwargs):
try:
warning = ProjectStepWarning.objects.get(step__project=self.project, id=kwargs['warning_id'])
except ProjectStepWarning.DoesNotExist:
raise APIException('Warning does not exist.', code=status.HTTP_404_NOT_FOUND)
if warning.acked:
raise APIException('Warning already acked.', code=status.HTTP_400_BAD_REQUEST)
warning.acked = True
warning.save()
step = self.project.next_step
if step and not step.projectstepwarning_set.filter(acked=False).exists():
project_run_all.delay(self.project.id)
``` |
{
"source": "2601677867/httpx",
"score": 2
} |
#### File: httpx/httpx/_main.py
```python
import functools
import json
import sys
import typing
import click
import httpcore
import pygments.lexers
import pygments.util
import rich.console
import rich.progress
import rich.syntax
from ._client import Client
from ._exceptions import RequestError
from ._models import Response
from ._status_codes import codes
def print_help() -> None:
console = rich.console.Console()
console.print("[bold]HTTPX :butterfly:", justify="center")
console.print()
console.print("A next generation HTTP client.", justify="center")
console.print()
console.print(
"Usage: [bold]httpx[/bold] [cyan]<URL> [OPTIONS][/cyan] ", justify="left"
)
console.print()
table = rich.table.Table.grid(padding=1, pad_edge=True)
table.add_column("Parameter", no_wrap=True, justify="left", style="bold")
table.add_column("Description")
table.add_row(
"-m, --method [cyan]METHOD",
"Request method, such as GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD.\n"
"[Default: GET, or POST if a request body is included]",
)
table.add_row(
"-p, --params [cyan]<NAME VALUE> ...",
"Query parameters to include in the request URL.",
)
table.add_row(
"-c, --content [cyan]TEXT", "Byte content to include in the request body."
)
table.add_row(
"-d, --data [cyan]<NAME VALUE> ...", "Form data to include in the request body."
)
table.add_row(
"-f, --files [cyan]<NAME FILENAME> ...",
"Form files to include in the request body.",
)
table.add_row("-j, --json [cyan]TEXT", "JSON data to include in the request body.")
table.add_row(
"-h, --headers [cyan]<NAME VALUE> ...",
"Include additional HTTP headers in the request.",
)
table.add_row(
"--cookies [cyan]<NAME VALUE> ...", "Cookies to include in the request."
)
table.add_row(
"--auth [cyan]<USER PASS>",
"Username and password to include in the request. Specify '-' for the password to use "
"a password prompt. Note that using --verbose/-v will expose the Authorization "
"header, including the password encoding in a trivially reversible format.",
)
table.add_row(
"--proxy [cyan]URL",
"Send the request via a proxy. Should be the URL giving the proxy address.",
)
table.add_row(
"--timeout [cyan]FLOAT",
"Timeout value to use for network operations, such as establishing the connection, "
"reading some data, etc... [Default: 5.0]",
)
table.add_row("--follow-redirects", "Automatically follow redirects.")
table.add_row("--no-verify", "Disable SSL verification.")
table.add_row(
"--http2", "Send the request using HTTP/2, if the remote server supports it."
)
table.add_row(
"--download [cyan]FILE",
"Save the response content as a file, rather than displaying it.",
)
table.add_row("-v, --verbose", "Verbose output. Show request as well as response.")
table.add_row("--help", "Show this message and exit.")
console.print(table)
def get_lexer_for_response(response: Response) -> str:
content_type = response.headers.get("Content-Type")
if content_type is not None:
mime_type, _, _ = content_type.partition(";")
try:
return pygments.lexers.get_lexer_for_mimetype(mime_type.strip()).name
except pygments.util.ClassNotFound: # pragma: nocover
pass
return "" # pragma: nocover
def format_request_headers(request: httpcore.Request, http2: bool = False) -> str:
version = "HTTP/2" if http2 else "HTTP/1.1"
headers = [
(name.lower() if http2 else name, value) for name, value in request.headers
]
method = request.method.decode("ascii")
target = request.url.target.decode("ascii")
lines = [f"{method} {target} {version}"] + [
f"{name.decode('ascii')}: {value.decode('ascii')}" for name, value in headers
]
return "\n".join(lines)
def format_response_headers(
http_version: bytes,
status: int,
reason_phrase: typing.Optional[bytes],
headers: typing.List[typing.Tuple[bytes, bytes]],
) -> str:
version = http_version.decode("ascii")
reason = (
codes.get_reason_phrase(status)
if reason_phrase is None
else reason_phrase.decode("ascii")
)
lines = [f"{version} {status} {reason}"] + [
f"{name.decode('ascii')}: {value.decode('ascii')}" for name, value in headers
]
return "\n".join(lines)
def print_request_headers(request: httpcore.Request, http2: bool = False) -> None:
console = rich.console.Console()
http_text = format_request_headers(request, http2=http2)
syntax = rich.syntax.Syntax(http_text, "http", theme="ansi_dark", word_wrap=True)
console.print(syntax)
syntax = rich.syntax.Syntax("", "http", theme="ansi_dark", word_wrap=True)
console.print(syntax)
def print_response_headers(
http_version: bytes,
status: int,
reason_phrase: typing.Optional[bytes],
headers: typing.List[typing.Tuple[bytes, bytes]],
) -> None:
console = rich.console.Console()
http_text = format_response_headers(http_version, status, reason_phrase, headers)
syntax = rich.syntax.Syntax(http_text, "http", theme="ansi_dark", word_wrap=True)
console.print(syntax)
syntax = rich.syntax.Syntax("", "http", theme="ansi_dark", word_wrap=True)
console.print(syntax)
def print_response(response: Response) -> None:
console = rich.console.Console()
lexer_name = get_lexer_for_response(response)
if lexer_name:
if lexer_name.lower() == "json":
try:
data = response.json()
text = json.dumps(data, indent=4)
except ValueError: # pragma: nocover
text = response.text
else:
text = response.text
syntax = rich.syntax.Syntax(text, lexer_name, theme="ansi_dark", word_wrap=True)
console.print(syntax)
else: # pragma: nocover
console.print(response.text)
def format_certificate(cert: dict) -> str: # pragma: nocover
lines = []
for key, value in cert.items():
if isinstance(value, (list, tuple)):
lines.append(f"* {key}:")
for item in value:
if key in ("subject", "issuer"):
for sub_item in item:
lines.append(f"* {sub_item[0]}: {sub_item[1]!r}")
elif isinstance(item, tuple) and len(item) == 2:
lines.append(f"* {item[0]}: {item[1]!r}")
else:
lines.append(f"* {item!r}")
else:
lines.append(f"* {key}: {value!r}")
return "\n".join(lines)
def trace(name: str, info: dict, verbose: bool = False) -> None:
console = rich.console.Console()
if name == "connection.connect_tcp.started" and verbose:
host = info["host"]
console.print(f"* Connecting to {host!r}")
elif name == "connection.connect_tcp.complete" and verbose:
stream = info["return_value"]
server_addr = stream.get_extra_info("server_addr")
console.print(f"* Connected to {server_addr[0]!r} on port {server_addr[1]}")
elif name == "connection.start_tls.complete" and verbose: # pragma: nocover
stream = info["return_value"]
ssl_object = stream.get_extra_info("ssl_object")
version = ssl_object.version()
cipher = ssl_object.cipher()
server_cert = ssl_object.getpeercert()
alpn = ssl_object.selected_alpn_protocol()
console.print(f"* SSL established using {version!r} / {cipher[0]!r}")
console.print(f"* Selected ALPN protocol: {alpn!r}")
if server_cert:
console.print("* Server certificate:")
console.print(format_certificate(server_cert))
elif name == "http11.send_request_headers.started" and verbose:
request = info["request"]
print_request_headers(request, http2=False)
elif name == "http2.send_request_headers.started" and verbose: # pragma: nocover
request = info["request"]
print_request_headers(request, http2=True)
elif name == "http11.receive_response_headers.complete":
http_version, status, reason_phrase, headers = info["return_value"]
print_response_headers(http_version, status, reason_phrase, headers)
elif name == "http2.receive_response_headers.complete": # pragma: nocover
status, headers = info["return_value"]
http_version = b"HTTP/2"
reason_phrase = None
print_response_headers(http_version, status, reason_phrase, headers)
def download_response(response: Response, download: typing.BinaryIO) -> None:
console = rich.console.Console()
syntax = rich.syntax.Syntax("", "http", theme="ansi_dark", word_wrap=True)
console.print(syntax)
content_length = response.headers.get("Content-Length")
kwargs = {"total": int(content_length)} if content_length else {}
with rich.progress.Progress(
"[progress.description]{task.description}",
"[progress.percentage]{task.percentage:>3.0f}%",
rich.progress.BarColumn(bar_width=None),
rich.progress.DownloadColumn(),
rich.progress.TransferSpeedColumn(),
) as progress:
description = f"Downloading [bold]{download.name}"
download_task = progress.add_task(description, **kwargs) # type: ignore
for chunk in response.iter_bytes():
download.write(chunk)
progress.update(download_task, completed=response.num_bytes_downloaded)
def validate_json(
ctx: click.Context,
param: typing.Union[click.Option, click.Parameter],
value: typing.Any,
) -> typing.Any:
if value is None:
return None
try:
return json.loads(value)
except json.JSONDecodeError: # pragma: nocover
raise click.BadParameter("Not valid JSON")
def validate_auth(
ctx: click.Context,
param: typing.Union[click.Option, click.Parameter],
value: typing.Any,
) -> typing.Any:
if value == (None, None):
return None
username, password = value
if password == "-": # pragma: nocover
password = click.prompt("Password", hide_input=True)
return (username, password)
def handle_help(
ctx: click.Context,
param: typing.Union[click.Option, click.Parameter],
value: typing.Any,
) -> None:
if not value or ctx.resilient_parsing:
return
print_help()
ctx.exit()
@click.command(add_help_option=False)
@click.argument("url", type=str)
@click.option(
"--method",
"-m",
"method",
type=str,
help=(
"Request method, such as GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD. "
"[Default: GET, or POST if a request body is included]"
),
)
@click.option(
"--params",
"-p",
"params",
type=(str, str),
multiple=True,
help="Query parameters to include in the request URL.",
)
@click.option(
"--content",
"-c",
"content",
type=str,
help="Byte content to include in the request body.",
)
@click.option(
"--data",
"-d",
"data",
type=(str, str),
multiple=True,
help="Form data to include in the request body.",
)
@click.option(
"--files",
"-f",
"files",
type=(str, click.File(mode="rb")),
multiple=True,
help="Form files to include in the request body.",
)
@click.option(
"--json",
"-j",
"json",
type=str,
callback=validate_json,
help="JSON data to include in the request body.",
)
@click.option(
"--headers",
"-h",
"headers",
type=(str, str),
multiple=True,
help="Include additional HTTP headers in the request.",
)
@click.option(
"--cookies",
"cookies",
type=(str, str),
multiple=True,
help="Cookies to include in the request.",
)
@click.option(
"--auth",
"auth",
type=(str, str),
default=(None, None),
callback=validate_auth,
help=(
"Username and password to include in the request. "
"Specify '-' for the password to use a password prompt. "
"Note that using --verbose/-v will expose the Authorization header, "
"including the password encoding in a trivially reversible format."
),
)
@click.option(
"--proxies",
"proxies",
type=str,
default=None,
help="Send the request via a proxy. Should be the URL giving the proxy address.",
)
@click.option(
"--timeout",
"timeout",
type=float,
default=5.0,
help=(
"Timeout value to use for network operations, such as establishing the "
"connection, reading some data, etc... [Default: 5.0]"
),
)
@click.option(
"--follow-redirects",
"follow_redirects",
is_flag=True,
default=False,
help="Automatically follow redirects.",
)
@click.option(
"--no-verify",
"verify",
is_flag=True,
default=True,
help="Disable SSL verification.",
)
@click.option(
"--http2",
"http2",
type=bool,
is_flag=True,
default=False,
help="Send the request using HTTP/2, if the remote server supports it.",
)
@click.option(
"--download",
type=click.File("wb"),
help="Save the response content as a file, rather than displaying it.",
)
@click.option(
"--verbose",
"-v",
type=bool,
is_flag=True,
default=False,
help="Verbose. Show request as well as response.",
)
@click.option(
"--help",
is_flag=True,
is_eager=True,
expose_value=False,
callback=handle_help,
help="Show this message and exit.",
)
def main(
url: str,
method: str,
params: typing.List[typing.Tuple[str, str]],
content: str,
data: typing.List[typing.Tuple[str, str]],
files: typing.List[typing.Tuple[str, click.File]],
json: str,
headers: typing.List[typing.Tuple[str, str]],
cookies: typing.List[typing.Tuple[str, str]],
auth: typing.Optional[typing.Tuple[str, str]],
proxies: str,
timeout: float,
follow_redirects: bool,
verify: bool,
http2: bool,
download: typing.Optional[typing.BinaryIO],
verbose: bool,
) -> None:
"""
An HTTP command line client.
Sends a request and displays the response.
"""
if not method:
method = "POST" if content or data or files or json else "GET"
try:
with Client(
proxies=proxies,
timeout=timeout,
verify=verify,
http2=http2,
) as client:
with client.stream(
method,
url,
params=list(params),
content=content,
data=dict(data),
files=files, # type: ignore
json=json,
headers=headers,
cookies=dict(cookies),
auth=auth,
follow_redirects=follow_redirects,
extensions={"trace": functools.partial(trace, verbose=verbose)},
) as response:
if download is not None:
download_response(response, download)
else:
response.read()
if response.content:
print_response(response)
except RequestError as exc:
console = rich.console.Console()
console.print(f"[red]{type(exc).__name__}[/red]: {str(exc)}")
sys.exit(1)
sys.exit(0 if response.is_success else 1)
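# Example invocations, assuming the CLI extra is installed (pip install 'httpx[cli]'):
#   httpx https://www.example.org
#   httpx -m POST https://httpbin.org/post -j '{"hello": "world"}' -v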
``` |
{
"source": "2602Sum/Real_estate_price_prediction",
"score": 4
} |
#### File: 2602Sum/Real_estate_price_prediction/app.py
```python
import numpy as np
import pickle
import pandas as pd
import streamlit as st
from sklearn.preprocessing import MinMaxScaler
mms = MinMaxScaler()
with open('saved_pickle', 'rb') as model_file:
    regressor = pickle.load(model_file)
def real_estate_price_prediction(House_age, Distance_to_the_nearest_MRT_station,
Number_of_convenience_stores):
    # Streamlit text inputs arrive as strings, so cast to float before predicting
    X = np.array([[float(House_age), float(Distance_to_the_nearest_MRT_station),
                   float(Number_of_convenience_stores)]])
#X_norm = mms.fit_transform(X)
prediction=regressor.predict(X)
print(prediction)
return prediction
def main():
st.title("Real Estate Price Prediction")
html_temp = """
<div style="background-color:tomato;padding:10px">
<h2 style="color:white;text-align:center;">Streamlit Real Estate Price Prediction ML App </h2>
</div>
"""
st.markdown(html_temp,unsafe_allow_html=True)
House_age = st.text_input("House_age")
Distance_to_the_nearest_MRT_station = st.text_input("Distance_to_the_nearest_MRT_station")
Number_of_convenience_stores = st.text_input("Number_of_convenience_stores")
result=""
if st.button("Predict"):
result=real_estate_price_prediction(House_age, Distance_to_the_nearest_MRT_station,
Number_of_convenience_stores)
st.success('The output is {}'.format(result))
if st.button("About"):
st.text("Lets LEarn")
st.text("Built with Streamlit")
if __name__=='__main__':
main()
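# Run locally with: streamlit run app.py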
``` |
{
"source": "260601068j/X-Planef",
"score": 3
} |
#### File: 260601068j/X-Planef/hardware_analysis.py
```python
from __future__ import division, print_function
import argparse
import collections
import logging
from contextlib import suppress  # used below; may also be re-exported by utils
from typing import Dict, Union   # used in the type hints below
from ga_library import *
from utils import *
from collections import defaultdict, OrderedDict
SHOW_ABSOLUTE_NUMBERS = False
_out = ''
def _log(s, end='\n'):
global _out
_out += s + end
file_name_suffix = ''
def main():
argparser = argparse.ArgumentParser(description='Dumps hardware stats from X-Plane Desktop; you probably want to pipe the output to a CSV file')
argparser.add_argument('--version', type=int, default=11, help='The major version of X-Plane you want data on (10 or 11)')
args = argparser.parse_args()
write_hardware_analysis_files(Version.v11 if args.version == 11 else Version.v10, UserGroup.PaidOnly)
def write_hardware_analysis_files(version: Union[int, Version], user_group: UserGroup, csv_path=None):
"""
:type csv_path: Union[str,None]
"""
global file_name_suffix
file_name_suffix = "_%s_%s_%s" % (version, user_group.name, today_file_suffix())
qm = SimpleQueryMgr(GaService.desktop(), version, Metric.Users, user_group)
perform_cpu_analysis(qm.query(CustomDimension.Cpu))
perform_flight_controls_analysis(qm.query(CustomDimension.FlightControls))
stats = HardwareStats(GaService.desktop(), version, Metric.Users, user_group)
grapher = HardwareGrapher(stats)
perform_ram_analysis(stats)
perform_gpu_analysis(stats)
perform_os_analysis(stats, grapher)
perform_vr_analysis(stats, grapher)
if not csv_path:
csv_path = "hardware_analysis%s.csv" % file_name_suffix
with open(csv_path, 'w') as out_file:
out_file.write(_out)
out_file.write('\n')
class HardwareStats:
def __init__(self, service: GaService, version: Union[int, Version]=Version.v11, user_group: UserGroup=UserGroup.PaidOnly):
self.qm = SimpleQueryMgr(service, version, Metric.Users, user_group)
def operating_systems(self) -> Dict[str, int]:
platform_count = defaultdict(int)
for row in self.qm.query(CustomDimension.Os):
val = str_to_int(row[1])
os_name = classify_platform(row[0])
platform_count[os_name] += val
return counts_to_percents(platform_count)
def operating_system_versions(self) -> Dict[str, Dict[str, int]]:
version_count = defaultdict(lambda: defaultdict(int))
for row in self.qm.query(CustomDimension.Os):
val = str_to_int(row[1])
os_name = classify_platform(row[0])
version = get_os_version(row[0])
if version:
version_count[os_name][version] += val
return version_count
def ram_amounts(self) -> Dict[str, int]:
users_with_at_least_this_much_ram = collections.defaultdict(int)
total_users = 0
for row in self.qm.query(CustomDimension.Ram):
val = str_to_int(row[1])
total_users += val
ram_class = int(row[0])
if ram_class >= 2:
users_with_at_least_this_much_ram["2GB"] += val
if ram_class >= 4:
users_with_at_least_this_much_ram["4GB"] += val
if ram_class >= 8:
users_with_at_least_this_much_ram["8GB"] += val
if ram_class >= 16:
users_with_at_least_this_much_ram["16GB"] += val
if ram_class >= 32:
users_with_at_least_this_much_ram["32GB"] += val
return counts_to_percents(users_with_at_least_this_much_ram, total_users)
def gpu_manufacturers(self) -> Dict[str, int]:
out = defaultdict(int)
for row in self.qm.query(CustomDimension.Gpu):
out[get_gpu_manufacturer(row[0])] += str_to_int(row[1])
out = counts_to_percents(out)
with suppress(KeyError):
if out['Unknown'] < 0.3:
del out['Unknown']
return out
def gpu_generation(self) -> Dict[str, int]:
out = defaultdict(int)
for row in self.qm.query(CustomDimension.Gpu):
out[get_gpu_generation(row[0])] += str_to_int(row[1])
return counts_to_percents(out)
def gpu_platform(self) -> Dict[str, int]:
out = defaultdict(int)
for row in self.qm.query(CustomDimension.Gpu):
out[get_mobile_versus_desktop(row[0])] += str_to_int(row[1])
return counts_to_percents(out)
def vr_headsets(self):
known_headsets = {
'rift': 'Oculus Rift',
'oculus': 'Oculus Rift',
'pimax 5k': 'Pimax 5K',
'psvr': 'PSVR Headset',
'windows': 'Windows Mixed Reality',
'lighthouse': 'OpenVR (like HTC Vive)',
'vive': 'OpenVR (like HTC Vive)',
'aapvr': 'Phone',
'vridge': 'Phone',
'ivry': 'Phone',
'phonevr': 'Phone',
}
headset_count = collections.defaultdict(int)
for row in self.qm.query(CustomDimension.VrHeadset):
label = row[0]
for search_term, deduped_name in known_headsets.items():
if search_term in label.lower():
label = deduped_name
break
else:
logging.debug('unknown headset: ' + label)
headset_count[label] += str_to_int(row[1])
return counts_to_percents(headset_count, smush_into_other_below_percent=1)
def vr_usage(self):
vr_start_date = Version.v1120r4.value.start_date
total_users = sum(str_to_int(row[1]) for row in self.qm.query(CustomDimension.Ram, override_start_date=vr_start_date))
vr_users = sum(str_to_int(row[1]) for row in self.qm.query(CustomDimension.VrHeadset, override_start_date=vr_start_date))
vr_pct = round((vr_users / total_users) * 100, 2)
return {
'Have Used VR': vr_pct,
'2-D Monitor Only': 100 - vr_pct
}
@property
def total_users(self):
ram_data = self.qm.query(CustomDimension.Ram)
return sum(str_to_int(row[1]) for row in ram_data)
class HardwareGrapher:
def __init__(self, stats: HardwareStats):
self.stats = stats
def operating_systems(self) -> plotly.graph_objs.Figure:
return make_pie_chart_figure(self.stats.operating_systems())
def ram_amounts(self) -> plotly.graph_objs.Figure:
return make_bar_chart_figure(self.stats.ram_amounts(), 'Users with at Least <em>x</em> GB RAM', make_x_label=lambda l: str(l) + '+')
def gpu_mobile_vs_desktop(self) -> plotly.graph_objs.Figure:
return make_pie_chart_figure(self.stats.gpu_platform())
def gpu_manufacturers(self) -> plotly.graph_objs.Figure:
return make_bar_chart_figure(self.stats.gpu_manufacturers(), 'GPU Manufacturers')
def vr_headsets(self) -> plotly.graph_objs.Figure:
return make_bar_chart_figure(self.stats.vr_headsets(), 'VR Headsets', already_sorted=True, y_label='% VR Users')
def vr_usage(self) -> plotly.graph_objs.Figure:
return make_pie_chart_figure(self.stats.vr_usage(), top_pad_px=40)
def perform_os_analysis(stats: HardwareStats, grapher: HardwareGrapher):
# Overall platform breakdown
platform_count = stats.operating_systems()
_log("PLATFORM BREAKDOWN")
dump_generic_count_dict(platform_count, "Operating System", "Machines")
plotly.offline.plot(grapher.operating_systems(), image='png', image_filename='os_breakdown' + file_name_suffix, image_width=1024, output_type='file')
version_count = stats.operating_system_versions()
_log("OS VERSIONS")
dump_generic_count_dict(version_count["Windows"], "OS Version", "Windows Machines")
dump_generic_count_dict(version_count["Mac"], "OS Version", "Macs")
dump_generic_count_dict(version_count["Linux"], "OS Version", "Linux Machines")
def clean_up_string_formatting(string):
return str(string).strip()
def perform_cpu_analysis(results_rows):
def get_cpu_core_count(cpu_line):
stats = cpu_line.split(" - ")
for stat in stats:
if stat.startswith("Cores:"):
label_and_cores = stat.split(" ")
return int(label_and_cores[1])
return 0
cpu_cores = collections.defaultdict(int)
for row in results_rows:
val = str_to_int(row[1])
core_count = get_cpu_core_count(row[0])
cpu_cores[core_count] += val
_log("NUMBER OF CPU CORES")
dump_generic_count_dict(cpu_cores, "CPU Cores", "Machines")
def perform_vr_analysis(stats: HardwareStats, grapher: HardwareGrapher):
_log("VR USAGE")
dump_generic_count_dict(stats.vr_usage(), "VR Status", "Users")
_log("VR HEADSETS")
dump_generic_count_dict(stats.vr_headsets(), "Headset Type", "Users")
plotly.offline.plot(grapher.vr_usage(), image='png', image_filename='vr_usage' + file_name_suffix, image_width=1024, output_type='file')
plotly.offline.plot(grapher.vr_headsets(), image='png', image_filename='vr_headsets' + file_name_suffix, image_width=1024, output_type='file')
def get_gpu_manufacturer(gpu_string):
if lower_contains(gpu_string, ('firepro', 'firegl', 'radeon', 'amd ')) or gpu_string.startswith(('67EF', '67DF', 'ASUS EAH', 'ASUS R')):
return "AMD/ATI"
elif lower_contains(gpu_string, ('Quadro', 'GeForce', 'TITAN')) or gpu_string.startswith(('NVS ', 'NV1')):
return "Nvidia"
elif "Intel" in gpu_string:
return "Intel"
return "Unknown"
def get_gpu_generation(gpu_string):
gpu = gpu_string.lower()
if "quadro" in gpu:
return "Nvidia Quadro (All Generations)"
elif "firepro" in gpu or "firegl" in gpu:
return "AMD FirePro (All Generations)"
if "radeon" in gpu or "asus" in gpu:
for gen in [2, 3, 4, 5, 6, 7, 8, 9]:
gen = str(gen)
if "R" + gen + " M" in gpu_string:
return "Radeon R" + gen + "M"
elif "R" + gen + " " in gpu_string:
return "Radeon R" + gen
elif re.search(gen + "\d\d\dM", gpu_string) or ("Mobility" in gpu_string and re.search(gen + "\d\d\d", gpu_string)):
return "Radeon " + gen + "xxxM"
elif re.search(gen + "\d\d\d", gpu_string):
return "Radeon " + gen + "xxxM"
else:
return "Radeon (Other)"
elif "titan x" in gpu:
return "GeForce 9xx"
elif "titan" in gpu:
return "GeForce 7xx"
elif "geforce" in gpu:
for gen in [1, 2, 3, 4, 5, 6, 7, 8, 9]:
gen = str(gen)
base_radeon_re = "GeForce (G|GT|GTX|GTS)?\s*"
if re.search(base_radeon_re + gen + "\d\d\s*(Ti)?(\s|/)", gpu_string):
return "GeForce " + gen + "xx"
elif re.search(base_radeon_re + gen + "\d\dM", gpu_string):
return "GeForce " + gen + "xxM"
elif re.search(base_radeon_re + gen + "\d\d\d\s*(Ti)?(\s|/)", gpu_string):
return "GeForce " + gen + "xxx"
elif re.search(base_radeon_re + gen + "\d\d\dM", gpu_string):
return "GeForce " + gen + "xxxM"
else:
return "GeForce (Other)"
elif "intel" in gpu:
if any(ident in gpu for ident in ["gma", "gm45", "g41", "g45", "q45", "eaglelake", "4 series"]):
return "Intel Integrated (GMA or earlier)"
elif "hd" in gpu or "iris" in gpu:
if any(ident in gpu for ident in ["2000", "3000"]):
return "Intel Integrated (6th Generation; HD 2000/3000)"
elif any(ident in gpu for ident in ["4000", "4200", "4400", "4600", "4700", "5000", "5100", "5200"]):
return "Intel Integrated (7th Generation; HD 2500/4x00/5x00)"
elif any(ident in gpu_string for ident in ["5300", "5500", "5600", "5700", "6000", "6100", "6200", "6300"]):
return "Intel Integrated (8th Generation; HD 5x00/6x00)"
elif any(ident in gpu_string for ident in ["500", "505", "510", "515", "520", "530", "540", "550", "580"]):
return "Intel Integrated (9th Generation; HD 5xx)"
else:
return "Intel Integrated (5th Generation; HD)"
elif "sandybridge" in gpu:
return "Intel Integrated (6th Generation; HD 2000/3000)"
elif "haswell" in gpu or "ivybridge" in gpu or "bay trail" in gpu:
return "Intel Integrated (7th Generation; HD 2500/4x00/5x00)"
elif "broadwell" in gpu:
return "Intel Integrated (8th Generation; HD 5x00/6x00)"
elif "skylake" in gpu:
return "Intel Integrated (9th Generation; HD 5xx)"
elif "ironlake" in gpu:
return "Intel Integrated (5th Generation; HD)"
else:
return gpu_string
return "Other"
def get_mobile_versus_desktop(gpu_string):
gen = get_gpu_generation(gpu_string)
if gen.startswith("Intel"):
return "Intel"
elif gen.endswith("M"):
return "Mobile"
else:
return "Desktop"
def perform_gpu_analysis(stats: HardwareStats):
gpu_manufacturer = stats.gpu_manufacturers()
_log("GPU PLATFORM")
dump_generic_count_dict(stats.gpu_platform(), "GPU Platform", "Machines")
_log("GPU MANUFACTURER")
dump_generic_count_dict(gpu_manufacturer, "GPU Manufacturer", "Machines")
_log("GPU GENERATION")
dump_generic_count_dict(stats.gpu_generation(), "GPU Generation", "Machines")
with suppress(KeyError):
del gpu_manufacturer['Unknown']
make_bar_chart(gpu_manufacturer, 'gpu_manufacturer' + file_name_suffix, 'Manufacturer', needs_conversion_to_percents=False, height_scaling_factor=0.7)
def perform_ram_analysis(stats: HardwareStats):
users_with_at_least_this_much_ram = stats.ram_amounts()
_log("USERS WITH AT LEAST THIS MUCH RAM")
for ram_amount, value in users_with_at_least_this_much_ram.items():
_log(','.join([str(ram_amount), str(value)]))
_log("\n" * 3)
make_bar_chart(users_with_at_least_this_much_ram, 'ram_amounts' + file_name_suffix, 'RAM Amount', make_x_label=lambda l: str(l) + '+', height_scaling_factor=0.7)
def perform_flight_controls_analysis(results_rows):
known_yokes = [
"Saitek Pro Flight Yoke",
"Saitek X52",
"CH FLIGHT SIM YOKE",
"CH ECLIPSE YOKE",
"Pro Flight Cessna Yoke",
"PFC Cirrus Yoke",
"CH 3-Axis 10-Button POV USB Yoke",
]
known_sticks = [
"Logitech 3D Pro",
"T.Flight Hotas",
"T.Flight Stick X",
"Logitech Attack 3",
"Mad Catz F.L.Y.5 Stick",
"SideWinder Precision 2",
"T.16000M",
"SideWinder Force Feedback 2",
"Saitek Pro Flight X-55 Rhino Stick",
"Cyborg",
"Saitek Cyborg USB Stick",
"AV8R",
"Logitech Freedom 2.4",
"SideWinder Joystick",
"Mad Catz V.1 Stick",
"SideWinder Precision Pro",
"SideWinder 3D Pro",
"Logitech Force 3D Pro",
"WingMan Force 3D",
"Joystick - HOTAS Warthog",
"WingMan Extreme Digital 3D",
"WingMan Extreme 3D",
"Top Gun Afterburner",
"CH FLIGHTSTICK PRO",
"CH FIGHTERSTICK",
"CH COMBATSTICK",
"Saitek ST290",
"Saitek ST90",
"Top Gun Fox 2",
"Aviator for Playstation 3",
"Dark Tornado Joystick",
"Saitek X45",
"Saitek X36",
"USB Joystick",
"Pro Flight X65",
"G940",
"HOTAS Cougar Joystick",
"MetalStrik 3D",
"WingMan Attack 2"
]
known_controllers = [
"XBOX",
"Playstation(R)3 Controller",
"WingMan Cordless Gamepad",
"WingMan RumblePad",
"Logitech Dual Action",
"RumblePad 2",
"ASUS Gamepad",
"USB WirelessGamepad",
"Betop Controller",
"Logitech(R) Precision(TM) Gamepad",
"Wireless Gamepad F710"
]
known_rc_controllers = [
"InterLink Elite",
"RealFlight Interface"
]
def canonicalize_stick_or_yoke_name(flight_control_row):
flight_control_row = clean_up_string_formatting(flight_control_row)
if "Mouse" in flight_control_row:
return "Mouse"
elif "VID:1133PID:49685" in flight_control_row:
return "Logitech Extreme 3D"
elif "WingMan Ext Digital 3D" in flight_control_row:
return "WingMan Extreme Digital 3D"
elif "VID:1699PID:1890" in flight_control_row:
return "Saitek X52"
elif "Wireless 360 Controller" in flight_control_row:
return "XBOX"
elif "VID:121PID:6" in flight_control_row:
return "Generic USB Joystick"
elif "VID:1678PID:49402" in flight_control_row:
return "CH Products (Unknown)"
for control in known_yokes + known_sticks + known_controllers:
if control.lower() in flight_control_row.lower():
return control
if "," in flight_control_row:
return flight_control_row.replace(",", ";")
return flight_control_row
def classify_stick_or_yoke(flight_control_row):
flight_control_row = canonicalize_stick_or_yoke_name(flight_control_row)
if flight_control_row == "Mouse":
return "Mouse"
elif flight_control_row in known_yokes:
return "Yoke"
elif flight_control_row in known_sticks:
return "Joystick"
elif flight_control_row in known_controllers:
return "Gamepad"
elif flight_control_row in known_rc_controllers:
return "RC Controller"
elif "yoke" in flight_control_row.lower():
return "Yoke"
elif "stick" in flight_control_row.lower():
return "Joystick"
elif "pad" in flight_control_row.lower():
return "Gamepad"
else:
return "Unknown"
flight_controls = collections.defaultdict(int)
flight_control_type = collections.defaultdict(int)
has_rudder_pedals = collections.defaultdict(int)
for row in results_rows:
val = str_to_int(row[1])
flight_controls[canonicalize_stick_or_yoke_name(row[0])] += val
flight_control_type[classify_stick_or_yoke(row[0])] += val
row = clean_up_string_formatting(row[0])
if "rudder" in row.lower() or "pedals" in row.lower():
has_rudder_pedals[True] += val
else:
has_rudder_pedals[False] += val
nuke_these_keys = []
for controls, count in flight_controls.items():
if count < 5:
nuke_these_keys.append(controls)
for key in nuke_these_keys:
flight_controls["Other"] += flight_controls[key]
del flight_controls[key]
_log("PRIMARY FLIGHT CONTROLS TYPE")
dump_generic_count_dict(flight_control_type, "Flight Controls Type", "Users")
_log("PRIMARY FLIGHT CONTROLS MODEL (for non-mouse users)")
del flight_controls["Mouse"]
dump_generic_count_dict(flight_controls, "Flight Controls Model", "Users")
_log("USERS FLYING WITH PEDALS")
dump_generic_count_dict(has_rudder_pedals, "Has Pedals?", "Users")
def dump_generic_count_dict(dictionary, label, metric_category):
if SHOW_ABSOLUTE_NUMBERS:
_log(label + ",Num " + metric_category + ",% of All " + metric_category)
else:
_log(label + ",% of All " + metric_category)
total = total_entries_in_dict(dictionary)
sorted_dict = sorted(dictionary.items(), key=operator.itemgetter(1), reverse=True)
    for entry_label, count in sorted_dict:
        if SHOW_ABSOLUTE_NUMBERS:
            _log(','.join([str(entry_label), str(count), str((count / total) * 100) + "%"]))
        else:
            # Emit each row as a clean "label,percent" CSV line.
            entry_label = clean_up_string_formatting(entry_label)
            percent_str = clean_up_string_formatting(str((count / total) * 100) + "%")
            _log(entry_label + "," + percent_str)
_log("\n" * 3)
def lower_contains(s: str, check: Iterable[str]) -> bool:
return any(sub.lower() in s.lower() for sub in check)
if __name__ == '__main__':
main()
``` |
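The analysis above leans on helpers star-imported from `ga_library` and `utils`, which are not shown in this file. Below is a minimal sketch of the two used most often, with signatures inferred from the call sites (`counts_to_percents(d)`, `counts_to_percents(d, total)`, `counts_to_percents(d, smush_into_other_below_percent=1)`); the real implementations may differ.
```python
# Inferred sketches of helpers from utils/ga_library (not the actual source).
from typing import Dict, Optional

def str_to_int(s: str) -> int:
    # GA query rows report counts as strings, possibly with thousands separators.
    return int(s.replace(",", ""))

def counts_to_percents(counts: Dict[str, int],
                       total: Optional[int] = None,
                       smush_into_other_below_percent: float = 0.0) -> Dict[str, float]:
    # Convert raw counts into percentages of `total` (defaulting to the sum),
    # optionally folding slices below a threshold into an "Other" bucket.
    if total is None:
        total = sum(counts.values())
    percents = {k: (v / total) * 100 for k, v in counts.items()}
    if smush_into_other_below_percent:
        small = [k for k, v in percents.items()
                 if v < smush_into_other_below_percent and k != "Other"]
        for k in small:
            percents["Other"] = percents.get("Other", 0.0) + percents.pop(k)
    return percents
```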
{
"source": "2626060Z/tango_with_django_project",
"score": 3
} |
#### File: tango_with_django_project/rango/tests_chapter6.py
```python
import os
import re # We use regular expressions to do more in-depth checks on generated HTML output from views.
import warnings
import importlib
from rango.models import Category, Page
from populate_rango import populate
from django.urls import reverse
from django.test import TestCase
from django.conf import settings
from django.db.models.query import QuerySet
FAILURE_HEADER = f"{os.linesep}{os.linesep}{os.linesep}================{os.linesep}TwD TEST FAILURE =({os.linesep}================{os.linesep}"
FAILURE_FOOTER = f"{os.linesep}"
class Chapter6PopulationScriptTest(TestCase):
"""
A few simple tests to examine whether the population script has been updated to include the requested changes (views for pages).
"""
def setUp(self):
populate()
def test_page_objects_have_views(self):
"""
Checks the basic requirement that all pages must have a positive view count.
"""
pages = Page.objects.filter()
for page in pages:
self.assertTrue(page.views > 0,
f"{FAILURE_HEADER}The page '{page.title}' has a negative/zero view count. The exercises for Chapter 6 stated that all view values must be greater than zero. Update your population script, and try again.{FAILURE_FOOTER}")
class Chapter6IndexViewTests(TestCase):
"""
A series of tests that examine the behaviour of the index view and its corresponding template.
Tests to see if the context dictionary is correctly formed, and whether the response is correct, too.
For these tests, we rely on the populate_rango module. We assume that this is now fully correct and working.
If tests fail and you can't understand why, maybe it's worth checking out your population script!
And yes, we assume that all exercises have been completed, too.
"""
def setUp(self):
populate()
self.response = self.client.get(reverse('rango:index'))
self.content = self.response.content.decode()
def test_template_filename(self):
"""
Still using a template?
"""
self.assertTemplateUsed(self.response, 'rango/index.html',
f"{FAILURE_HEADER}Are you using index.html for your index() view? Why not?!{FAILURE_FOOTER}")
def test_index_context_dictionary(self):
"""
Runs some assertions to check if the context dictionary has the correct key/value pairings.
"""
expected_boldmessage = 'Crunchy, creamy, cookie, candy, cupcake!'
expected_categories_order = list(Category.objects.order_by('-likes')[:5])
        # From the exercises section of Chapter 6 -- we cannot assume a set order,
        # because the developer can set the number of views to whatever they wish.
        expected_pages_order = list(Page.objects.order_by('-views')[:5])
# Does the boldmessage still exist? A surprising number of people delete it here.
self.assertTrue('boldmessage' in self.response.context,
f"{FAILURE_HEADER}The 'boldmessage' variable couldn't be found in the context dictionary for the index() view. Did you delete it?{FAILURE_FOOTER}")
        self.assertEqual(expected_boldmessage, self.response.context['boldmessage'],
                         f"{FAILURE_HEADER}Where did {expected_boldmessage} go in the index() view?{FAILURE_FOOTER}")
# Check that categories exists in the context dictionary, that it references the correct objects, and the order is spot on.
self.assertTrue('categories' in self.response.context,
f"{FAILURE_HEADER}We couldn't find a 'categories' variable in the context dictionary within the index() view. Check the instructions in the book, and try again.{FAILURE_FOOTER}")
self.assertEqual(type(self.response.context['categories']), QuerySet,
f"{FAILURE_HEADER}The 'categories' variable in the context dictionary for the index() view didn't return a QuerySet object as expected.{FAILURE_FOOTER}")
self.assertEqual(expected_categories_order, list(self.response.context['categories']),
f"{FAILURE_HEADER}Incorrect categories/category order returned from the index() view's context dictionary -- expected {expected_categories_order}; got {list(self.response.context['categories'])}.{FAILURE_FOOTER}")
        # Repeat, but for the pages variable. Note that order cannot be verified (no instructions in book to use certain values).
self.assertTrue('pages' in self.response.context,
f"{FAILURE_HEADER}We couldn't find a 'pages' variable in the index() view's context dictionary. Did you complete the Chapter 6 exercises?{FAILURE_FOOTER}")
self.assertEqual(type(self.response.context['pages']), QuerySet,
f"{FAILURE_HEADER}The 'pages' variable in the index() view's context dictionary doesn't return a QuerySet as expected.{FAILURE_FOOTER}")
        self.assertEqual(expected_pages_order, list(self.response.context['pages']),
                         f"{FAILURE_HEADER}The 'pages' context dictionary variable for the index() view didn't return the QuerySet we were expecting: got {list(self.response.context['pages'])}, expected {expected_pages_order}. Did you apply the correct ordering to the filtered results?{FAILURE_FOOTER}")
def test_index_categories(self):
"""
Checks the response generated by the index() view -- does it render the categories correctly?
Regular expressions are used here (yikes) to try and be as fair as possible when checking the markup received from the developer's project.
"""
category_li_entries_regex = [ # 0 = regex match, 1 = title of category, 2 = sanitised markup for error message
[
r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("/rango/category/python/"|\'/rango/category/python/\')(\s*)>(\s*|\n*)Python(\s*|\n*)</a>(\s*|\n*)</li>',
'Python', '<li><a href="/rango/category/python/">Python</a></li>'],
[
r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("/rango/category/django/"|\'/rango/category/django/\')(\s*)>(\s*|\n*)Django(\s*|\n*)</a>(\s*|\n*)</li>',
'Django', '<li><a href="/rango/category/django/">Django</a></li>'],
[
r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("/rango/category/other-frameworks/"|\'/rango/category/other-frameworks/\')(\s*)>(\s*|\n*)Other Frameworks(\s*|\n*)</a>(\s*|\n*)</li>',
'Other Frameworks', '<li><a href="/rango/category/other-frameworks/">Other Frameworks</a></li>'],
]
# Check for the presence of each entry.
for entry in category_li_entries_regex:
self.assertTrue(re.search(entry[0], self.content),
f"{FAILURE_HEADER}We couldn't find the expected markup '{entry[2]}' (for the {entry[1]} category) in the response of your index() view. Check your template, and try again.{FAILURE_FOOTER}")
def test_index_pages(self):
"""
Checks the response generated by the index() view -- does it render the pages correctly?
        As you can set view values to whatever you like for pages (in the population script), we need to be a bit more clever in working out which five pages should be displayed.
"""
page_li_entries_regex = {
'Official Python Tutorial': r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("http://docs.python.org/3/tutorial/"|\'http://docs.python.org/3/tutorial/\')(\s*)>(\s*|\n*)Official Python Tutorial(\s*|\n*)</a>(\s*|\n*)</li>',
'How to Think like a Computer Scientist': r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("http://www.greenteapress.com/thinkpython/"|\'http://www.greenteapress.com/thinkpython/\')(\s*)>(\s*|\n*)How to Think like a Computer Scientist(\s*|\n*)</a>(\s*|\n*)</li>',
'Learn Python in 10 Minutes': r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("http://www.korokithakis.net/tutorials/python/"|\'http://www.korokithakis.net/tutorials/python/\')(\s*)>(\s*|\n*)Learn Python in 10 Minutes(\s*|\n*)</a>(\s*|\n*)</li>',
'Official Django Tutorial': r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("https://docs.djangoproject.com/en/2.1/intro/tutorial01/"|\'https://docs.djangoproject.com/en/2.1/intro/tutorial01/\')(\s*)>(\s*|\n*)Official Django Tutorial(\s*|\n*)</a>(\s*|\n*)</li>',
'Django Rocks': r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("http://www.djangorocks.com/"|\'http://www.djangorocks.com/\')(\s*)>(\s*|\n*)Django Rocks(\s*|\n*)</a>(\s*|\n*)</li>',
'How to Tango with Django': r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("http://www.tangowithdjango.com/"|\'http://www.tangowithdjango.com/\')(\s*)>(\s*|\n*)How to Tango with Django(\s*|\n*)</a>(\s*|\n*)</li>',
'Bottle': r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("http://bottlepy.org/docs/dev/"|\'http://bottlepy.org/docs/dev/\')(\s*)>(\s*|\n*)Bottle(\s*|\n*)</a>(\s*|\n*)</li>',
'Flask': r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("http://flask.pocoo.org"|\'http://flask.pocoo.org\')(\s*)>(\s*|\n*)Flask(\s*|\n*)</a>(\s*|\n*)</li>',
}
expected_pages_order = list(Page.objects.order_by('-views')[:5])
expected_pages_li = []
# Populate expected_pages_li, picking out the entry from page_li_entries_regex.
for expected_page in expected_pages_order:
expected_pages_li.append(page_li_entries_regex[expected_page.title])
# Now we have the five entries regex to match, we can loop over and check each one exists.
for expected_regex in expected_pages_li:
self.assertTrue(re.search(expected_regex, self.content),
f"{FAILURE_HEADER}Checks for the top five pages in the index() view's response failed. Check you are using the correct list of objects, the correct HTML markup, and try again. '{expected_regex}'{FAILURE_FOOTER}")
def test_index_response_titles(self):
"""
Checks whether the correct titles are used (including <h2> tags) for categories and pages.
This is listed as an exercise at the end of Chapter 6.
"""
expected_category_h2 = '<h2>Most Liked Categories</h2>'
expected_page_h2 = '<h2>Most Viewed Pages</h2>'
self.assertIn(expected_category_h2, self.content,
f"{FAILURE_HEADER}We couldn't find the markup '{expected_category_h2}' in your index.html template. Check you completed the Chapter 6 exercises as requested, and try again.{FAILURE_FOOTER}")
self.assertIn(expected_page_h2, self.content,
f"{FAILURE_HEADER}We couldn't find the markup '{expected_page_h2}' in your index.html template. Check you completed the Chapter 6 exercises as requested, and try again.{FAILURE_FOOTER}")
class Chapter6NoItemsIndexViewTests(TestCase):
"""
A few tests to complement the Chapter6IndexViewTests.
This time, we purposefully do not prepopulate the sample database with data from populate_rango.
As such, these tests examine whether the app being tested produces the correct output when no categories/pages are present.
"""
def setUp(self):
self.response = self.client.get(reverse('rango:index'))
self.content = self.response.content.decode()
def test_empty_index_context_dictionary(self):
"""
Runs assertions on the context dictionary, ensuring the categories and pages variables exist, but return empty (zero-length) QuerySet objects.
"""
self.assertTrue('categories' in self.response.context,
f"{FAILURE_HEADER}The 'categories' variable does not exist in the context dictionary for index(). (Empty check){FAILURE_FOOTER}")
        self.assertEqual(type(self.response.context['categories']), QuerySet,
                         f"{FAILURE_HEADER}The 'categories' variable in the context dictionary for index() does not yield a QuerySet object. (Empty check){FAILURE_FOOTER}")
self.assertEqual(len(self.response.context['categories']), 0,
f"{FAILURE_HEADER}The 'categories' variable in the context dictionary for index() is not empty. (Empty check){FAILURE_FOOTER}")
self.assertTrue('pages' in self.response.context,
f"{FAILURE_HEADER}The 'pages' variable does not exist in the context dictionary for index(). (Empty check){FAILURE_FOOTER}")
        self.assertEqual(type(self.response.context['pages']), QuerySet,
                         f"{FAILURE_HEADER}The 'pages' variable in the context dictionary for index() does not yield a QuerySet object. (Empty check){FAILURE_FOOTER}")
self.assertEqual(len(self.response.context['pages']), 0,
f"{FAILURE_HEADER}The 'pages' variable in the context dictionary for index() is not empty. (Empty check){FAILURE_FOOTER}")
def test_empty_index_response(self):
"""
Checks to see whether the correct messages appear for no categories and pages.
"""
self.assertIn('<strong>There are no categories present.</strong>', self.content,
f"{FAILURE_HEADER}When no categories are present, we can't find the required '<strong>There are no categories present.</strong>' markup in your index() view's output.{FAILURE_FOOTER}")
self.assertIn('<strong>There are no pages present.</strong>', self.content,
f"{FAILURE_HEADER}When no categories are present, we can't find the required '<strong>There are no pages present.</strong>' markup in your index() view's output. Read the Chapter 6 exercises carefully.{FAILURE_FOOTER}")
def test_sample_category(self):
"""
Checks to see if the correct output is displayed when a sample Category object is added.
For this test, we disregard the instance variable response.
"""
Category.objects.get_or_create(name='Test Category')
updated_response = self.client.get(reverse('rango:index')).content.decode()
category_regex = r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("/rango/category/test-category/"|\'/rango/category/test-category/\')(\s*)>(\s*|\n*)Test Category(\s*|\n*)</a>(\s*|\n*)</li>'
self.assertTrue(re.search(category_regex, updated_response),
f"{FAILURE_HEADER}When adding a test category, we couldn't find the markup for it in the output of the index() view. Check you have included all the code correctly for displaying categories.{FAILURE_FOOTER}")
self.assertIn('<strong>There are no pages present.</strong>', self.content,
f"{FAILURE_HEADER}When no categories are present, we can't find the required '<strong>There are no pages present.</strong>' markup in your index() view's output. Read the Chapter 6 exercises carefully.{FAILURE_FOOTER}")
class Chapter6CategoryViewTests(TestCase):
"""
A series of tests for examining the show_category() view, looking at the context dictionary and rendered response.
We use the 'Other Frameworks' category for these tests to check the slugs work correctly, too.
"""
def setUp(self):
populate()
self.response = self.client.get(
reverse('rango:show_category', kwargs={'category_name_slug': 'other-frameworks'}))
self.content = self.response.content.decode()
def test_template_filename(self):
"""
Still using a template?
"""
self.assertTemplateUsed(self.response, 'rango/category.html',
f"{FAILURE_HEADER}The category.html template is not used for the show_category() view. The specification requires this.{FAILURE_FOOTER}")
def test_slug_functionality(self):
"""
Runs a simple test by changing the name of the "Other Frameworks" category to "Unscrupulous Nonsense".
Checks to see whether the slug updates with the name change.
"""
category = Category.objects.get_or_create(name='Other Frameworks')[0]
category.name = "Unscrupulous Nonsense"
category.save()
        self.assertEqual('unscrupulous-nonsense', category.slug,
                         f"{FAILURE_HEADER}When changing the name of a category, the slug attribute was not updated (correctly) to reflect this change. Did you override the save() method in the Category model correctly?{FAILURE_FOOTER}")
def test_context_dictionary(self):
"""
Given the response, does the context dictionary match up with what is expected?
Is the category object being passed correctly, and are the pages being filtered correctly?
"""
other_frameworks_category = Category.objects.get_or_create(name='Other Frameworks')[0]
page_list = list(Page.objects.filter(category=other_frameworks_category))
self.assertTrue('category' in self.response.context,
f"{FAILURE_HEADER}The 'category' variable in the context dictionary for the show_category() view was not found. Did you spell it correctly?{FAILURE_FOOTER}")
self.assertTrue('pages' in self.response.context,
f"{FAILURE_HEADER}The 'pages' variable in the context dictionary for the show_category() view was not found.{FAILURE_FOOTER}")
self.assertEqual(self.response.context['category'], other_frameworks_category,
f"{FAILURE_HEADER}The category returned in the context dictionary for the show_category() view did not match what was expected. We expect to see a Category object returned here (specifically the 'Other Frameworks' category, for our tests).{FAILURE_FOOTER}")
self.assertEqual(list(self.response.context['pages']), page_list,
f"{FAILURE_HEADER}The list of pages returned in the context dictionary of the show_category() view was not correct. Did you filter the pages correctly in your view?{FAILURE_FOOTER}")
def test_response_markup(self):
"""
Some simple tests to make sure the markup returned is on track. Specifically, we look at the title and list of pages returned.
"""
expected_header = '<h1>Other Frameworks</h1>'
bottle_markup = r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("http://bottlepy.org/docs/dev/"|\'http://bottlepy.org/docs/dev/\')(\s*)>(\s*|\n*)Bottle(\s*|\n*)</a>(\s*|\n*)</li>'
flask_markup = r'<li>(\s*|\n*)<a(\s+)href(\s*)=(\s*)("http://flask.pocoo.org"|\'http://flask.pocoo.org\')(\s*)>(\s*|\n*)Flask(\s*|\n*)</a>(\s*|\n*)</li>'
self.assertIn(expected_header, self.content,
f"{FAILURE_HEADER}The header tag '{expected_header}' was not found in the response for the show_category() view. Make sure the category.html template matches the specification.{FAILURE_FOOTER}")
self.assertTrue(re.search(bottle_markup, self.content),
f"{FAILURE_HEADER}Correctly formed <li> markup was not found for the pages to be displayed in the show_category() view. Make sure your category.html template is well-formed!{FAILURE_FOOTER}")
self.assertTrue(re.search(flask_markup, self.content),
f"{FAILURE_HEADER}Correctly formed <li> markup was not found for the pages to be displayed in the show_category() view. Make sure your category.html template is well-formed!{FAILURE_FOOTER}")
def test_for_homepage_link(self):
"""
Checks to see if a hyperlink to the homepage is present.
We didn't enforce a strict label for the link; we are more interested here in correct syntax.
"""
homepage_hyperlink_markup = r'<a(\s+)href="/rango/">(\w+)</a>'
self.assertTrue(re.search(homepage_hyperlink_markup, self.content),
f"{FAILURE_HEADER}We couldn't find a well-formed hyperlink to the Rango homepage in your category.html template. This is an exercise at the end of Chapter 6.{FAILURE_FOOTER}")
class Chapter6BadCategoryViewTests(TestCase):
"""
A few tests to examine some edge cases where categories do not exist, for example.
"""
def test_malformed_url(self):
"""
Tests to see whether the URL patterns have been correctly entered; many students have fallen over at this one.
Somehow.
"""
response = self.client.get('/rango/category/')
self.assertTrue(response.status_code == 404,
f"{FAILURE_HEADER}The URL /rango/category/ should return a status of code of 404 (not found). Check to see whether you have correctly entered your urlpatterns.{FAILURE_FOOTER}")
def test_nonexistent_category(self):
"""
Attempts to lookup a category that does not exist in the database and checks the response.
"""
response = self.client.get(
reverse('rango:show_category', kwargs={'category_name_slug': 'nonexistent-category'}))
lookup_string = 'The specified category does not exist.'
        self.assertIn(lookup_string, response.content.decode(),
                      f"{FAILURE_HEADER}The expected message when attempting to access a non-existent category was not found. Check your category.html template.{FAILURE_FOOTER}")
def test_empty_category(self):
"""
Adds a Category without pages; checks to see what the response is.
"""
        Category.objects.get_or_create(name='Test Category')
response = self.client.get(reverse('rango:show_category', kwargs={'category_name_slug': 'test-category'}))
lookup_string = '<strong>No pages currently in category.</strong>'
        self.assertIn(lookup_string, response.content.decode(),
                      f"{FAILURE_HEADER}The expected message when accessing a category without pages was not found. Check your category.html template.{FAILURE_FOOTER}")
``` |
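These tests exercise a Category model with a slug that regenerates on save (see test_slug_functionality). For reference, a sketch of the model the tests appear to assume; the tested project's actual models.py is not part of this file.
```python
# rango/models.py -- sketch of the model assumed by the tests above.
from django.db import models
from django.template.defaultfilters import slugify

class Category(models.Model):
    name = models.CharField(max_length=128, unique=True)
    views = models.IntegerField(default=0)
    likes = models.IntegerField(default=0)
    slug = models.SlugField(unique=True)

    def save(self, *args, **kwargs):
        # Regenerating the slug on every save means renaming a category
        # updates it, which is what test_slug_functionality checks.
        self.slug = slugify(self.name)
        super().save(*args, **kwargs)

    def __str__(self):
        return self.name
```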
{
"source": "262877348/Data",
"score": 3
} |
#### File: Packages/Default/duplicate_line.py
```python
import sublime_plugin
class DuplicateLineCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if region.empty():
line = self.view.line(region)
line_contents = self.view.substr(line) + '\n'
self.view.insert(edit, line.begin(), line_contents)
else:
self.view.insert(edit, region.begin(), self.view.substr(region))
```
#### File: Packages/Default/swap_line.py
```python
import sublime
import sublime_plugin
def expand_to_line(view, region):
"""
As view.full_line, but doesn't expand to the next line if a full line is
already selected
"""
if not (region.a == region.b) and view.substr(region.end() - 1) == '\n':
return sublime.Region(view.line(region).begin(), region.end())
else:
return view.full_line(region)
def extract_line_blocks(view):
blocks = [expand_to_line(view, s) for s in view.sel()]
if len(blocks) == 0:
return blocks
# merge any adjacent blocks
merged_blocks = [blocks[0]]
for block in blocks[1:]:
last_block = merged_blocks[-1]
if block.begin() <= last_block.end():
merged_blocks[-1] = sublime.Region(last_block.begin(), block.end())
else:
merged_blocks.append(block)
return merged_blocks
class SwapLineUpCommand(sublime_plugin.TextCommand):
def run(self, edit):
blocks = extract_line_blocks(self.view)
# No selection
if len(blocks) == 0:
return
# Already at BOF
if blocks[0].begin() == 0:
return
# Add a trailing newline if required, the logic is simpler if every line
# ends with a newline
add_trailing_newline = (self.view.substr(self.view.size() - 1) != '\n') and blocks[-1].b == self.view.size()
if add_trailing_newline:
# The insert can cause the selection to move. This isn't wanted, so
# reset the selection if it has moved to EOF
sel = [r for r in self.view.sel()]
self.view.insert(edit, self.view.size(), '\n')
if self.view.sel()[-1].end() == self.view.size():
# Selection has moved, restore the previous selection
self.view.sel().clear()
for r in sel:
self.view.sel().add(r)
# Fix up any block that should now include this newline
blocks[-1] = sublime.Region(blocks[-1].a, blocks[-1].b + 1)
# Process in reverse order
blocks.reverse()
for b in blocks:
prev_line = self.view.full_line(b.begin() - 1)
self.view.insert(edit, b.end(), self.view.substr(prev_line))
self.view.erase(edit, prev_line)
if add_trailing_newline:
# Remove the added newline
self.view.erase(edit, sublime.Region(self.view.size() - 1, self.view.size()))
# Ensure the selection is visible
self.view.show(self.view.sel(), False)
class SwapLineDownCommand(sublime_plugin.TextCommand):
def run(self, edit):
blocks = extract_line_blocks(self.view)
# No selection
if len(blocks) == 0:
return
# Already at EOF
if blocks[-1].end() == self.view.size():
return
# Add a trailing newline if required, the logic is simpler if every line
# ends with a newline
add_trailing_newline = (self.view.substr(self.view.size() - 1) != '\n')
if add_trailing_newline:
# No block can be at EOF (checked above), so no need to fix up the
# blocks
self.view.insert(edit, self.view.size(), '\n')
# Process in reverse order
blocks.reverse()
for b in blocks:
next_line = self.view.full_line(b.end())
contents = self.view.substr(next_line)
self.view.erase(edit, next_line)
self.view.insert(edit, b.begin(), contents)
if add_trailing_newline:
# Remove the added newline
self.view.erase(edit, sublime.Region(self.view.size() - 1, self.view.size()))
# Ensure the selection is visible
self.view.show(self.view.sel(), False)
```
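The interesting step in swap_line.py is the adjacent-block merge inside extract_line_blocks. The same idea as a standalone sketch, with (begin, end) tuples standing in for sublime.Region:
```python
# Pure-Python analogue of the merge loop in extract_line_blocks. Like
# view.sel(), the input is assumed sorted and non-overlapping, so touching
# neighbours can simply be coalesced in order.
def merge_adjacent(blocks):
    merged = blocks[:1]
    for begin, end in blocks[1:]:
        last_begin, last_end = merged[-1]
        if begin <= last_end:
            merged[-1] = (last_begin, end)
        else:
            merged.append((begin, end))
    return merged

assert merge_adjacent([(0, 5), (5, 9), (12, 20)]) == [(0, 9), (12, 20)]
```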
#### File: Packages/Default/trim_trailing_white_space.py
```python
import sublime_plugin
class TrimTrailingWhiteSpaceCommand(sublime_plugin.TextCommand):
def run(self, edit):
trailing_white_space = self.view.find_all("[\t ]+$")
trailing_white_space.reverse()
for r in trailing_white_space:
self.view.erase(edit, r)
class TrimTrailingWhiteSpace(sublime_plugin.EventListener):
def on_pre_save(self, view):
if view.settings().get("trim_trailing_white_space_on_save") is True:
view.run_command("trim_trailing_white_space")
class EnsureNewlineAtEofCommand(sublime_plugin.TextCommand):
def run(self, edit):
if self.view.size() > 0 and self.view.substr(self.view.size() - 1) != '\n':
self.view.insert(edit, self.view.size(), "\n")
class EnsureNewlineAtEof(sublime_plugin.EventListener):
def on_pre_save(self, view):
if view.settings().get("ensure_newline_at_eof_on_save") is True:
if view.size() > 0 and view.substr(view.size() - 1) != '\n':
view.run_command("ensure_newline_at_eof")
```
#### File: Packages/Tag/tag_classes.py
```python
import sublime, sublime_plugin
import re
class TagClassesCommand(sublime_plugin.TextCommand):
def run(self, edit):
data = ''
classes = []
for region in self.view.sel():
if region.empty():
continue
data += self.view.substr(sublime.Region(region.begin(), region.end()))
if not data:
data += self.view.substr(sublime.Region(0, self.view.size()))
if data:
re_classes = (" ".join(re.compile('class="([^"]+)"').findall(data))).split()
for item in re_classes:
item = item.strip()
if '.'+item+' {}' not in classes:
classes.append('.'+item+' {}')
if classes:
s = sublime.load_settings('Tag Package.sublime-settings')
if s.get('tag_classes_sort', False):
classes.sort()
sublime.set_clipboard("\n".join(classes))
sublime.status_message("CSS Classes Copied to Clipboard")
``` |
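The class extraction in tag_classes.py is easy to verify outside Sublime. A standalone illustration of the same pipeline (find class attributes, split, de-duplicate into CSS stubs):
```python
# Same extraction as TagClassesCommand, minus the editor plumbing.
import re

html = '<div class="btn btn-primary"><span class="icon">x</span></div>'
names = " ".join(re.compile('class="([^"]+)"').findall(html)).split()
stubs = sorted({"." + name.strip() + " {}" for name in names})
print(stubs)  # ['.btn {}', '.btn-primary {}', '.icon {}']
```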
{
"source": "262986832/pulsar",
"score": 2
} |
#### File: pulsar-client-cpp/python/schema_test.py
```python
from unittest import TestCase, main
import fastavro
import pulsar
from pulsar.schema import *
from enum import Enum
import json
from fastavro.schema import load_schema
class SchemaTest(TestCase):
serviceUrl = 'pulsar://localhost:6650'
def test_simple(self):
class Color(Enum):
red = 1
green = 2
blue = 3
class Example(Record):
_sorted_fields = True
a = String()
b = Integer()
c = Array(String())
d = Color
e = Boolean()
f = Float()
g = Double()
h = Bytes()
i = Map(String())
j = CustomEnum(Color)
fastavro.parse_schema(Example.schema())
self.assertEqual(Example.schema(), {
"name": "Example",
"type": "record",
"fields": [
{"name": "a", "type": ["null", "string"]},
{"name": "b", "type": ["null", "int"]},
{"name": "c", "type": ["null", {
"type": "array",
"items": "string"}]
},
{"name": "d",
"type": ["null", {
"type": "enum",
"name": "Color",
"symbols": ["red", "green", "blue"]}]
},
{"name": "e", "type": ["null", "boolean"]},
{"name": "f", "type": ["null", "float"]},
{"name": "g", "type": ["null", "double"]},
{"name": "h", "type": ["null", "bytes"]},
{"name": "i", "type": ["null", {
"type": "map",
"values": "string"}]
},
{"name": "j", "type": ["null", "Color"]}
]
})
def test_complex(self):
class Color(Enum):
red = 1
green = 2
blue = 3
class MySubRecord(Record):
_sorted_fields = True
x = Integer()
y = Long()
z = String()
color = CustomEnum(Color)
class Example(Record):
_sorted_fields = True
a = String()
sub = MySubRecord # Test with class
sub2 = MySubRecord() # Test with instance
fastavro.parse_schema(Example.schema())
self.assertEqual(Example.schema(), {
"name": "Example",
"type": "record",
"fields": [
{"name": "a", "type": ["null", "string"]},
{"name": "sub",
"type": ["null", {
"name": "MySubRecord",
"type": "record",
"fields": [
{'name': 'color', 'type': ['null', {'type': 'enum', 'name': 'Color', 'symbols':
['red', 'green', 'blue']}]},
{"name": "x", "type": ["null", "int"]},
{"name": "y", "type": ["null", "long"]},
{"name": "z", "type": ["null", "string"]}]
}]
},
{"name": "sub2",
"type": ["null", 'MySubRecord']
}
]
})
def test_complex_with_required_fields(self):
class MySubRecord(Record):
x = Integer(required=True)
y = Long(required=True)
z = String()
class Example(Record):
a = String(required=True)
sub = MySubRecord(required=True)
self.assertEqual(Example.schema(), {
"name": "Example",
"type": "record",
"fields": [
{"name": "a", "type": "string"},
{"name": "sub",
"type": {
"name": "MySubRecord",
"type": "record",
"fields": [{"name": "x", "type": "int"},
{"name": "y", "type": "long"},
{"name": "z", "type": ["null", "string"]}]
}
},
]
})
def test_invalid_enum(self):
class Color:
red = 1
green = 2
blue = 3
class InvalidEnum(Record):
a = Integer()
b = Color
# Enum will be ignored
self.assertEqual(InvalidEnum.schema(),
{'name': 'InvalidEnum', 'type': 'record',
'fields': [{'name': 'a', 'type': ["null", 'int']}]})
def test_initialization(self):
class Example(Record):
a = Integer()
b = Integer()
r = Example(a=1, b=2)
self.assertEqual(r.a, 1)
self.assertEqual(r.b, 2)
r.b = 5
self.assertEqual(r.b, 5)
# Setting non-declared field should fail
try:
r.c = 3
self.fail('Should have failed')
except AttributeError:
# Expected
pass
try:
Record(a=1, c=8)
self.fail('Should have failed')
except AttributeError:
# Expected
pass
except TypeError:
# Expected
pass
def _expectTypeError(self, func):
try:
func()
self.fail('Should have failed')
except TypeError:
# Expected
pass
def test_field_type_check(self):
class Example(Record):
a = Integer()
b = String(required=False)
self._expectTypeError(lambda: Example(a=1, b=2))
class E2(Record):
a = Boolean()
E2(a=False) # ok
self._expectTypeError(lambda: E2(a=1))
class E3(Record):
a = Float()
E3(a=1.0) # Ok
self._expectTypeError(lambda: E3(a=1))
class E4(Record):
a = Null()
E4(a=None) # Ok
self._expectTypeError(lambda: E4(a=1))
class E5(Record):
a = Long()
E5(a=1234) # Ok
self._expectTypeError(lambda: E5(a=1.12))
        class E6(Record):
            a = String()
        E6(a="hello")  # Ok
        self._expectTypeError(lambda: E6(a=1.12))
        class E6b(Record):
            a = Bytes()
        E6b(a="hello".encode('utf-8'))  # Ok
        self._expectTypeError(lambda: E6b(a="hello"))
        class E7(Record):
            a = Double()
        E7(a=1.0)  # Ok
        self._expectTypeError(lambda: E7(a=1))
class Color(Enum):
red = 1
green = 2
blue = 3
class OtherEnum(Enum):
red = 1
green = 2
blue = 3
class E8(Record):
a = Color
e = E8(a=Color.red) # Ok
self.assertEqual(e.a, Color.red)
e = E8(a='red') # Ok
self.assertEqual(e.a, Color.red)
e = E8(a=1) # Ok
self.assertEqual(e.a, Color.red)
self._expectTypeError(lambda: E8(a='redx'))
self._expectTypeError(lambda: E8(a=OtherEnum.red))
self._expectTypeError(lambda: E8(a=5))
class E9(Record):
a = Array(String())
E9(a=['a', 'b', 'c']) # Ok
self._expectTypeError(lambda: E9(a=1))
self._expectTypeError(lambda: E9(a=[1, 2, 3]))
self._expectTypeError(lambda: E9(a=['1', '2', 3]))
class E10(Record):
a = Map(Integer())
E10(a={'a': 1, 'b': 2}) # Ok
self._expectTypeError(lambda: E10(a=1))
self._expectTypeError(lambda: E10(a={'a': '1', 'b': 2}))
self._expectTypeError(lambda: E10(a={1: 1, 'b': 2}))
class SubRecord1(Record):
s = Integer()
class SubRecord2(Record):
s = String()
class E11(Record):
a = SubRecord1
E11(a=SubRecord1(s=1)) # Ok
self._expectTypeError(lambda: E11(a=1))
self._expectTypeError(lambda: E11(a=SubRecord2(s='hello')))
def test_field_type_check_defaults(self):
try:
class Example(Record):
a = Integer(default="xyz")
self.fail("Class declaration should have failed")
except TypeError:
pass # Expected
def test_serialize_json(self):
class Example(Record):
a = Integer()
b = Integer()
self.assertEqual(Example.schema(), {
"name": "Example",
"type": "record",
"fields": [
{"name": "a", "type": ["null", "int"]},
{"name": "b", "type": ["null", "int"]},
]
})
s = JsonSchema(Example)
r = Example(a=1, b=2)
data = s.encode(r)
self.assertEqual(json.loads(data), {'a': 1, 'b': 2})
r2 = s.decode(data)
self.assertEqual(r2.__class__.__name__, 'Example')
self.assertEqual(r2, r)
def test_serialize_avro(self):
class Example(Record):
a = Integer()
b = Integer()
self.assertEqual(Example.schema(), {
"name": "Example",
"type": "record",
"fields": [
{"name": "a", "type": ["null", "int"]},
{"name": "b", "type": ["null", "int"]},
]
})
s = AvroSchema(Example)
r = Example(a=1, b=2)
data = s.encode(r)
r2 = s.decode(data)
self.assertEqual(r2.__class__.__name__, 'Example')
self.assertEqual(r2, r)
def test_non_sorted_fields(self):
class T1(Record):
a = Integer()
b = Integer()
c = Double()
d = String()
class T2(Record):
b = Integer()
a = Integer()
d = String()
c = Double()
self.assertNotEqual(T1.schema()['fields'], T2.schema()['fields'])
def test_sorted_fields(self):
class T1(Record):
_sorted_fields = True
a = Integer()
b = Integer()
class T2(Record):
_sorted_fields = True
b = Integer()
a = Integer()
self.assertEqual(T1.schema()['fields'], T2.schema()['fields'])
def test_schema_version(self):
class Example(Record):
a = Integer()
b = Integer()
client = pulsar.Client(self.serviceUrl)
producer = client.create_producer(
'my-avro-python-schema-version-topic',
schema=AvroSchema(Example))
consumer = client.subscribe('my-avro-python-schema-version-topic', 'sub-1',
schema=AvroSchema(Example))
r = Example(a=1, b=2)
producer.send(r)
msg = consumer.receive()
self.assertIsNotNone(msg.schema_version())
        self.assertEqual(b'\x00\x00\x00\x00\x00\x00\x00\x00', msg.schema_version().encode())
self.assertEqual(r, msg.value())
client.close()
def test_serialize_wrong_types(self):
class Example(Record):
a = Integer()
b = Integer()
class Foo(Record):
x = Integer()
y = Integer()
s = JsonSchema(Example)
try:
data = s.encode(Foo(x=1, y=2))
self.fail('Should have failed')
except TypeError:
pass # expected
try:
data = s.encode('hello')
self.fail('Should have failed')
except TypeError:
pass # expected
def test_defaults(self):
class Example(Record):
a = Integer(default=5)
b = Integer()
c = String(default='hello')
r = Example()
self.assertEqual(r.a, 5)
self.assertEqual(r.b, None)
self.assertEqual(r.c, 'hello')
def test_none_value(self):
"""
        The objective of the test is to check that if no value is assigned to the attribute, the validation returns
        the expected default value as defined in the Field class.
"""
class Example(Record):
a = Null()
b = Boolean()
c = Integer()
d = Long()
e = Float()
f = Double()
g = Bytes()
h = String()
r = Example()
self.assertIsNone(r.a)
self.assertFalse(r.b)
self.assertIsNone(r.c)
self.assertIsNone(r.d)
self.assertIsNone(r.e)
self.assertIsNone(r.f)
self.assertIsNone(r.g)
self.assertIsNone(r.h)
####
def test_json_schema(self):
class Example(Record):
a = Integer()
b = Integer()
# Incompatible variation of the class
class BadExample(Record):
a = String()
b = Integer()
client = pulsar.Client(self.serviceUrl)
producer = client.create_producer(
'my-json-python-topic',
schema=JsonSchema(Example))
# Validate that incompatible schema is rejected
try:
client.subscribe('my-json-python-topic', 'sub-1',
schema=JsonSchema(BadExample))
self.fail('Should have failed')
except Exception as e:
pass # Expected
try:
client.subscribe('my-json-python-topic', 'sub-1',
schema=StringSchema(BadExample))
self.fail('Should have failed')
except Exception as e:
pass # Expected
try:
client.subscribe('my-json-python-topic', 'sub-1',
schema=AvroSchema(BadExample))
self.fail('Should have failed')
except Exception as e:
pass # Expected
consumer = client.subscribe('my-json-python-topic', 'sub-1',
schema=JsonSchema(Example))
r = Example(a=1, b=2)
producer.send(r)
msg = consumer.receive()
self.assertEqual(r, msg.value())
producer.close()
consumer.close()
client.close()
def test_string_schema(self):
client = pulsar.Client(self.serviceUrl)
producer = client.create_producer(
'my-string-python-topic',
schema=StringSchema())
# Validate that incompatible schema is rejected
try:
class Example(Record):
a = Integer()
b = Integer()
client.create_producer('my-string-python-topic',
schema=JsonSchema(Example))
self.fail('Should have failed')
except Exception as e:
pass # Expected
consumer = client.subscribe('my-string-python-topic', 'sub-1',
schema=StringSchema())
producer.send("Hello")
msg = consumer.receive()
self.assertEqual("Hello", msg.value())
self.assertEqual(b"Hello", msg.data())
client.close()
def test_bytes_schema(self):
client = pulsar.Client(self.serviceUrl)
producer = client.create_producer(
'my-bytes-python-topic',
schema=BytesSchema())
# Validate that incompatible schema is rejected
try:
class Example(Record):
a = Integer()
b = Integer()
client.create_producer('my-bytes-python-topic',
schema=JsonSchema(Example))
self.fail('Should have failed')
except Exception as e:
pass # Expected
consumer = client.subscribe('my-bytes-python-topic', 'sub-1',
schema=BytesSchema())
producer.send(b"Hello")
msg = consumer.receive()
self.assertEqual(b"Hello", msg.value())
client.close()
def test_avro_schema(self):
class Example(Record):
a = Integer()
b = Integer()
# Incompatible variation of the class
class BadExample(Record):
a = String()
b = Integer()
client = pulsar.Client(self.serviceUrl)
producer = client.create_producer(
'my-avro-python-topic',
schema=AvroSchema(Example))
# Validate that incompatible schema is rejected
try:
client.subscribe('my-avro-python-topic', 'sub-1',
schema=AvroSchema(BadExample))
self.fail('Should have failed')
except Exception as e:
pass # Expected
try:
client.subscribe('my-avro-python-topic', 'sub-2',
schema=JsonSchema(Example))
self.fail('Should have failed')
except Exception as e:
pass # Expected
consumer = client.subscribe('my-avro-python-topic', 'sub-3',
schema=AvroSchema(Example))
r = Example(a=1, b=2)
producer.send(r)
msg = consumer.receive()
self.assertEqual(r, msg.value())
producer.close()
consumer.close()
client.close()
def test_json_enum(self):
class MyEnum(Enum):
A = 1
B = 2
C = 3
class Example(Record):
name = String()
v = MyEnum
w = CustomEnum(MyEnum)
x = CustomEnum(MyEnum, required=True, default=MyEnum.A, required_default=True)
topic = 'my-json-enum-topic'
client = pulsar.Client(self.serviceUrl)
producer = client.create_producer(
topic=topic,
schema=JsonSchema(Example))
consumer = client.subscribe(topic, 'test',
schema=JsonSchema(Example))
r = Example(name='test', v=MyEnum.C, w=MyEnum.B)
producer.send(r)
msg = consumer.receive()
self.assertEqual('test', msg.value().name)
self.assertEqual(MyEnum.C, MyEnum(msg.value().v))
self.assertEqual(MyEnum.B, MyEnum(msg.value().w))
self.assertEqual(MyEnum.A, MyEnum(msg.value().x))
client.close()
def test_avro_enum(self):
class MyEnum(Enum):
A = 1
B = 2
C = 3
class Example(Record):
name = String()
v = MyEnum
w = CustomEnum(MyEnum)
x = CustomEnum(MyEnum, required=True, default=MyEnum.B, required_default=True)
topic = 'my-avro-enum-topic'
client = pulsar.Client(self.serviceUrl)
producer = client.create_producer(
topic=topic,
schema=AvroSchema(Example))
consumer = client.subscribe(topic, 'test',
schema=AvroSchema(Example))
r = Example(name='test', v=MyEnum.C, w=MyEnum.A)
producer.send(r)
msg = consumer.receive()
msg.value()
self.assertEqual(MyEnum.C, msg.value().v)
self.assertEqual(MyEnum.A, MyEnum(msg.value().w))
self.assertEqual(MyEnum.B, MyEnum(msg.value().x))
client.close()
def test_avro_map_array(self):
class MapArray(Record):
values = Map(Array(Integer()))
class MapMap(Record):
values = Map(Map(Integer()))
class ArrayMap(Record):
values = Array(Map(Integer()))
class ArrayArray(Record):
values = Array(Array(Integer()))
topic_prefix = "my-avro-map-array-topic-"
data_list = (
(topic_prefix + "0", AvroSchema(MapArray),
MapArray(values={"A": [1, 2], "B": [3]})),
(topic_prefix + "1", AvroSchema(MapMap),
MapMap(values={"A": {"B": 2},})),
(topic_prefix + "2", AvroSchema(ArrayMap),
ArrayMap(values=[{"A": 1}, {"B": 2}, {"C": 3}])),
(topic_prefix + "3", AvroSchema(ArrayArray),
ArrayArray(values=[[1, 2, 3], [4]])),
)
client = pulsar.Client(self.serviceUrl)
for data in data_list:
topic = data[0]
schema = data[1]
record = data[2]
producer = client.create_producer(topic, schema=schema)
consumer = client.subscribe(topic, 'sub', schema=schema)
producer.send(record)
msg = consumer.receive()
self.assertEqual(msg.value().values, record.values)
consumer.acknowledge(msg)
consumer.close()
producer.close()
client.close()
def test_avro_required_default(self):
class MySubRecord(Record):
_sorted_fields = True
x = Integer()
y = Long()
z = String()
class Example(Record):
a = Integer()
b = Boolean(required=True)
c = Long()
d = Float()
e = Double()
f = String()
g = Bytes()
h = Array(String())
i = Map(String())
j = MySubRecord()
class ExampleRequiredDefault(Record):
_sorted_fields = True
a = Integer(required_default=True)
b = Boolean(required=True, required_default=True)
c = Long(required_default=True)
d = Float(required_default=True)
e = Double(required_default=True)
f = String(required_default=True)
g = Bytes(required_default=True)
h = Array(String(), required_default=True)
i = Map(String(), required_default=True)
j = MySubRecord(required_default=True)
self.assertEqual(ExampleRequiredDefault.schema(), {
"name": "ExampleRequiredDefault",
"type": "record",
"fields": [
{
"name": "a",
"type": [
"null",
"int"
],
"default": None
},
{
"name": "b",
"type": "boolean",
"default": False
},
{
"name": "c",
"type": [
"null",
"long"
],
"default": None
},
{
"name": "d",
"type": [
"null",
"float"
],
"default": None
},
{
"name": "e",
"type": [
"null",
"double"
],
"default": None
},
{
"name": "f",
"type": [
"null",
"string"
],
"default": None
},
{
"name": "g",
"type": [
"null",
"bytes"
],
"default": None
},
{
"name": "h",
"type": [
"null",
{
"type": "array",
"items": "string"
}
],
"default": None
},
{
"name": "i",
"type": [
"null",
{
"type": "map",
"values": "string"
}
],
"default": None
},
{
"name": "j",
"type": [
"null",
{
"name": "MySubRecord",
"type": "record",
"fields": [
{
"name": "x",
"type": [
"null",
"int"
]
},
{
"name": "y",
"type": [
"null",
"long"
],
},
{
"name": "z",
"type": [
"null",
"string"
]
}
]
}
],
"default": None
}
]
})
client = pulsar.Client(self.serviceUrl)
producer = client.create_producer(
'my-avro-python-default-topic',
schema=AvroSchema(Example))
producer_default = client.create_producer(
'my-avro-python-default-topic',
schema=AvroSchema(ExampleRequiredDefault))
producer.close()
producer_default.close()
client.close()
def test_default_value(self):
class MyRecord(Record):
A = Integer()
B = String()
C = Boolean(default=True, required=True)
D = Double(default=6.4)
topic = "my-default-value-topic"
client = pulsar.Client(self.serviceUrl)
producer = client.create_producer(
topic=topic,
schema=JsonSchema(MyRecord))
consumer = client.subscribe(topic, 'test', schema=JsonSchema(MyRecord))
r = MyRecord(A=5, B="text")
producer.send(r)
msg = consumer.receive()
self.assertEqual(msg.value().A, 5)
self.assertEqual(msg.value().B, u'text')
self.assertEqual(msg.value().C, True)
self.assertEqual(msg.value().D, 6.4)
producer.close()
consumer.close()
client.close()
def test_serialize_schema_complex(self):
class Color(Enum):
red = 1
green = 2
blue = 3
class NestedObj1(Record):
_sorted_fields = True
na1 = String()
nb1 = Double()
class NestedObj2(Record):
_sorted_fields = True
na2 = Integer()
nb2 = Boolean()
nc2 = NestedObj1()
class NestedObj3(Record):
_sorted_fields = True
color = CustomEnum(Color)
na3 = Integer()
class NestedObj4(Record):
_avro_namespace = 'xxx4'
_sorted_fields = True
na4 = String()
nb4 = Integer()
class ComplexRecord(Record):
_avro_namespace = 'xxx.xxx'
_sorted_fields = True
a = Integer()
b = Integer()
color = Color
color2 = Color
color3 = CustomEnum(Color, required=True, default=Color.red, required_default=True)
nested = NestedObj2()
nested2 = NestedObj2()
mapNested = Map(NestedObj3())
mapNested2 = Map(NestedObj3())
arrayNested = Array(NestedObj4())
arrayNested2 = Array(NestedObj4())
print('complex schema: ', ComplexRecord.schema())
self.assertEqual(ComplexRecord.schema(), {
"name": "ComplexRecord",
"namespace": "xxx.xxx",
"type": "record",
"fields": [
{"name": "a", "type": ["null", "int"]},
{'name': 'arrayNested', 'type': ['null', {'type': 'array', 'items':
{'name': 'NestedObj4', 'namespace': 'xxx4', 'type': 'record', 'fields': [
{'name': 'na4', 'type': ['null', 'string']},
{'name': 'nb4', 'type': ['null', 'int']}
]}}
]},
{'name': 'arrayNested2', 'type': ['null', {'type': 'array', 'items': 'xxx4.NestedObj4'}]},
{"name": "b", "type": ["null", "int"]},
{'name': 'color', 'type': ['null', {'type': 'enum', 'name': 'Color', 'symbols': [
'red', 'green', 'blue']}]},
{'name': 'color2', 'type': ['null', 'Color']},
{'name': 'color3', 'default': 'red', 'type': 'Color'},
{'name': 'mapNested', 'type': ['null', {'type': 'map', 'values':
{'name': 'NestedObj3', 'type': 'record', 'fields': [
{'name': 'color', 'type': ['null', 'Color']},
{'name': 'na3', 'type': ['null', 'int']}
]}}
]},
{'name': 'mapNested2', 'type': ['null', {'type': 'map', 'values': 'NestedObj3'}]},
{"name": "nested", "type": ['null', {'name': 'NestedObj2', 'type': 'record', 'fields': [
{'name': 'na2', 'type': ['null', 'int']},
{'name': 'nb2', 'type': ['null', 'boolean']},
{'name': 'nc2', 'type': ['null', {'name': 'NestedObj1', 'type': 'record', 'fields': [
{'name': 'na1', 'type': ['null', 'string']},
{'name': 'nb1', 'type': ['null', 'double']}
]}]}
]}]},
{"name": "nested2", "type": ['null', 'NestedObj2']}
]
})
def encode_and_decode(schema_type):
data_schema = AvroSchema(ComplexRecord)
if schema_type == 'json':
data_schema = JsonSchema(ComplexRecord)
nested_obj1 = NestedObj1(na1='na1 value', nb1=20.5)
nested_obj2 = NestedObj2(na2=22, nb2=True, nc2=nested_obj1)
r = ComplexRecord(a=1, b=2, color=Color.red, color2=Color.blue,
nested=nested_obj2, nested2=nested_obj2,
mapNested={
'a': NestedObj3(na3=1, color=Color.green),
'b': NestedObj3(na3=2),
'c': NestedObj3(na3=3, color=Color.red)
}, mapNested2={
'd': NestedObj3(na3=4, color=Color.red),
'e': NestedObj3(na3=5, color=Color.blue),
'f': NestedObj3(na3=6)
}, arrayNested=[
NestedObj4(na4='value na4 1', nb4=100),
NestedObj4(na4='value na4 2', nb4=200)
], arrayNested2=[
NestedObj4(na4='value na4 3', nb4=300),
NestedObj4(na4='value na4 4', nb4=400)
])
data_encode = data_schema.encode(r)
data_decode = data_schema.decode(data_encode)
self.assertEqual(data_decode.__class__.__name__, 'ComplexRecord')
self.assertEqual(data_decode, r)
self.assertEqual(r.color3, Color.red)
self.assertEqual(r.mapNested['a'].color, Color.green)
self.assertEqual(r.mapNested['b'].color, None)
print('Encode and decode complex schema finish. schema_type: ', schema_type)
encode_and_decode('avro')
encode_and_decode('json')
def test_sub_record_set_to_none(self):
class NestedObj1(Record):
na1 = String()
nb1 = Double()
class NestedObj2(Record):
na2 = Integer()
nb2 = Boolean()
nc2 = NestedObj1()
data_schema = AvroSchema(NestedObj2)
r = NestedObj2(na2=1, nb2=True)
data_encode = data_schema.encode(r)
data_decode = data_schema.decode(data_encode)
self.assertEqual(data_decode.__class__.__name__, 'NestedObj2')
self.assertEqual(data_decode, r)
self.assertEqual(data_decode.na2, 1)
self.assertTrue(data_decode.nb2)
def test_produce_and_consume_complex_schema_data(self):
class Color(Enum):
red = 1
green = 2
blue = 3
class NestedObj1(Record):
na1 = String()
nb1 = Double()
class NestedObj2(Record):
na2 = Integer()
nb2 = Boolean()
nc2 = NestedObj1()
class NestedObj3(Record):
na3 = Integer()
color = CustomEnum(Color, required=True, required_default=True, default=Color.blue)
class NestedObj4(Record):
na4 = String()
nb4 = Integer()
class ComplexRecord(Record):
a = Integer()
b = Integer()
color = CustomEnum(Color)
nested = NestedObj2()
mapNested = Map(NestedObj3())
arrayNested = Array(NestedObj4())
client = pulsar.Client(self.serviceUrl)
def produce_consume_test(schema_type):
topic = "my-complex-schema-topic-" + schema_type
data_schema = AvroSchema(ComplexRecord)
if schema_type == 'json':
data_schema= JsonSchema(ComplexRecord)
producer = client.create_producer(
topic=topic,
schema=data_schema)
consumer = client.subscribe(topic, 'test', schema=data_schema)
nested_obj1 = NestedObj1(na1='na1 value', nb1=20.5)
nested_obj2 = NestedObj2(na2=22, nb2=True, nc2=nested_obj1)
r = ComplexRecord(a=1, b=2, nested=nested_obj2, mapNested={
'a': NestedObj3(na3=1, color=Color.red),
'b': NestedObj3(na3=2, color=Color.green),
'c': NestedObj3(na3=3)
}, arrayNested=[
NestedObj4(na4='value na4 1', nb4=100),
NestedObj4(na4='value na4 2', nb4=200)
])
producer.send(r)
msg = consumer.receive()
value = msg.value()
self.assertEqual(value.__class__.__name__, 'ComplexRecord')
self.assertEqual(value, r)
print('Produce and consume complex schema data finish. schema_type', schema_type)
produce_consume_test('avro')
produce_consume_test('json')
client.close()
def custom_schema_test(self):
def encode_and_decode(schema_definition):
avro_schema = AvroSchema(None, schema_definition=schema_definition)
company = {
"name": "company-name",
"address": 'xxx road xxx street',
"employees": [
{"name": "user1", "age": 25},
{"name": "user2", "age": 30},
{"name": "user3", "age": 35},
],
"labels": {
"industry": "software",
"scale": ">100",
"funds": "1000000.0"
},
"companyType": "companyType1"
}
data = avro_schema.encode(company)
company_decode = avro_schema.decode(data)
self.assertEqual(company, company_decode)
schema_definition = {
'doc': 'this is doc',
'namespace': 'example.avro',
'type': 'record',
'name': 'Company',
'fields': [
{'name': 'name', 'type': ['null', 'string']},
{'name': 'address', 'type': ['null', 'string']},
{'name': 'employees', 'type': ['null', {'type': 'array', 'items': {
'type': 'record',
'name': 'Employee',
'fields': [
{'name': 'name', 'type': ['null', 'string']},
{'name': 'age', 'type': ['null', 'int']}
]
}}]},
{'name': 'labels', 'type': ['null', {'type': 'map', 'values': 'string'}]},
{'name': 'companyType', 'type': ['null', {'type': 'enum', 'name': 'CompanyType', 'symbols':
['companyType1', 'companyType2', 'companyType3']}]}
]
}
encode_and_decode(schema_definition)
# Users could load schema from file by `fastavro.schema`
# Or use `avro.schema` like this `avro.schema.parse(open("examples/company.avsc", "rb").read()).to_json()`
encode_and_decode(load_schema("examples/company.avsc"))
def custom_schema_produce_and_consume_test(self):
client = pulsar.Client(self.serviceUrl)
def produce_and_consume(topic, schema_definition):
print('custom schema produce and consume test topic - ', topic)
example_avro_schema = AvroSchema(None, schema_definition=schema_definition)
producer = client.create_producer(
topic=topic,
schema=example_avro_schema)
consumer = client.subscribe(topic, 'test', schema=example_avro_schema)
for i in range(0, 10):
company = {
"name": "company-name" + str(i),
"address": 'xxx road xxx street ' + str(i),
"employees": [
{"name": "user" + str(i), "age": 20 + i},
{"name": "user" + str(i), "age": 30 + i},
{"name": "user" + str(i), "age": 35 + i},
],
"labels": {
"industry": "software" + str(i),
"scale": ">100",
"funds": "1000000.0"
},
"companyType": "companyType" + str((i % 3) + 1)
}
producer.send(company)
for i in range(0, 10):
msg = consumer.receive()
company = {
"name": "company-name" + str(i),
"address": 'xxx road xxx street ' + str(i),
"employees": [
{"name": "user" + str(i), "age": 20 + i},
{"name": "user" + str(i), "age": 30 + i},
{"name": "user" + str(i), "age": 35 + i},
],
"labels": {
"industry": "software" + str(i),
"scale": ">100",
"funds": "1000000.0"
}
}
self.assertEqual(msg.value(), company)
consumer.acknowledge(msg)
consumer.close()
producer.close()
schema_definition = {
'doc': 'this is doc',
'namespace': 'example.avro',
'type': 'record',
'name': 'Company',
'fields': [
{'name': 'name', 'type': ['null', 'string']},
{'name': 'address', 'type': ['null', 'string']},
{'name': 'employees', 'type': ['null', {'type': 'array', 'items': {
'type': 'record',
'name': 'Employee',
'fields': [
{'name': 'name', 'type': ['null', 'string']},
{'name': 'age', 'type': ['null', 'int']}
]
}}]},
{'name': 'labels', 'type': ['null', {'type': 'map', 'values': 'string'}]}
]
}
produce_and_consume('custom-schema-test-1', schema_definition=schema_definition)
produce_and_consume('custom-schema-test-2', schema_definition=load_schema("examples/company.avsc"))
client.close()
if __name__ == '__main__':
main()
``` |
{
"source": "2636869666/flask_frame",
"score": 3
} |
#### File: 2636869666/flask_frame/json_demo.py
```python
import json
from flask import Flask
from flask.json import jsonify
app = Flask(__name__)
@app.route('/json')
def str_json():
    # convert a dict to a JSON-formatted string
data ={'name':'zhangsan','age':18}
json_data1 = json.dumps(data)
print(type(json_data1))
    # parse a dict-formatted string into a dict, then dump it back to a JSON string
data2 = """{"name":"zhangsan","age":18}"""
json_dict = json.loads(data2)
json_data2 = json.dumps(json_dict)
print(type(json_data2))
    # convert a dict into a JSON response with jsonify
data3 = {'name':'zhangsan','age':18}
json_data3 = jsonify(data3)
print(type(json_data3))
return json_data1
if __name__ == '__main__':
app.run(debug=True)
```
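A quick way to exercise the `/json` view above without launching a server is Flask's built-in test client. This is a minimal sketch, assuming `json_demo.py` is importable as a module; the route and function names come from the file above.
```python
from json_demo import app

# the test client issues requests directly against the WSGI app
with app.test_client() as client:
    resp = client.get('/json')
    print(resp.status_code)              # 200
    print(resp.get_data(as_text=True))   # the json.dumps() string built in str_json()
```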
#### File: 2636869666/flask_frame/main.py
```python
from flask import Flask, render_template, request, make_response, url_for, redirect
from werkzeug.routing import BaseConverter
import json_demo
app = Flask(__name__)
class MyConverter(BaseConverter):
def __init__(self,url_map,*args):
super().__init__(url_map)
self.regex = args[0]
def to_python(self, value):
value = "aaaaaa"
return value
def to_url(self, value):
value = "bbbbbb"
return value
# register the converter in url_map so routes can reference it by the name "re"
app.url_map.converters["re"] = MyConverter
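# Illustrative use of the custom "re" converter (a sketch, not part of the
# original file): the argument after "re" reaches MyConverter as the regex.
#
#   @app.route('/items/<re("[a-z]{3,}"):name>')
#   def items(name):
#       return name   # to_python() above rewrites the matched value to "aaaaaa"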
@app.route('/')
def index():
    # render the template here and return it to the browser; a custom
    # status code can be appended after the response
    return render_template('index.html'), 666
@app.route('/login', methods=["GET", "POST"])
def login():
    # the same page serves the login form and validates the submission
    if request.method == 'POST':
        # pull the submitted values out of the request
        uname = request.form.get("username")
        pwd = request.form.get("password")
        if uname == 'a' and pwd == 'a':
            response = make_response("login succeeded")
            response.set_cookie("username", uname)
            response.set_cookie("password", pwd)
            return response
    # this page is reached by typing the address, i.e. via a GET request
    return render_template('login.html')
@app.route('/check/<name>/<age>')
def check(name,age):
return"姓名为%s,年龄为%s"%(name,age)
@app.route('/demo')
def demo():
    # url_for expects an endpoint name registered on this app; str_json lives on
    # json_demo's own app, so redirect to its path directly
    return redirect('/json')
# return redirect(url_for('index'))
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "264768502/NetBooter_Control",
"score": 3
} |
#### File: 264768502/NetBooter_Control/NetBooter_Control.py
```python
import sys,os
import time
import re
import serial
import telnetlib
import httplib
import base64
import string
class NetBooter_Control:
'''
Offer NetBooter Control class:
Support serial/telnet/http control
Support outlet status checker / power on / power off / reboot
Power on/off return setting success or fail, but reboot no return
How to use it:
From Serial
NetBooter = NetBooter_Control(mode='serial',serial_port='COM1')
NetBooter.power_on(1) #Return (True,'') for set Outlet 1 ON success
NetBooter.power_off(5) #Return (True,'') for set Outlet 5 OFF success
NetBooter.reboot(3) #No return, use NetBooter internal reboot function, don't suggest to use it
Outlet3_Status = NetBooter.check_outlet_status(3) #Return (True,'') for Outlet 3 is ON | (False,'') for OFF
From HTTP
NetBooter = NetBooter_Control(mode='http',ip='192.168.1.101')
NetBooter.power_on(2) #Return (True,'') for set Outlet 2 ON success
NetBooter.power_off(4) #Return (True,'') for set Outlet 4 OFF success
Outlet3_Status = NetBooter.check_outlet_status(3) #Return (True,'') for Outlet 3 is ON | (False,'') for OFF
'''
def __init__(self,mode='serial',serial_port='COM1',id='admin',password='<PASSWORD>',ip='0.0.0.0'):
'''
Class init
Input: mode(serial/telnet/http)
id/password [for login NetBooter]
For serial: serial_port([Windows]COM1/COM2/COM3/[Linux]/dev/tty...)
For telnet/http: ip
'''
if not isinstance(mode,str): raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Invalid mode '+str(mode))
if not isinstance(id,str): raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Invalid id '+str(id))
if not isinstance(password,str): raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Invalid password '+str(password))
self.mode = mode.lower()
self.id = id
self.password = password
if self.mode == 'serial':
self.NetBooter_serial = serial.Serial()
self.NetBooter_serial.port = serial_port
self.NetBooter_serial.baudrate = 9600
self.NetBooter_serial.timeout = 3
self.NetBooter_serial.bytesize = serial.EIGHTBITS
self.NetBooter_serial.parity = serial.PARITY_NONE
self.NetBooter_serial.stopbits = serial.STOPBITS_ONE
self.NetBooter_serial.xonxoff = 0
try:
self.NetBooter_serial.open()
except Exception as e:
raise Exception(str(e))
if not self.NetBooter_serial.isOpen():
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Fail to open '+str(serial_port))
for outlet in xrange(1,6):
self.power_on(outlet)
elif self.mode == 'telnet':
self.ip = ip
self.NetBooter_telnet = telnetlib.Telnet(self.ip)
elif self.mode == 'http':
self.ip = ip
self.auth = base64.encodestring('%s:%s' % (self.id, self.password)).replace('\n', '')
self.NetBooter_httpconnection = httplib.HTTPConnection(self.ip,timeout=10)
self.__check_netbooter__()
def __check_netbooter__(self):
if self.mode == 'serial':
try:
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
self.NetBooter_serial.flushOutput()
self.NetBooter_serial.write('\nsysshow\n')
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
temp1 = self.NetBooter_serial.read(300)
self.NetBooter_serial.write('\nsysshow\n')
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
temp2 = self.NetBooter_serial.read(300)
status = temp1+temp2
self.NetBooter_serial.flushOutput()
except Exception as e:
raise Exception(str(e))
if status.find('System Name') == -1:
raise Exception('Invalid NetBooter')
elif self.mode == 'telnet':
pass
elif self.mode == 'http':
NetBooter_Pattern = re.compile(r'Synaccess.*?NetBooter',re.I)
NetBooter_rly_Pattern = re.compile(r'<a onclick="ajxCmd\(\'(.*?rly.*?)\d\'\);">')
NetBooter_rb_Pattern = re.compile(r'<a onclick="ajxCmd\(\'(.*?rb.*?)\d\'\);">')
try:
self.NetBooter_httpconnection.putrequest("POST",'')
self.NetBooter_httpconnection.putheader("Authorization", "Basic %s" % self.auth)
self.NetBooter_httpconnection.endheaders()
response = self.NetBooter_httpconnection.getresponse()
res = response.read()
except Exception as e:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Init http connection to NetBooter fail: '+str(e))
if response.status != 200:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Init http connection to NetBooter fail: '+str(response.status))
if not NetBooter_Pattern.search(res):
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] http connection is not NetBooter: '+str(res))
rly_pair = NetBooter_rly_Pattern.search(res)
if rly_pair:
self.rly_url = rly_pair.group(1)
else:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Fail to find NetBooter rly url: '+str(res))
rb_pair = NetBooter_rb_Pattern.search(res)
if rb_pair:
self.rb_url = rb_pair.group(1)
else:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Fail to find NetBooter rb url: '+str(res))
def __del__(self):
if self.mode == 'serial':
self.NetBooter_serial.close()
elif self.mode == 'telnet':
self.NetBooter_telnet.close()
elif self.mode == 'http':
self.NetBooter_httpconnection.close()
def check_outlet_status(self,outlet):
'''
Check outlet status
Input: outlet(1/2/3/4/5)
Output: True,''(For ON)/False,''(For OFF)/Exception,Exception Reason
'''
if outlet not in (1,2,3,4,5,'1','2','3','4','5'):
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Invalid NetBooter outlet: '+str(outlet))
outlet = int(outlet)
if self.mode == 'serial':
if not self.NetBooter_serial.readable() or not self.NetBooter_serial.writable():
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] NetBooter Serial not Readable/Writeable')
try:
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
self.NetBooter_serial.flushOutput()
self.NetBooter_serial.write('\nsysshow\n')
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
temp1 = self.NetBooter_serial.read(300)
self.NetBooter_serial.write('\nsysshow\n')
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
temp2 = self.NetBooter_serial.read(300)
status = temp1+temp2
self.NetBooter_serial.flushOutput()
except Exception as e:
raise Exception(str(e))
try:
for line in status.split('\n'):
if line.find('Outlet Status(1-On, 0-Off. Outlet 1 to 5):') > -1:
#Clean Unrecognizable Code
line = line[43:].replace('\x00','')
#Outlet list should be ['','0/1','0/1','0/1','0/1','0/1','']
outlets = line.split(' ')
if outlets[outlet] == '0':
return False,''
elif outlets[outlet] == '1':
return True,''
else:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Invalid Status: '+str(outlets))
except Exception as e:
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+str(e)
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Not find outlet: '+str(status)
elif self.mode == 'telnet':
try:
self.NetBooter_telnet.write('\r\nsysshow\r\n'.encode('ascii'))
                status = self.NetBooter_telnet.read_until('Note - use WEB access for more settings',2)
except Exception as e:
raise Exception(str(e))
try:
for line in status.split('\n'):
if line.find('Outlet Status(1-On, 0-Off. Outlet 1 to 5):') > -1:
#Clean Unrecognizable Code
line = line[43:].replace('\x00','')
#Outlet list should be ['','0/1','0/1','0/1','0/1','0/1','']
outlets = line.split(' ')
if outlets[outlet] == '0':
return False,''
elif outlets[outlet] == '1':
return True,''
else:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Invalid Status: '+str(outlets))
except Exception as e:
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+str(e)
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Not find outlet: '+str(status)
elif self.mode == 'http':
res = self.NetBooter_httppost(url="/status.xml")
if res[0] != True:
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] No proper response from NetBooter: '+res[1]
swoutlet = outlet - 1
pattern = re.compile(r'<rly%s>(1|0)</rly%s>'%(swoutlet,swoutlet))
if not pattern.search(res[1]):
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Not find proper outlet status: '+res[1]
status = pattern.search(res[1]).group()[6:7]
if status == '0':
return False,''
elif status == '1':
return True,''
else:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Invalid Status: '+str(status))
def login(self):
'''
Login NetBooter for serial/telnet mode
No output
'''
if self.mode == 'serial':
if not self.NetBooter_serial.writable():
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] NetBooter Serial not Writeable')
try:
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
self.NetBooter_serial.flushOutput()
self.NetBooter_serial.write('\n!\nlogin\n')
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
self.NetBooter_serial.flushOutput()
self.NetBooter_serial.write(str(self.id)+'\n')
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
self.NetBooter_serial.flushOutput()
self.NetBooter_serial.write(str(self.password)+'\n')
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
self.NetBooter_serial.flushOutput()
except Exception as e:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+str(e))
elif self.mode == 'telnet':
try:
self.NetBooter_telnet.write('\r\nlogin\r\n'.encode('ascii'))
self.NetBooter_telnet.write(str(self.id)+'\r\n'.encode('ascii'))
self.NetBooter_telnet.write(str(self.password)+'\r\n'.encode('ascii'))
except Exception as e:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+str(e))
def power_on(self,outlet):
'''
Set specific outlet on
Input: outlet(1/2/3/4/5)
Output: True,''[Set success]/False,''[Set fail]/Exception,''
'''
if outlet not in (1,2,3,4,5,'1','2','3','4','5'):
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Invalid NetBooter outlet: '+str(outlet))
outlet = int(outlet)
if self.mode == 'http':
current_status = self.check_outlet_status(outlet)
if current_status[0] == True:
return True,''
elif current_status[0] == False:
swoutlet = outlet - 1
url = "/%s%s"%(self.rly_url,swoutlet)
res = self.NetBooter_httppost(url)
if res[0] == True:
if res[1] == 'Success! ':
new_status = self.check_outlet_status(outlet)
if new_status[0] == True:
return True,''
elif new_status[0] == False:
return False,'['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Power on outlet fail2: '+new_status[1]
else:
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+new_status[1]
else:
return False,'['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Power on outlet fail1: '+res[1]
else:
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+res[1]
else:
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+current_status[1]
time.sleep(2)
self.login()
if self.mode == 'serial':
if not self.NetBooter_serial.writable():
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] NetBooter Serial not Writeable')
try:
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
self.NetBooter_serial.flushOutput()
self.NetBooter_serial.write('\npset '+str(outlet)+' 1\n')
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
self.NetBooter_serial.flushOutput()
time.sleep(1)
except Exception as e:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+str(e))
elif self.mode == 'telnet':
try:
self.NetBooter_telnet.write(('\r\npset '+str(outlet)+' 1\r\n').encode('ascii'))
except Exception as e:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+str(e))
res_on = self.check_outlet_status(outlet)
if res_on[0] == True:
return True,''
elif res_on[0] == False:
return False,''
else:
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+res_on[1]
def power_off(self,outlet):
'''
Set specific outlet off
Input: outlet(1/2/3/4/5)
Output: True,''[Set success]/False,''[Set fail]/Exception,''
'''
if outlet not in (1,2,3,4,5,'1','2','3','4','5'):
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Invalid NetBooter outlet: '+str(outlet))
outlet = int(outlet)
if self.mode == 'http':
current_status = self.check_outlet_status(outlet)
if current_status[0] == False:
return True,''
elif current_status[0] == True:
swoutlet = outlet - 1
url = "/%s%s"%(self.rly_url,swoutlet)
res = self.NetBooter_httppost(url)
if res[0] == True:
if res[1] == 'Success! ':
new_status = self.check_outlet_status(outlet)
if new_status[0] == False:
return True,''
elif new_status[0] == True:
return False,'['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Power off outlet fail2: '+new_status[1]
else:
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+new_status[1]
else:
return False,'['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Power off outlet fail1: '+res[1]
else:
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+res[1]
else:
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+current_status[1]
time.sleep(2)
self.login()
if self.mode == 'serial':
if not self.NetBooter_serial.writable():
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] NetBooter Serial not Writeable')
try:
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
self.NetBooter_serial.flushOutput()
self.NetBooter_serial.write('\npset '+str(outlet)+' 0\n')
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
self.NetBooter_serial.flushOutput()
time.sleep(1)
except Exception as e:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+str(e))
elif self.mode == 'telnet':
try:
self.NetBooter_telnet.write(('\r\npset '+str(outlet)+' 0\r\n').encode('ascii'))
except Exception as e:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+str(e))
res_off = self.check_outlet_status(outlet)
if res_off[0] == False:
return True,''
elif res_off[0] == True:
return False,''
else:
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+res_off[1]
def reboot(self,outlet):
'''
Set specific outlet reboot by internal reboot function from NetBooter
Input: outlet(1/2/3/4/5)
No output
'''
if outlet not in (1,2,3,4,5,'1','2','3','4','5'):
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Invalid NetBooter outlet: '+str(outlet))
outlet = int(outlet)
        if self.mode == 'http':
            swoutlet = outlet - 1
            url = "/%s%s"%(self.rb_url,swoutlet)
            res = self.NetBooter_httppost(url)
            time.sleep(3)
            if res[0] == True and res[1] == 'Success! ':
                self.check_outlet_status(outlet)
            return
self.login()
if self.mode == 'serial':
if not self.NetBooter_serial.writable():
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] NetBooter Serial not Writeable')
try:
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
self.NetBooter_serial.flushOutput()
self.NetBooter_serial.write('\nrb '+str(outlet)+'\n')
self.NetBooter_serial.flush()
self.NetBooter_serial.flushInput()
self.NetBooter_serial.flushOutput()
#time.sleep(1)
except Exception as e:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+str(e))
elif self.mode == 'telnet':
try:
self.NetBooter_telnet.write(('\r\nrb '+str(outlet)+'\r\n').encode('ascii'))
except Exception as e:
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+str(e))
def NetBooter_httppost(self,url):
'''
Common NetBooter http post
Input: url(/status.xml[for get stauts] or /cmd.cgi?rly=#1[for set power on/off])
'''
try:
self.NetBooter_httpconnection.putrequest("POST", url)
self.NetBooter_httpconnection.putheader("Authorization", "Basic %s" % self.auth)
self.NetBooter_httpconnection.endheaders()
response = self.NetBooter_httpconnection.getresponse()
res = response.read()
except Exception as e:
return 'Exception','['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+']'+str(e)
if response.status != 200:
return False,'['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Unknown http connection status: '+str(response.status)
return True,res
``` |
{
"source": "264768502/QD780_Control",
"score": 2
} |
#### File: 264768502/QD780_Control/QD780.py
```python
import sys,os
import time
import serial
timing_map = {"[email protected]" : "480i29",
"[email protected]" : "480i30",
"[email protected]" : "480i60",
"[email protected]" : "480p59",
"[email protected]" : "480p60",
"[email protected]" : "576i25",
"[email protected]" : "576i50",
"[email protected]" : "576p50",
"[email protected]" : "720p23",
"[email protected]" : "720p24",
"[email protected]" : "720p25",
"[email protected]" : "720p29",
"[email protected]" : "720p30",
"[email protected]" : "720p50",
"[email protected]" : "720p59",
"[email protected]" : "720p60",
"[email protected]" : "1080i25",
"[email protected]" : "1080i29",
"[email protected]" : "1080i30",
"[email protected]" : "1080i50",
"[email protected]" : "1080i60",
"[email protected]" : "1080p23",
"[email protected]" : "1080p24",
"[email protected]" : "1080p25",
"[email protected]" : "1080p29",
"[email protected]" : "1080p30",
"[email protected]" : "1080p50",
"[email protected]" : "1080p59",
"[email protected]" : "1080p60",
"[email protected]" : "2160p23",
"[email protected]" : "2160p24",
"[email protected]" : "2160p25",
"[email protected]" : "2160p29",
"[email protected]" : "2160p30",
"[email protected]" : "2160p50",
"[email protected]" : "2160p59",
"[email protected]" : "2160p60",
"[email protected]" : "640x480_60Hz",
"[email protected]" : "640x480_75Hz",
"[email protected]" : "800x600_60Hz",
"[email protected]" : "800x600_72Hz",
"[email protected]" : "800x600_75Hz",
"[email protected]" : "1024x768_60Hz",
"[email protected]" : "1024x768_70Hz",
"[email protected]" : "1024x768_75Hz",
"[email protected]" : "1024x768_85Hz",
"[email protected]" : "1280x768_60Hz",
"[email protected]" : "1280x768_75Hz",
"[email protected]" : "1280x768_85Hz",
"[email protected]" : "1280x960_60Hz",
"[email protected]" : "1280x960_85Hz",
"[email protected]" : "1360x768_60Hz",
"[email protected]" : "1152x864_75Hz",
"[email protected]" : "1280x1024_60Hz",
"[email protected]" : "1280x1024_75Hz",
"[email protected]" : "1280x1024_85Hz",
"[email protected]" : "1680x1050_60Hz",
"[email protected]" : "1680x1050_85Hz",
"[email protected]" : "1920x1080_60Hz",
}
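# The keys above follow the "resolution@refresh-rate" notation implied by the
# QD780 format labels on the right (fractional rates like 29.97 are the
# NTSC-family rates). Illustrative lookup:
#   timing_map["720p@60Hz"]  ->  "720p60", a value accepted by set_resolution()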
class QD780_Control(object):
"""
"""
__command_prefix__ = 'R:\> '
def __init__(self,serial_port='COM1',baudrate=115200,cmd_retry_time=10):
"""
Class init
Input: For serial: serial_port([Windows]COM1/COM2/COM3/[Linux]/dev/tty...)
"""
self.QD780_serial = serial.Serial()
self.QD780_serial.port = serial_port
self.QD780_serial.baudrate = baudrate
self.QD780_serial.timeout = 1 #0.1
self.QD780_serial.bytesize = serial.EIGHTBITS
self.QD780_serial.parity = serial.PARITY_NONE
self.QD780_serial.stopbits = serial.STOPBITS_ONE
self.QD780_serial.xonxoff = 0
self.__cmd_retry_time = cmd_retry_time
try:
self.QD780_serial.open()
self.QD780_serial.write('\n')
except Exception as e:
raise Exception(str(e))
if not self.QD780_serial.isOpen():
raise Exception('['+os.path.basename(__file__)+']['+sys._getframe().f_code.co_name+'] Fail to open '+str(serial_port))
def __del__(self):
self.QD780_serial.close()
def __flush_all__(self):
self.QD780_serial.flush()
self.QD780_serial.flushInput()
self.QD780_serial.flushOutput()
def wait_console(self,timeout = 10):
self.__flush_all__()
self.QD780_serial.write('\n')
start_time = time.time()
while True:
time.sleep(0.01)
buffer_size = self.QD780_serial.inWaiting()
if buffer_size > 0:
buffer_data = self.QD780_serial.read(buffer_size)
if buffer_data.find(self.__command_prefix__) != -1:
self.__flush_all__()
return True
if time.time() - start_time > timeout:
self.__flush_all__()
return False
def write(self,cmd):
assert isinstance(cmd,str)
self.__flush_all__()
self.QD780_serial.write(cmd+'\n')
self.__flush_all__()
self.wait_console()
def read(self,cmd,timeout=10):
assert isinstance(cmd,str)
self.__flush_all__()
self.QD780_serial.write(cmd+'\n')
last_buffer_size = 0
buffer_data = ''
start_time = time.time()
while True:
time.sleep(0.01)
buffer_size = self.QD780_serial.inWaiting()
if buffer_size > 0 and buffer_size != last_buffer_size:
buffer_data = buffer_data + self.QD780_serial.read(buffer_size)
if buffer_data.find(cmd) != -1:
temp1 = buffer_data[buffer_data.find(cmd)+len(cmd):]
if temp1.find(self.__command_prefix__) > 4:
result_data = temp1[2:temp1.find(self.__command_prefix__)-2]
return result_data
last_buffer_size = buffer_size
if time.time() - start_time > timeout:
self.__flush_all__()
return False
def set_interface(self,arg = 'HDMI'):
"""
Switch QD780 output interface
Input: HDMI/DVI_VESA/DVI_EIA/VGA/YCbCr
Output: True/False
"""
assert isinstance(arg,str)
assert arg.upper() in ('HDMI', 'DVI_VESA', 'DVI_EIA', 'VGA', 'YCBCR', 'COMPONENT')
for retry_time in xrange(self.__cmd_retry_time):
if arg.upper() == 'HDMI':
self.write('XVSI 4')
res = self.read('XVSI?')
if res == '4':
return True
elif arg.upper() == 'DVI_VESA':
self.write('XVSI 2')
res = self.read('XVSI?')
if res == '2':
return True
elif arg.upper() == 'DVI_EIA':
self.write('XVSI 3')
res = self.read('XVSI?')
if res == '3':
return True
elif arg.upper() == 'VGA':
self.write('XVSI 9')
self.write('AVST 2')
self.write('SSST 1')
res1 = self.read('XVSI?')
res2 = self.read('AVST?')
res3 = self.read('SSST?')
res = (res1,res2,res3)
if res1 == '9' and res2 == '2' and res3 == '1':
return True
elif arg.upper() == 'YCBCR' or arg == 'COMPONENT':
self.write('XVSI 9')
self.write('AVST 6')
self.write('SSST 3')
res1 = self.read('XVSI?')
res2 = self.read('AVST?')
res3 = self.read('SSST?')
res = (res1, res2, res3)
if res1 == '9' and res2 == '6' and res3 == '3':
return True
return False
def set_resolution(self, arg='1080p60'):
"""
Set Resolution on QD780
Input: resoluton, such as 480p50
Output: True/False
"""
assert isinstance(arg, str)
for retry_time in xrange(self.__cmd_retry_time):
self.write('FMTL '+arg)
self.write('FMTU')
res = self.read('FMTL?')
if res == arg:
return True
return False
def set_video_bit(self, arg=8):
"""
Set Bit Depth of video from QD780
Input: arg (8/10/12)
Output: True / False
"""
assert isinstance(arg, str) or isinstance(arg, int)
__common_sleep__ = 1.5
for retry_time in xrange(self.__cmd_retry_time):
self.write('NBPC '+str(arg))
self.write('FMTU')
res = self.read('NBPC?')
if res == str(arg):
return True
return False
def set_color_space(self, arg='YUV422'):
"""
Set Color Space on QD780
Input: Color Space, such as YUV422/YUV444/RGB444/YUV420
Output: True/False
Limitation: Currently, YUV420 only support 3840x2160p50/60 timing
"""
assert isinstance(arg, str) and arg in ('YUV422', 'YUV444', 'RGB444', 'YUV420')
for retry_time in xrange(self.__cmd_retry_time):
if arg == 'YUV422':
self.write('DVST 14')
self.write('DVSM 2')
self.write('ALLU')
res1 = self.read('DVST?')
res2 = self.read('DVSM?')
if res1 == '14' and res2 == '2':
return True
elif arg == 'YUV444':
self.write('DVST 14')
self.write('DVSM 4')
self.write('ALLU')
res1 = self.read('DVST?')
res2 = self.read('DVSM?')
if res1 == '14' and res2 == '4':
return True
elif arg == 'YUV420':
return False # YUV420 only support 3840x2160p50/60 timing
elif arg == 'RGB444':
self.write('DVST 10')
self.write('DVSM 0')
self.write('ALLU')
res1 = self.read('DVST?')
res2 = self.read('DVSM?')
if res1 == '10' and res2 == '0':
return True
return False
def set_pattern(self, arg = 'PGCwrgb'):
"""
Switch QD780 output pattern
Input: pattern
Output: True/False
Pattern List: SMPTEBar | Regulate | Flat_Yel
H_Stair | Checker | Flat_Blk
Pluge | Focus | Crosshtch
Needle | Multibrst | Anmorphic
HiLoTrk | SplitGray | GrayBar
Overscan | LG_V_CBAR | Staircase
Window1 | LG_H_CBAR | PulseBar
Window2 | V_3BARS | Rev_Grid
Raster | Flat_Wht | Linearity
DecodAdj | Flat_Red | PRN24Bit
DecodChk | Flat_Grn | ZonePlate
ColorBar | Flat_Blu | User01
Ramp | Flat_Cyn | AuxTest
Converge | Flat_Mag | PGCwrgb
"""
assert isinstance(arg,str)
pattern_list=('SMPTEBar','Regulate', 'Flat_Yel',
'H_Stair', 'Checker', 'Flat_Blk',
'Pluge', 'Focus', 'Crosshtch',
'Needle', 'Multibrst','Anmorphic',
'HiLoTrk', 'SplitGray','GrayBar',
'Overscan','LG_V_CBAR','Staircase',
'Window1', 'LG_H_CBAR','PulseBar',
'Window2', 'V_3BARS', 'Rev_Grid',
'Raster', 'Flat_Wht', 'Linearity',
'DecodAdj','Flat_Red', 'PRN24Bit',
'DecodChk','Flat_Grn', 'ZonePlate',
'ColorBar','Flat_Blu', 'AuxTest',
'Ramp', 'Flat_Cyn', 'PGCwrgb',
'Converge','Flat_Mag')
for retry_time in xrange(self.__cmd_retry_time):
self.write('IMGL '+arg)
self.write('IMGU')
res = self.read('IMGU?')
if res == False:
continue
elif res.lower() == arg.lower():
return True
elif arg == 'Anmorphic' and res == 'Crosshtch':
return True
elif arg in ('PulseBar','Rev_Grid') and res == 'Linearity':
return True
elif arg in ('Checker','GrayBar','Staircase') and res == 'H_Stair':
return True
elif arg in ('Flat_Blk','Flat_Wht','Flat_Red','Flat_Grn','Flat_Blu','Flat_Cyn','Flat_Mag') and res == 'Flat':
return True
elif arg == 'AuxTest':
return True
elif arg not in pattern_list:
return True
return False
def read_edid(self):
"""
Read EDID, return RAW Data
Like below:
00FFFFFFFFFFFF00593A04100101010100180103806E3E782A97DDA45651A1240A474AA5CE0001010101010101010101010101010101023A801871382D40582C450047684200001E641900404100263018883600476842000018000000FC0045353030692D42310A20202020000000FD00384C1F500F000A20202020202001A70203217149010607020305900420260907071507508301000067030C001000001E023A801871382D40582C450047684200001E011D007251D01E206E28550047684200001E8C0AA01451F01600267C430047684200009800000000000000000000000000000000000000000000000000000000000000000000000000000000C9
"""
for retry_time in xrange(self.__cmd_retry_time):
res = self.read('EDID?')
if res != False:
return res
return False
def read_hdcp(self):
"""
Read HDCP, return True(Pass)/False(Fail)
"""
for retry_time in xrange(self.__cmd_retry_time):
res = self.read('HDCP?')
if res == '0':
return True
elif res == '1':
return False
return False
def avmute_QD780(self, arg = 0):
"""
Set AVMute on QD780
Input: 0(Disable) / 1(Enable)
Output: True/False
"""
        if arg in (0,'0',False,'Disable'):
            value = '0'
        elif arg in (1,'1',True ,'Enable' ):
            value = '1'
        else:
            return False
        self.write('AVMG '+value)
        return True
``` |
{
"source": "26618929/saltshaker_api",
"score": 2
} |
#### File: saltshaker_api/resources/gitfs.py
```python
from flask_restful import Resource, reqparse, request
from fileserver.git_fs import gitlab_project
from common.const import role_dict
from common.log import loggers
from common.sso import access_required
from common.audit_log import audit_log
from flask import g
from resources.sls import delete_sls
import base64
logger = loggers()
parser = reqparse.RequestParser()
parser.add_argument("product_id", type=str, required=True, trim=True)
parser.add_argument("branch", type=str, default="master", trim=True)
parser.add_argument("path", type=str, default="", trim=True)
parser.add_argument("project_type", type=str, required=True, trim=True)
parser.add_argument("action", type=str, default="", trim=True)
parser.add_argument("content", type=str, default="", trim=True)
# Get all branches
class BranchList(Resource):
@access_required(role_dict["common_user"])
def get(self):
args = parser.parse_args()
project, _ = gitlab_project(args["product_id"], args["project_type"])
if isinstance(project, dict):
return project, 500
else:
branch_list = []
try:
branch = project.branches.list()
for b in branch:
branch_list.append(b.name)
except Exception as e:
logger.error("Get branch error: %s" % e)
return {"status": False, "message": str(e)}, 500
return {"data": branch_list, "status": True, "message": ""}, 200
# Get the repository tree structure
class FilesList(Resource):
@access_required(role_dict["common_user"])
def get(self):
args = parser.parse_args()
project, product_name = gitlab_project(args["product_id"], args["project_type"])
if isinstance(project, dict):
return project, 500
else:
file_list = []
try:
items = project.repository_tree(path=args["path"], ref_name=args["branch"])
except Exception as e:
logger.error("Get file list error: %s" % e)
return {"status": False, "message": str(e)}, 404
if args["path"] == "/" or args["path"] is "":
for i in items:
if i["type"] == "tree":
file_list.append({"title": i["name"],
"type": i["type"],
"path": i["name"],
"loading": False,
"children": []
})
else:
file_list.append({"title": i["name"],
"type": i["type"],
"path": i["name"],
})
return {"data": [{
"title": product_name,
"expand": True,
"children": file_list,
"type": "tree",
}], "status": True, "message": ""}, 200
else:
for i in items:
if i["type"] == "tree":
file_list.append({"title": i["name"],
"type": i["type"],
"path": args["path"] + "/" + i["name"],
"loading": False,
"children": []
})
else:
file_list.append({"title": i["name"],
"type": i["type"],
"path": args["path"] + "/" + i["name"],
})
return {"data": file_list, "status": True, "message": ""}, 200
# Get file content
class FileContent(Resource):
@access_required(role_dict["common_user"])
def get(self):
args = parser.parse_args()
project, _ = gitlab_project(args["product_id"], args["project_type"])
if isinstance(project, dict):
return project, 500
else:
try:
content = project.files.get(file_path=args["path"], ref=args["branch"])
content_decode = content.decode().decode("utf-8")
except Exception as e:
logger.error("Get file content: %s" % e)
return {"status": False, "message": str(e)}, 404
return {"data": content_decode, "status": True, "message": ""}, 200
# Create, modify, and delete files via commit
class Commit(Resource):
@access_required(role_dict["common_user"])
def post(self):
args = parser.parse_args()
user = g.user_info["username"]
project, _ = gitlab_project(args["product_id"], args["project_type"])
        # supported actions: create, delete, move, update
data = {
'branch': args["branch"],
'commit_message': args["action"] + " " + args["path"],
'actions': [
{
'action': args["action"],
'file_path': args["path"],
'content': args["content"]
}
]
}
if isinstance(project, dict):
return project, 500
else:
try:
project.commits.create(data)
                # if deleting, also remove the wrapped SLS record from the database
if args["action"] == "delete":
delete_sls(args["path"])
audit_log(user, args["path"], args["product_id"], "sls", args["action"])
except Exception as e:
logger.error("Commit file: %s" % e)
return {"status": False, "message": str(e)}, 500
return {"status": True, "message": ""}, 200
# Upload a file
class Upload(Resource):
@access_required(role_dict["common_user"])
def post(self):
args = parser.parse_args()
user = g.user_info["username"]
project, _ = gitlab_project(args["product_id"], args["project_type"])
file = request.files['file']
if args["path"]:
file_path = args["path"] + "/" + file.filename
content = file.read()
try:
content_decode = content.decode()
actions = [
{
'action': 'create',
'file_path': file_path,
'content': content_decode
}
]
except Exception as e:
return {"status": False, "message": str(e)}, 500
# try:
# content_decode = content.decode()
# actions = [
# {
# 'action': args["action"],
# 'file_path': file_path,
# 'content': base64.b64encode(content_decode),
# 'encoding': 'base64',
# }
# ]
# except Exception as e:
# print(e)
data = {
'branch': args["branch"],
'commit_message': args["action"] + " " + args["path"],
'actions': actions
}
if isinstance(project, dict):
return project, 500
else:
try:
project.commits.create(data)
audit_log(user, file_path, args["product_id"], "sls", "upload")
except Exception as e:
logger.error("Upload file: %s" % e)
return {"status": False, "message": str(e)}, 500
return {"status": True, "message": ""}, 200
``` |
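For reference, `project.commits.create(data)` above is the standard python-gitlab commit API. Below is a stand-alone sketch of the same payload shape; the GitLab URL, token, project id, and file path are placeholders, not values from this project.
```python
import gitlab

# hypothetical connection details -- replace with real values
gl = gitlab.Gitlab("https://gitlab.example.com", private_token="<TOKEN>")
project = gl.projects.get(42)
data = {
    "branch": "master",
    "commit_message": "create states/web/nginx.sls",
    "actions": [
        {
            "action": "create",
            "file_path": "states/web/nginx.sls",
            "content": "nginx:\n  pkg.installed\n",
        }
    ],
}
project.commits.create(data)  # same call the Commit resource makes above
```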
{
"source": "2678918253/Learning-with-Noisy-Class-Labels-for-Instance-Segmentation",
"score": 3
} |
#### File: 2678918253/Learning-with-Noisy-Class-Labels-for-Instance-Segmentation/noisy_labels_SN_VOC.py
```python
import numpy as np
import json
def unjson(file):
    with open(file, 'r') as fo:
        data = json.load(fo)
    return data
# r is noise rate
r = 0.2
count = 0
p_a = ''
p_g = ''
a = unjson(p_a)
for i in range(len(a['annotations'])):
    if np.random.random() < r:
        # VOC has 20 classes and randint's upper bound is exclusive, so sample from [1, 21)
        a['annotations'][i]['category_id'] = np.random.randint(1, 21)
count += 1
with open(p_g, 'w') as file:
json.dump(a, file)
print(count)
``` |
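A sanity check one might run afterwards is to measure the realized noise rate by comparing the two annotation files. This is a sketch that reuses `unjson` from above and assumes `p_a` and `p_g` point at the clean and noisy annotation files.
```python
clean = unjson(p_a)
noisy = unjson(p_g)
flipped = sum(1 for c, n in zip(clean['annotations'], noisy['annotations'])
              if c['category_id'] != n['category_id'])
# slightly below r is expected, since a resampled label can equal the original
print(float(flipped) / len(clean['annotations']))
```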
{
"source": "2678flores/Balance-Comercial-de-Mercancias-Series-de-tiempo-",
"score": 3
} |
#### File: 2678flores/Balance-Comercial-de-Mercancias-Series-de-tiempo-/RSA y G-M Final.py
```python
def XMCD(a,b): ##### compute the inverse of eB with the extended Euclidean algorithm ####
    a0 = a
    b0 = b
    u0 = 0
    u1 = 1
    v0 = 1
    v1 = 0
    while b > 0:
        cociente = a//b
        residuo = a - b * cociente
        u = cociente * u1 + u0
        v = cociente * v1 + v0
        a = b
        b = residuo
        u0 = int(u1)
        u1 = int(u)
        v0 = int(v1)
        v1 = int(v)
    # this sign-free variant only guarantees a0*v0 = +1 or -1 (mod b0), so fix the sign
    if (a0 * v0) % b0 != 1:
        v0 = b0 - v0
    return v0 # 'a' could also be returned here; it holds the GCD
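# Illustrative check with textbook RSA numbers (not part of the original script):
# XMCD(17, 3120) gives 2753, and (17 * 2753) % 3120 == 1.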
def msg2num(s): #### convert the message to encrypt into ASCII codes ####
n = []
for c in s:
a = ord(c)
a = int(a)
n.append(a)
return n
def TFPA(exp,mod,num): ##### fast modular exponentiation; encrypts with the RSA public key ####
bas = 2
resto = []
while exp > 0:
residuo = exp % bas
exp = (exp - residuo) / bas
resto.append(residuo)
A = resto
tamano = len(A)
pot = []
x = num % mod
pot.append(x)
for i in range(0, tamano - 1):
x = (x**2) % mod
pot.append(x)
R = pot
res = 1
for i in range(0,tamano):
if A[i] == 1:
producto = (A[i] * R[i]) % mod
res = int((res * producto) % mod)
return res
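# Illustrative check: TFPA(13, 497, 4) == pow(4, 13, 497) == 445, computed with
# repeated squaring instead of twelve plain multiplications.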
def numbinario(ctex): #### convert each RSA-encrypted number to binary ####
binar = []
for x in ctex:
resto = []
while x > 0:
residuo = x % 2
x = (x - residuo) / 2
resto.append(residuo)
binar.append(resto)
return binar
def encrip_G_M(m1,a1,N1): ##### encrypt the binary digits (already RSA-encrypted) with Goldwasser-Micali ####
import random
r = random.randint(1,N1)
if m1 == 0:
c = (r**2) % N1
else:
z = r**2
c = (a1 * z) % N1
return c
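# Decryption (done further below via TFPA) rests on Euler's criterion:
# c**((p-1)/2) % p == 1 exactly when c is a quadratic residue mod p, i.e. the bit was 0.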
def binario(m1): #### convert a binary digit list (least-significant bit first) to base 10 ####
    resultado = 0
    for i in range(0,len(m1)):
        if m1[i] == 1:
            resultado = resultado + 2 ** i
    return resultado
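# Illustrative check: binario([1, 0, 1]) == 5 (the list is least-significant-bit first).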
#---------------------Preparing the public key (this part is Bob's job)-------------------------------
print '(BOB) PREPARING THE PUBLIC KEY \n'
p =1223 #73939133 #int(raw_input('Enter a prime integer p: '))
q =1987 #524287 #int(raw_input('Enter a prime integer q: '))
eB =948047 #987773 #int(raw_input('Enter an integer eB: ')) #eB must be coprime to phi.
a =537 #53737 #int(raw_input('Enter an integer a: ')) #choose a to be a quadratic non-residue (mod p) and (mod q).
p1= (p - 1) / 2
N = p * q
phi = (p - 1) * (q - 1)
print 'The public key is kpub',(N,eB,a),'\n'
#----------------------------------Preparing Bob's private key-----------------------------------
print '(BOB) PREPARING THE PRIVATE KEY \n'
kpriv = XMCD(eB,phi)
print 'The private key is kpriv',(kpriv,p),'\n'
#---------------------------Converting Alice's message to ASCII codes-------------------------------
print '(ALICE) ENCRYPTING THE MESSAGE \n'
mensaje = str(raw_input('Enter the message to encrypt for Bob: '))
textnum = msg2num(mensaje)
print 'The message in ASCII codes is: \n',textnum,'\n'
#---------------------------------- Encrypting with the RSA public key (FIRST LOCK)------------------------------------
ctext = []
for k in textnum:
    equis = TFPA(eB,N,k)
    ctext.append(equis)
print "The text encrypted with Bob's kpub under RSA is: \n",ctext,'\n'
#-------------------------Converting the RSA-encrypted message to binary -----------------------------
AX = numbinario(ctext)
print 'The RSA ciphertext in binary is:\n',AX,'\n'
#------------------Encrypting the binary digits with the Goldwasser-Micali public key (SECOND LOCK) ----------------
GM = []
for k in AX:
    gm = []
    for x in k:
        Z = int(encrip_G_M(x,a,N))
        gm.append(Z)
    GM.append(gm)
print 'Your text encrypted with RSA and Goldwasser-Micali is:\n','CIPHERTEXT BEGINS \n',GM,'\nCIPHERTEXT ENDS \n'
#----------------Decrypting the ciphertext with private key a in the Goldwasser-Micali system--------------------------
print '(BOB) DECRYPTING THE MESSAGE \n'
DGM = []
for k in GM:
    dgm = []
    for j in k:
        Z1 = int(TFPA(p1,p,j)) # Euler's criterion: a residue mod p means the bit was 0
        if Z1 == 1:
            bi = 0
        else:
            bi = 1
        dgm.append(bi)
    DGM.append(dgm)
print 'Decrypting Goldwasser-Micali: \n',DGM,'\n'
#-------------------------------------Converting the binary RSA ciphertext back to base 10-------------------
PENCIL = []
for x in DGM:
    Pencil = binario(x)
    PENCIL.append(Pencil)
print 'From binary to decimal: \n',PENCIL,'\n'
#---------------------------------Decrypting ctext with the RSA private key------------------------------------
DRSA = []
for x in PENCIL:
    drsa = int(TFPA(kpriv,N,x))
    DRSA.append(drsa)
print 'RSA decryption:\n', DRSA,'\n'
#---------------------Converting the numbers back to their corresponding ASCII characters------------------
Mensaje = []
for c in DRSA:
    letter = chr(c)
    Mensaje.append(letter)
MENSAJE = "".join(Mensaje)
print "ALICE'S ORIGINAL MESSAGE:\n",MENSAJE,'\nEND OF ORIGINAL MESSAGE'
``` |
{
"source": "2694048168/ComputerVisionDeepLearning",
"score": 3
} |
#### File: DigitalImageProcessing/python/bilateral_filter.py
```python
import sys
import math
import cv2 as cv
import numpy as np
def getClosenessWeight(sigma_g, H, W):
"""构建空间权重模板"""
r, c = np.mgrid[0:H:1, 0:W:1]
r -= (H - 1) // 2
c -= (W - 1) // 2
closeWeight = np.exp(-0.5*(np.power(r, 2) + np.power(c, 2)) / math.pow(sigma_g, 2))
return closeWeight
def bilateralFilterGray(image, H, W, sigma_g, sigma_d):
"""双边滤波
Args:
image (ndarray): 输入单通道图像, 灰度级范围[0,1]
H ([int]): 权重模板的高
W ([int]): 权重模板的宽
sigma_g ([float]): 空间距离权重模板的标准差,大于 1
sigma_d ([float]): 相似性权重模板的标准差, 小于 1
Returns:
[ndarray]: 双边滤波结果图像, 浮点型矩阵
"""
closenessWeight = getClosenessWeight(sigma_g, H, W)
center_H = (H - 1) // 2
center_W = (W - 1) // 2
rows, cols = image.shape
bilateral_filter_gray_image = np.zeros(image.shape, np.float32)
for r in range(rows):
for c in range(cols):
pixel = image[r][c]
            # boundary handling
            rTop = 0 if r-center_H < 0 else r-center_H
            rBottom = rows-1 if r+center_H > rows-1 else r+center_H
            cLeft = 0 if c-center_W < 0 else c-center_W
            cRight = cols-1 if c+center_W > cols-1 else c+center_W
            # region the weight template acts on
            region = image[rTop:rBottom+1, cLeft:cRight+1]
            # build the gray-level similarity weights
            similarityWeightTemp = np.exp(-0.5*np.power(region-pixel, 2.0)/math.pow(sigma_d, 2))
            closenessWeightTemp = closenessWeight[rTop-r+center_H:rBottom-r+center_H+1, cLeft-c+center_W:cRight-c+center_W+1]
            # multiply the two weight templates element-wise
            weightTemp = similarityWeightTemp * closenessWeightTemp
            # normalize the weight template
            weightTemp = weightTemp / np.sum(weightTemp)
            # multiply the weights with the corresponding neighborhood values and sum
            bilateral_filter_gray_image[r][c] = np.sum(region * weightTemp)
return bilateral_filter_gray_image
# --------------------------
if __name__ == "__main__":
if len(sys.argv) > 1:
image = cv.imread(sys.argv[1], 0)
if image is None:
print(f"Error: no such file or dictory.")
cv.imshow("OriginImage", image)
# [0-255] ----> [0-1]
image = image/255.0
        # 1. bilateral filtering
bilateral_filter_gray_image = bilateralFilterGray(image, 33, 33, 19, 0.2)
bilateral_filter_gray_image = (bilateral_filter_gray_image * 255).astype(np.uint8)
cv.imshow("bilateral_filter", bilateral_filter_gray_image)
cv.waitKey(0)
cv.destroyAllWindows()
else:
print(f"Usage: python histogram imageFile.")
```
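The double loop above is O(rows × cols × H × W) and slow on large images. OpenCV ships an optimized bilateral filter that can serve as a cross-check; this is a sketch with a hypothetical file name, and the diameter/sigma mapping to the parameters above is only approximate (OpenCV's sigmaColor works on the 0-255 scale).
```python
import cv2 as cv

image = cv.imread("input.png", 0)  # hypothetical file name
# d: neighborhood diameter; sigmaColor ~ similarity sigma, sigmaSpace ~ closeness sigma
fast = cv.bilateralFilter(image, 33, 50, 19)
cv.imwrite("bilateral_cv.png", fast)
```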
#### File: DigitalImageProcessing/python/calc_gray_hist.py
```python
import sys
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
def calcGrayHist(image):
"""计算图像的灰度直方图。
Args:
image ([type]): 单通道的灰度图像,图像深度为 8 bit
Returns:
一维 ndarray : 灰度图像的直方图,每一个灰度级对应的像素个数
"""
rows, cols = image.shape
grayHist = np.zeros([256], np.uint64)
for idx_row in range(rows):
for idx_col in range(cols):
grayHist[image[idx_row][idx_col]] += 1
return grayHist
# --------------------------
if __name__ == "__main__":
if len(sys.argv) > 1:
image = cv.imread(sys.argv[1], 0)
if image is None:
print(f"Error: no such file or dictory.")
        # 1. compute the histogram by hand
        grayHist = calcGrayHist(image)
        # visualize the gray-level histogram
x_range = range(256)
plt.plot(x_range, grayHist, linewidth=2, color="black")
y_maxValue = np.max(grayHist)
plt.axis([0, 255, 0, y_maxValue])
plt.xlabel("Gray Level")
plt.ylabel("Number of Pixels")
plt.show()
plt.close()
        # 2. compute the histogram with matplotlib
        rows, cols = image.shape
        # flatten the 2-D matrix into a 1-D array
        pixelSequence = image.reshape([rows*cols, ])
        numberBins = 256 # number of gray levels
histgram, bins, patch = plt.hist(pixelSequence, numberBins, facecolor="black", histtype="bar")
plt.xlabel(u"GrayLevel")
plt.ylabel(u"Number of Pixels")
plt.axis([0, 255, 0, np.max(histgram)])
plt.show()
plt.close()
else:
print(f"Usage: python histogram imageFile.")
```
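For large images, `cv.calcHist` computes the same 256-bin histogram far faster than the Python double loop. A minimal cross-check, assuming `calcGrayHist` is importable from the module above and using a placeholder image path:
```python
import cv2 as cv
import numpy as np
from calc_gray_hist import calcGrayHist  # assumes the file above is on the path

image = cv.imread("input.png", 0)  # hypothetical file name
hist_cv = cv.calcHist([image], [0], None, [256], [0, 256]).flatten()
hist_manual = calcGrayHist(image)
print(np.allclose(hist_cv, hist_manual))  # True
```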
#### File: DigitalImageProcessing/python/diff_gaussian.py
```python
import sys
from scipy import signal
import numpy as np
import cv2 as cv
def gaussConv(image, size, sigma):
    """gaussConv implements a non-normalized Gaussian convolution
    Args:
        image ([ndarray]): input image
        size ([tuple]): size of the kernel, a 2-tuple (height, width)
        sigma ([float]): standard deviation of the Gaussian
    Returns:
        [ndarray]: result of the Gaussian convolution
    """
    H, W = size
    # build the non-normalized horizontal Gaussian kernel
    _, x_col = np.mgrid[0:1, 0:W]
    x_col = x_col - (W - 1) / 2
    x_kernel = np.exp(-np.power(x_col, 2.0) / (2.0 * pow(sigma, 2.0)))
    img_xk = signal.convolve2d(image, x_kernel, "same", "symm", 0)
    # build the non-normalized vertical Gaussian kernel
    y_row, _ = np.mgrid[0:H, 0:1]
    y_row = y_row - (H - 1) / 2
    y_kernel = np.exp(-np.power(y_row, 2.0) / (2.0 * pow(sigma, 2.0)))
    img_xk_yk = signal.convolve2d(img_xk, y_kernel, "same", "symm", 0)
    img_xk_yk = img_xk_yk * 1.0/(2 * np.pi * pow(sigma, 2.0))
    return img_xk_yk
def DiffGuassian(image, size, sigma, k=1.1):
    # non-normalized Gaussian convolution with standard deviation sigma
    img_gauss_kernel_1 = gaussConv(image, size, sigma)
    # non-normalized Gaussian convolution with standard deviation k*sigma
    img_gauss_kernel_k = gaussConv(image, size, k*sigma)
    # difference of the two Gaussian convolutions
    diff_guass = img_gauss_kernel_k - img_gauss_kernel_1
    diff_guass = diff_guass / (pow(sigma, 2.0)*(k - 1))
    return diff_guass
# --------------------------
if __name__ == "__main__":
if len(sys.argv) > 1:
image = cv.imread(sys.argv[1], 0)
if image is None:
print(f"Error: no such file or dictory.")
cv.imshow("OriginImage",image)
        # -------- Difference of Gaussian Operator --------
sigma = 0.2
k = 1.1
size = (3, 3)
img_diff_gauss = DiffGuassian(image, size, sigma, k)
        # 1. binarization
edge = np.copy(img_diff_gauss)
edge[edge>0] = 255
edge[edge<=0] = 0
edge = edge.astype(np.uint8)
cv.imshow("edge_binary", edge)
        # 2. abstraction (ink-wash painting effect)
asbstraction_img = -np.copy(img_diff_gauss)
asbstraction_img = asbstraction_img.astype(np.float32)
asbstraction_img[asbstraction_img>=0] = 1.0
asbstraction_img[asbstraction_img<0] = 1.0 + np.tanh(asbstraction_img[asbstraction_img<0])
cv.imshow("abstraction_edge", asbstraction_img)
cv.waitKey(0)
cv.destroyAllWindows()
else:
print("Usge: python.py imageFile")
```
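The same difference-of-Gaussians response can be approximated with OpenCV's normalized `GaussianBlur`; since `gaussConv` above is deliberately non-normalized, only the zero crossings are expected to line up. A sketch with a placeholder file name:
```python
import cv2 as cv

image = cv.imread("input.png", 0).astype(float)  # hypothetical file name
sigma, k = 2.0, 1.1
g1 = cv.GaussianBlur(image, (0, 0), sigma)       # ksize (0, 0): derived from sigma
g2 = cv.GaussianBlur(image, (0, 0), k * sigma)
dog = (g2 - g1) / (sigma ** 2 * (k - 1))         # same scaling as DiffGuassian above
```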
#### File: DigitalImageProcessing/python/hist_equalization.py
```python
import sys
import math
import cv2 as cv
import numpy as np
def calcGrayHist(image):
"""计算图像的灰度直方图。
Args:
image ([type]): 单通道的灰度图像,图像深度为 8 bit
Returns:
一维 ndarray : 灰度图像的直方图,每一个灰度级对应的像素个数
"""
rows, cols = image.shape
grayHist = np.zeros([256], np.uint64)
for idx_row in range(rows):
for idx_col in range(cols):
grayHist[image[idx_row][idx_col]] += 1
return grayHist
def equalizeHist(image):
"""全局直方图均衡化。
Args:
image (ndarray): 矩阵形式的输入图像
Returns:
[ndarray]: 矩阵形式的经过直方图均衡化后的输出图像
"""
    # Histogram equalization is implemented in four main steps:
    # 1. compute the gray-level histogram of the image.
rows, cols = image.shape
grayHist = calcGrayHist(image)
    # 2. compute the cumulative histogram from the gray-level histogram.
zeroCumuMoment = np.zeros([256], np.uint32)
for p in range(256):
if p == 0:
zeroCumuMoment[p] = grayHist[0]
else:
zeroCumuMoment[p] = zeroCumuMoment[p-1] + grayHist[p]
    # 3. derive the mapping between input and output gray levels from the
    #    cumulative histogram and the equalization principle.
    output_q = np.zeros([256], np.uint8)
    coefficient = 256.0 / (rows * cols)
    for p in range(256):
        q = coefficient * float(zeroCumuMoment[p]) - 1
if q >= 0:
output_q[p] = math.floor(q)
else:
output_q[p] = 0
    # 4. use the gray-level mapping from step 3 to look up the output gray level of every pixel.
equalizeHistImage = np.zeros(image.shape, np.uint8)
for idx_row in range(rows):
for idx_col in range(cols):
equalizeHistImage[idx_row][idx_col] = output_q[image[idx_row][idx_col]]
return equalizeHistImage
# --------------------------
if __name__ == "__main__":
if len(sys.argv) > 1:
image = cv.imread(sys.argv[1], 0)
if image is None:
print("Error: no such file or directory.")
sys.exit()
cv.imshow("OriginImage", image)
# 1. Histogram equalization
equalHistImg = equalizeHist(image)
cv.imshow("EqualizeHistImage", equalHistImg)
# 2. Adaptive histogram equalization ----> contrast-limited adaptive histogram equalization (CLAHE)
# Build the CLAHE object
clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
# Contrast-limited adaptive histogram equalization
dst_contrastLimit = clahe.apply(image)
cv.imshow("ContrastLimitImage", dst_contrastLimit)
cv.waitKey(0)
cv.destroyAllWindows()
else:
print("Usage: python hist_equalization.py imageFile")
```
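As a quick cross-check, the manual routine above can be compared against OpenCV's built-in equalizer; the random test image below is an illustrative assumption:
```python
import numpy as np
import cv2 as cv

rng = np.random.default_rng(1)
img = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)

manual = equalizeHist(img)      # the function defined above
builtin = cv.equalizeHist(img)  # OpenCV's reference implementation

# The two use slightly different rounding, so compare the mean deviation
print(np.mean(np.abs(manual.astype(int) - builtin.astype(int))))
```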
#### File: DigitalImageProcessing/python/laplace_gaussian_operator.py
```python
import sys
from scipy import signal
import numpy as np
import cv2 as cv
def createLaplaceGaussianKernel(sigma, size):
"""构建高斯拉普拉斯卷积核
Args:
sigma ([float]): 高斯函数的标准差
size ([tuple]): 高斯核的大小,奇数
Returns:
[ndarray]: 高斯拉普拉斯卷积核
"""
H, W = size
r, c = np.mgrid[0:H:1, 0:W:1]
r = r - (H - 1) / 2
c = c - (W - 1) / 2
sigma2 = pow(sigma, 2.0)
norm2 = np.power(r, 2.0) + np.power(c, 2.0)
LoGKernel = (norm2 / sigma2 - 2)*np.exp(-norm2 / (2 * sigma2))
return LoGKernel
def LaplaceGaussianOperator(image, sigma, size, _boundary="symm", _fillvalue=0):
# Laplacian of Gaussian convolution kernel
laplace_gaussian_kernel = createLaplaceGaussianKernel(sigma=sigma, size=size)
img_laplace_gaussian_conv = signal.convolve2d(image, laplace_gaussian_kernel, mode="same", boundary=_boundary, fillvalue=_fillvalue)
return img_laplace_gaussian_conv
# --------------------------
if __name__ == "__main__":
if len(sys.argv) > 1:
image = cv.imread(sys.argv[1], 0)
if image is None:
print("Error: no such file or directory.")
sys.exit()
cv.imshow("OriginImage",image)
# -------- Laplacian of Gaussian Operator --------
img_laplace_gaussian_conv = LaplaceGaussianOperator(image, 1, (7, 7))
# Threshold to obtain a binary edge map
edge_binary = np.copy(img_laplace_gaussian_conv)
edge_binary[edge_binary>0] = 255
edge_binary[edge_binary<=0] = 0
edge_binary = edge_binary.astype(np.uint8)
cv.imshow("EdgeBinary", edge_binary)
# Invert so the edges display in black
edge_black_binary = 255 - edge_binary
cv.imshow("EdgeBinaryBlack", edge_black_binary)
cv.waitKey(0)
cv.destroyAllWindows()
else:
print("Usage: python laplace_gaussian_operator.py imageFile")
```
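One property worth checking: a Laplacian-of-Gaussian kernel should give (approximately) zero response on constant regions, i.e. its coefficients should sum to roughly zero once the support is wide enough relative to sigma. A small sketch using the builder above:
```python
# Assumes createLaplaceGaussianKernel from the script above is in scope
k = createLaplaceGaussianKernel(sigma=1.0, size=(7, 7))
print(k.shape)             # (7, 7)
print(round(k.sum(), 3))   # near zero (truncation leaves a small residual)
```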
#### File: DigitalImageProcessing/python/laplace_operator.py
```python
import sys
from scipy import signal
import numpy as np
import cv2 as cv
def LaplaceOperator(image, _boundary="fill", _fillvalue=0):
# laplace convolution kernel
laplace_kernel = np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]], np.float32)
# laplace_kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], np.float32)
img_laplace_conv = signal.convolve2d(image, laplace_kernel, mode="same", boundary=_boundary, fillvalue=_fillvalue)
return img_laplace_conv
# --------------------------
if __name__ == "__main__":
if len(sys.argv) > 1:
image = cv.imread(sys.argv[1], 0)
if image is None:
print("Error: no such file or directory.")
sys.exit()
cv.imshow("OriginImage",image)
# -------- Laplace Operator --------
img_laplace_conv = LaplaceOperator(image, "symm")
# case 1, thresholding
thresholdEdge = np.copy(img_laplace_conv)
thresholdEdge[thresholdEdge>0] = 255
thresholdEdge[thresholdEdge<0] = 0
thresholdEdge = thresholdEdge.astype(np.uint8)
cv.imshow("ThresholdEdge", thresholdEdge)
# case 2, abstraction (ink-wash effect)
abstractionEdge = np.copy(img_laplace_conv)
abstractionEdge = abstractionEdge.astype(np.float32)
abstractionEdge[abstractionEdge>=0] = 1.0
abstractionEdge[abstractionEdge<0] = 1.0 + np.tanh(abstractionEdge[abstractionEdge<0])
cv.imshow("AbstractionEdge", abstractionEdge)
cv.waitKey(0)
cv.destroyAllWindows()
else:
print("Usage: python laplace_operator.py imageFile")
```
#### File: DigitalImageProcessing/python/local_threshold_otsu.py
```python
import sys
import math
import numpy as np
import cv2 as cv
def calcGrayHist(image):
rows, cols = image.shape
grayHist = np.zeros([256], np.uint64)
for idx_row in range(rows):
for idx_col in range(cols):
grayHist[image[idx_row][idx_col]] += 1
return grayHist
def otsuThreshold(image):
rows, cols = image.shape
grayHist = calcGrayHist(image)
uniformGrayHist = grayHist / float(rows*cols)
# Zeroth-order and first-order cumulative moments
zeroCumuMoment = np.zeros([256], np.float32)
oneCumuMoment = np.zeros([256], np.float32)
for k in range(256):
if k == 0:
zeroCumuMoment[k] = uniformGrayHist[0]
oneCumuMoment[k] = (k) * uniformGrayHist[0]
else:
zeroCumuMoment[k] = zeroCumuMoment[k-1] + uniformGrayHist[k]
oneCumuMoment[k] = oneCumuMoment[k-1] + k * uniformGrayHist[k]
# Compute the between-class variance
variance = np.zeros([256], np.float32)
for k in range(255):
if zeroCumuMoment[k] == 0 or zeroCumuMoment[k] == 1:
variance[k] = 0
else:
variance[k] = math.pow(oneCumuMoment[255]*zeroCumuMoment[k] - oneCumuMoment[k], 2) / (zeroCumuMoment[k]*(1.0-zeroCumuMoment[k]))
# Find the threshold
threshLoc = np.where(variance[0:255] == np.max(variance[0:255]))
thresholdVal = threshLoc[0][0]
thresholdImg = np.copy(image)
thresholdImg[thresholdImg > thresholdVal] = 255
thresholdImg[thresholdImg <= thresholdVal] = 0
return thresholdVal, thresholdImg
# --------------------------
if __name__ == "__main__":
if len(sys.argv) > 1:
image = cv.imread(sys.argv[1], 0)
if image is None:
print(f"Error: no such file or directory.")
sys.exit()
cv.imshow("OriginImage", image)
thresholdVal, thresholdImg = otsuThreshold(image)
print(f"The threshold value is {thresholdVal}")
cv.imshow("ThresholdImage", thresholdImg)
cv.waitKey(0)
cv.destroyAllWindows()
else:
print(f"Usage: python gaussBlur imageFile.")
```
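OpenCV ships its own Otsu implementation, which makes a handy cross-check for the manual version above (the random test image is illustrative):
```python
import numpy as np
import cv2 as cv

rng = np.random.default_rng(2)
img = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)

manual_t, _ = otsuThreshold(img)  # the function defined above
cv_t, _ = cv.threshold(img, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
print(manual_t, cv_t)  # the two thresholds should closely agree
```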
#### File: DigitalImageProcessing/python/low_high_filter.py
```python
import sys
import numpy as np
import cv2 as cv
def fft2Image(src):
rows, cols = src.shape[:2]
# Get the optimal padded size for the fast Fourier transform
row_padding = cv.getOptimalDFTSize(rows)
col_padding = cv.getOptimalDFTSize(cols)
# Zero-pad the bottom and right sides
img_fft = np.zeros((row_padding, col_padding, 2), np.float32)
img_fft[:rows, :cols, 0] = src
# Fast Fourier transform
cv.dft(img_fft, img_fft, cv.DFT_COMPLEX_OUTPUT)
return img_fft
# ------------ Two measures of the Fourier transform: amplitude spectrum and phase spectrum ------------
def amplitudeSpectrum(img_fft):
real_part = np.power(img_fft[:, :, 0], 2.0)
imaginary_part = np.power(img_fft[:, :, 1], 2.0)
amplitude_part = np.sqrt(real_part + imaginary_part)
return amplitude_part
def graySpectrum(amplitude):
# Contrast stretching
amplitude_log = np.log(amplitude + 1.0)
# Normalization
spectrum_norm = np.zeros(amplitude_log.shape, np.float32)
cv.normalize(amplitude_log, spectrum_norm, 0, 1, cv.NORM_MINMAX)
return spectrum_norm
def createLPFilter(shape, center, radius, lpType=2, n=2):
"""构建三种低通滤波器:理想滤波器,巴特沃斯滤波器,高斯滤波器
Args:
shape ([tuple]): 滤波器的大小,表示快速傅里叶变换的尺寸; (high, width)
center ([tuple]): 傅里叶谱的中心位置; (x, y)
radius ([float]): 截至频率;
lpType (int, optional): 滤波器的类型. Defaults to 2.
n (int, optional): 巴特沃斯滤波器的阶数. Defaults to 2.
Returns:
[ndarray]: 低通滤波器
"""
rows, cols = shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c = c - center[0]
r = r - center[1]
d = np.power(c, 2.0) + np.power(r, 2.0)
lpFilter = np.zeros(shape, np.float32)
if radius <= 0:
return lpFilter
# case 1, ideal low-pass filter
if lpType == 0:
lpFilter = np.copy(d)
lpFilter[lpFilter < pow(radius, 2.0)] = 1
lpFilter[lpFilter >= pow(radius, 2.0)] = 0
# case 2, Butterworth low-pass filter
elif lpType == 1:
lpFilter = 1.0 / (1.0 + np.power(np.sqrt(d) / radius, 2 * n))
# case 3, Gaussian low-pass filter
elif lpType == 2:
lpFilter = np.exp(-d / (2.0 * pow(radius, 2.0)))
return lpFilter
# --------------------------
# Cutoff frequency
radius = 50
MAX_RADIUS = 100
lpType = 0
MAX_LPTYPE = 2
# --------------------------
if __name__ == "__main__":
if len(sys.argv) > 1:
image = cv.imread(sys.argv[1], 0)
if image is None:
print(f"Error: no such file or directory.")
sys.exit()
cv.imshow("OriginImage", image)
# ------------ step 1, reading image file ------------
fimage = np.zeros(image.shape, np.float32)
# ------------ step 2, (-1)^(r+c) ------------
for r in range(image.shape[0]):
for c in range(image.shape[1]):
if (r+c) % 2:
fimage[r][c] = -1 * image[r][c]
else:
fimage[r][c] = image[r][c]
# ------------ step 3 and step 4, zero-padding and FFT ------------
FImagefft2 = fft2Image(fimage)
amplitude = amplitudeSpectrum(FImagefft2)
spectrum = graySpectrum(amplitude)
cv.imshow("OriginalSpectrum", spectrum)
minValue, maxValue, minLoc, maxLoc = cv.minMaxLoc(amplitude)
cv.namedWindow("lpFilterSpectrum", 1)
def nothing(*arg):
pass
cv.createTrackbar("lpType", "lpFilterSpectrum", lpType, MAX_LPTYPE, nothing)
cv.createTrackbar("radius", "lpFilterSpectrum", radius, MAX_RADIUS, nothing)
result = np.zeros(spectrum.shape, np.float32)
while True:
radius = cv.getTrackbarPos("radius", "lpFilterSpectrum")
lpType = cv.getTrackbarPos("lpType", "lpFilterSpectrum")
# ------------ step 5, build the low-pass filter ------------
lpFilter = createLPFilter(spectrum.shape, maxLoc, radius, lpType)
# ------------ step 6, element-wise multiply the FFT with the low-pass filter ------------
rows, cols = spectrum.shape[:2]
fImagefft2_lpFilter = np.zeros(FImagefft2.shape, FImagefft2.dtype)
for i in range(2):
fImagefft2_lpFilter[:rows, :cols, i] = FImagefft2[:rows, :cols, i] * lpFilter
lp_amplitude = amplitudeSpectrum(fImagefft2_lpFilter)
lp_spectrum = graySpectrum(lp_amplitude)
cv.imshow("lpFilterSpectrum", lp_spectrum)
# ------------ step 7 and step 8, apply the inverse Fourier transform to the filtered spectrum and take the real part ------------
cv.dft(fImagefft2_lpFilter, result, cv.DFT_REAL_OUTPUT + cv.DFT_INVERSE + cv.DFT_SCALE)
# ------------ step 9, (-1)^(r+c) ------------
for r in range(rows):
for c in range(cols):
if (r + c) % 2:
result[r][c] = -1 * result[r][c]
# ------------ step 10, convert the data type and crop the top-left region ------------
for r in range(rows):
for c in range(cols):
if result[r][c] < 0:
result[r][c] = 0
elif result[r][c] > 255:
result[r][c] = 255
lpResult = result.astype(np.uint8)
lpResult = lpResult[:image.shape[0], :image.shape[1]]
cv.imshow("LPFilter", lpResult)
ch = cv.waitKey(5)
if ch == 27:
break
cv.waitKey(0)
cv.destroyAllWindows()
else:
print(f"Usage: python python-scripy.py imageFile.")
```
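The centering-multiply-invert pipeline above can be reproduced more compactly with NumPy's complex FFT helpers; this sketch is illustrative (the function name and default radius are assumptions, not from the original script):
```python
import numpy as np

def gaussian_lowpass(img, radius=30):
    # Center the spectrum with fftshift instead of the (-1)^(r+c) trick
    F = np.fft.fftshift(np.fft.fft2(img.astype(np.float32)))
    r, c = np.mgrid[:img.shape[0], :img.shape[1]]
    d2 = (r - img.shape[0] / 2) ** 2 + (c - img.shape[1] / 2) ** 2
    mask = np.exp(-d2 / (2.0 * radius ** 2))  # Gaussian low-pass mask
    out = np.fft.ifft2(np.fft.ifftshift(F * mask)).real
    return np.clip(out, 0, 255).astype(np.uint8)
```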
#### File: DigitalImageProcessing/python/open_close_morphology.py
```python
import sys
import cv2 as cv
# --------------------------
if __name__ == "__main__":
if len(sys.argv) > 1:
image = cv.imread(sys.argv[1], 0)
if image is None:
print("Error: no such file or directory.")
sys.exit()
cv.imshow("OriginImage",image)
# Structuring-element radius, iteration count, and their maxima
r, i = 1, 1
MAX_R, MAX_I = 20, 20
# Window for displaying the morphology results
cv.namedWindow("morphology", 1)
# Trackbar callback
def nothing(*arg):
pass
# Trackbar for the structuring-element radius
cv.createTrackbar("r", "morphology", r, MAX_R, nothing)
# Trackbar for the iteration count
cv.createTrackbar("i", "morphology", i, MAX_I, nothing)
while True:
# Read the current radius r from the trackbar
r = cv.getTrackbarPos("r", "morphology")
# Read the current iteration count i from the trackbar
i = cv.getTrackbarPos("i", "morphology")
# Create the structuring element
s = cv.getStructuringElement(cv.MORPH_RECT, (2*r+1, 2*r+1))
# 1. Morphological opening
# d = cv.morphologyEx(image, cv.MORPH_OPEN, s, iterations=i)
# 2. Morphological closing
# d = cv.morphologyEx(image, cv.MORPH_CLOSE, s, iterations=i)
# 3. Top-hat transform
# d = cv.morphologyEx(image, cv.MORPH_TOPHAT, s, iterations=i)
# 4. Black-hat (bottom-hat) transform
# d = cv.morphologyEx(image, cv.MORPH_BLACKHAT, s, iterations=i)
# 5. Morphological gradient
d = cv.morphologyEx(image, cv.MORPH_GRADIENT, s, iterations=i)
# Display the result
cv.imshow("morphology", d)
# cv.imwrite("./image/open.png", d)
ch = cv.waitKey(5)
# Esc exits the loop
if ch == 27:
break
cv.waitKey(0)
cv.destroyAllWindows()
else:
print("Usge: python.py imageFile")
```
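By definition, opening is erosion followed by dilation (closing is the reverse), so `morphologyEx` can be verified against the two-step form; the random binary image here is just for illustration:
```python
import numpy as np
import cv2 as cv

rng = np.random.default_rng(3)
img = (rng.random((64, 64)) > 0.5).astype(np.uint8) * 255
s = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))

opened = cv.morphologyEx(img, cv.MORPH_OPEN, s)
two_step = cv.dilate(cv.erode(img, s), s)
print(np.array_equal(opened, two_step))  # True
```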
#### File: DigitalImageProcessing/python/prewitt_operator.py
```python
import sys
from scipy import signal
import numpy as np
import cv2 as cv
def PrewittOperator(image, _boundary="symm"):
"""Prewitt separable edge-detection operator.
Args:
image ([ndarray]): original input image
_boundary (str, optional): boundary padding mode. Defaults to "symm".
Returns:
[tuple]: Prewitt operator outputs as a tuple
"""
# 1. Mean smoothing in the vertical direction
kernel_smooth_y = np.array([[1], [1], [1]], np.float32)
img_conv_prewitt_x = signal.convolve2d(image, kernel_smooth_y, mode="same", boundary=_boundary)
# 2. Difference in the horizontal direction (applied to the smoothed result)
diff_x = np.array([[1, 0, -1]], np.float32)
img_conv_prewitt_x = signal.convolve2d(img_conv_prewitt_x, diff_x, mode="same", boundary=_boundary)
# 1. Mean smoothing in the horizontal direction
kernel_smooth_x = np.array([[1, 1, 1]], np.float32)
img_conv_prewitt_y = signal.convolve2d(image, kernel_smooth_x, mode="same", boundary=_boundary)
# 2. Difference in the vertical direction
diff_y = np.array([[1], [0], [-1]], np.float32)
img_conv_prewitt_y = signal.convolve2d(img_conv_prewitt_y, diff_y, mode="same", boundary=_boundary)
return (img_conv_prewitt_x, img_conv_prewitt_y)
# --------------------------
if __name__ == "__main__":
if len(sys.argv) > 1:
image = cv.imread(sys.argv[1], 0)
if image is None:
print("Error: no such file or directory.")
sys.exit()
cv.imshow("OriginImage",image)
# Note the distinction between edge strength and its gray-level display
img_prewitt_x, img_prewitt_y = PrewittOperator(image)
# Take absolute values to get the horizontal and vertical edge strengths
abs_img_prewitt_x = np.abs(img_prewitt_x)
abs_img_prewitt_y = np.abs(img_prewitt_y)
# Gray-level display of the horizontal and vertical edge strengths
edge_x = abs_img_prewitt_x.copy()
edge_y = abs_img_prewitt_y.copy()
# Saturate values greater than 255
edge_x[edge_x>255] = 255
edge_y[edge_y>255] = 255
# Data type conversion
edge_x = edge_x.astype(np.uint8)
edge_y = edge_y.astype(np.uint8)
cv.imshow("edge_x", edge_x)
cv.imshow("edge_y", edge_y)
# Compute the final edge strength from the two Prewitt convolution results
# There are several ways to do this; here a 0.5/0.5 weighted average is used
edge = 0.5*abs_img_prewitt_x + 0.5*abs_img_prewitt_y
# Gray-level display of the edge strength
edge[edge>255] = 255
edge = edge.astype(np.uint8)
cv.imshow("edge", edge)
cv.waitKey(0)
cv.destroyAllWindows()
else:
print("Usage: python prewitt_operator.py imageFile")
```
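The separable pair used above is equivalent to the familiar full 3x3 Prewitt kernels, which can be recovered by fully convolving the smoothing and difference vectors:
```python
import numpy as np
from scipy import signal

smooth = np.array([[1], [1], [1]], np.float32)  # 3x1 mean smoothing
diff = np.array([[1, 0, -1]], np.float32)       # 1x3 difference

print(signal.convolve2d(smooth, diff))      # [[1 0 -1], [1 0 -1], [1 0 -1]]
print(signal.convolve2d(smooth.T, diff.T))  # [[1 1 1], [0 0 0], [-1 -1 -1]]
```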
#### File: DigitalImageProcessing/python/scharr_operator.py
```python
import sys
from scipy import signal
import numpy as np
import cv2 as cv
def ScharrOperator(image, _boundary="symm"):
# Convolving image with scharr_x (horizontal difference) reflects the vertical edge strength
scharr_x = np.array([[3, 0, -3], [10, 0, -10], [3, 0, -3]], np.float32)
img_convX = signal.convolve2d(image, scharr_x, mode="same", boundary=_boundary)
# Convolving image with scharr_y (vertical difference) reflects the horizontal edge strength
scharr_y = np.array([[3, 10, 3], [0, 0, 0], [-3, -10, -3]], np.float32)
img_convY = signal.convolve2d(image, scharr_y, mode="same", boundary=_boundary)
return (img_convX, img_convY)
# --------------------------
if __name__ == "__main__":
if len(sys.argv) > 1:
image = cv.imread(sys.argv[1], 0)
if image is None:
print("Error: no such file or directory.")
sys.exit()
cv.imshow("OriginImage",image)
img_convX, img_convY = ScharrOperator(image)
# The convolution with scharr_x reflects the vertical edge strength
img_convX = np.abs(img_convX)
edge_vertical = img_convX.astype(np.uint8)
cv.imshow("edge_vertical", edge_vertical)
# The convolution with scharr_y reflects the horizontal edge strength
img_convY = np.abs(img_convY)
edge_horizontal = img_convY.astype(np.uint8)
cv.imshow("edge_horizontal", edge_horizontal)
# Use the square root of the sum of squares to measure the final output edge
edge = np.sqrt(np.power(img_convX, 2.0) + np.power(img_convY, 2.0))
edge = np.round(edge)
edge[edge>255] = 255
edge = edge.astype(np.uint8)
cv.imshow("edge", edge)
cv.waitKey(0)
cv.destroyAllWindows()
else:
print("Usage: python scharr_operator.py imageFile")
```
#### File: DigitalImageProcessing/python/sobel_operator.py
```python
import sys
import math
from scipy import signal
import numpy as np
import cv2 as cv
def PascalSmooth(n):
"""PascalSmooth returns the n-th order unnormalized Gaussian smoothing operator,
i.e. the coefficients of the binomial expansion of order n-1.
Factorials are computed with math.factorial from the math package; the parameter n must be odd.
Args:
n ([int]): order of the Gaussian convolution operator (odd)
Returns:
[array]: coefficients of the Gaussian operator, used as the smoothing kernel of the Sobel operator
"""
pascalSmooth = np.zeros([1, n], np.float32)
for idx in range(n):
pascalSmooth[0][idx] = math.factorial(n - 1) / (math.factorial(idx) * math.factorial(n - 1 - idx))
return pascalSmooth
def PascalDiff(n):
"""PascalDiff returns the n-th order difference operator, performing the directional differencing step of the Sobel operator.
Args:
n ([int]): order of the Sobel difference
Returns:
[array]: n-th order Sobel difference coefficients
"""
pascalDiff = np.zeros([1, n], np.float32)
pascalSmooth_previous = PascalSmooth(n - 1)
for idx in range(n):
if idx == 0:
# Always equals 1
pascalDiff[0][idx] = pascalSmooth_previous[0][idx]
elif idx == n - 1:
# Always equals -1
pascalDiff[0][idx] = -pascalSmooth_previous[0][idx - 1]
else:
pascalDiff[0][idx] = pascalSmooth_previous[0][idx] - pascalSmooth_previous[0][idx - 1]
return pascalDiff
def GetSobelKernel(n):
"""Fully convolving the smoothing operator from PascalSmooth with the
difference operator from PascalDiff yields the complete n x n Sobel kernels
for the horizontal and vertical directions.
Note that this step is redundant when actually applying the Sobel convolution,
since separability makes the two 1-D passes sufficient; it is only used here
to obtain the full Sobel kernels. Returns the horizontal and vertical kernels.
Args:
n ([int]): order of the Sobel operator
Returns:
[array]: horizontal and vertical Sobel convolution kernels
"""
pascalSmoothKernel = PascalSmooth(n)
pascalDiffKernel = PascalDiff(n)
# Horizontal-direction kernel
sobelKernel_x = signal.convolve2d(pascalSmoothKernel.transpose(), pascalDiffKernel, mode="full")
# Vertical-direction kernel
sobelKernel_y = signal.convolve2d(pascalSmoothKernel, pascalDiffKernel.transpose(), mode="full")
return (sobelKernel_x, sobelKernel_y)
def SobelOperator(image, n):
"""With the Sobel smoothing and difference operators built, this function
performs the 'same' convolution of the image with the Sobel kernels:
the image is first convolved with the vertical smoothing operator, and the
result is then convolved with the horizontal difference operator, giving
the convolution of the image with the sobel_x kernel.
Similarly, the image is first convolved with the horizontal smoothing
operator, and the result is then convolved with the vertical difference
operator, giving the convolution of the image with the sobel_y kernel.
Args:
image ([ndarray]): original input image for the Sobel operator
n ([int]): order of the Sobel operator
Returns:
[ndarray]: horizontal Sobel convolution result; vertical convolution result
"""
pascalSmoothKernel = PascalSmooth(n)
pascalDiffKernel = PascalDiff(n)
# -------- Convolution with the horizontal Sobel kernel --------
# Separable kernel, step 1: smooth in the vertical direction
img_sobel_x = signal.convolve2d(image, pascalSmoothKernel.transpose(), mode="same")
# Separable kernel, step 2: difference in the horizontal direction
img_sobel_x = signal.convolve2d(img_sobel_x, pascalDiffKernel, mode="same")
# -------- Convolution with the vertical Sobel kernel --------
# Separable kernel, step 1: smooth in the horizontal direction
img_sobel_y = signal.convolve2d(image, pascalSmoothKernel, mode="same")
# Separable kernel, step 2: difference in the vertical direction
img_sobel_y = signal.convolve2d(img_sobel_y, pascalDiffKernel.transpose(), mode="same")
return img_sobel_x, img_sobel_y
# --------------------------
if __name__ == "__main__":
if len(sys.argv) > 1:
image = cv.imread(sys.argv[1], 0)
if image is None:
print("Error: no such file or directory.")
sys.exit()
cv.imshow("OriginImage",image)
# Note the distinction between edge strength and its gray-level display
img_sobel_x, img_sobel_y = SobelOperator(image, 3)
# Take absolute values to get the horizontal and vertical edge strengths
abs_img_sobel_x = np.abs(img_sobel_x)
abs_img_sobel_y = np.abs(img_sobel_y)
# Gray-level display of the horizontal and vertical edge strengths
edge_x = abs_img_sobel_x.copy()
edge_y = abs_img_sobel_y.copy()
# Saturate values greater than 255
edge_x[edge_x>255] = 255
edge_y[edge_y>255] = 255
# Data type conversion
edge_x = edge_x.astype(np.uint8)
edge_y = edge_y.astype(np.uint8)
cv.imshow("edge_x", edge_x)
cv.imshow("edge_y", edge_y)
# Compute the final edge strength from the two Sobel convolution results
# There are several ways to do this; here the square root of the sum of squares is used
edge = np.sqrt(np.power(img_sobel_x, 2.0) + np.power(img_sobel_y, 2.0))
# Gray-level display of the edge strength
# Truncating strengths above 255 directly to 255 may leave the edges less smooth
# edge[edge>255] = 255
# edge = edge.astype(np.uint8)
# cv.imshow("edge_255", edge)
# Alternatively, apply histogram normalization or plain normalization to the edge strength.
# Normalizing gives the gray-level display; if the contrast is low, a gamma transform can enhance it
edge = edge / np.max(edge)
edge = np.power(edge, 1)
edge *= 255
edge = edge.astype(np.uint8)
cv.imshow("Sobel_scale", edge)
# cv.imwrite("./image/Sobel_scale.png", edge)
cv.waitKey(0)
cv.destroyAllWindows()
else:
print("Usage: python sobel_operator.py imageFile")
```
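For n=3 the Pascal-triangle construction above reduces to the classic Sobel kernels, which is an easy correctness check:
```python
# Assumes GetSobelKernel from the script above is in scope
kx, ky = GetSobelKernel(3)
print(kx)  # [[ 1.  0. -1.], [ 2.  0. -2.], [ 1.  0. -1.]]
print(ky)  # [[ 1.  2.  1.], [ 0.  0.  0.], [-1. -2. -1.]]
```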
#### File: DigitalImageProcessing/python/spectral_residual_significance.py
```python
import sys
import math
import numpy as np
import cv2 as cv
def FFT2Image(src):
rows, cols = src.shape[:2]
# Get the optimal padded size for the fast Fourier transform
row_padding = cv.getOptimalDFTSize(rows)
col_padding = cv.getOptimalDFTSize(cols)
# Zero-pad the bottom and right sides
img_fft = np.zeros((row_padding, col_padding, 2), np.float32)
img_fft[:rows, :cols, 0] = src
# Fast Fourier transform
cv.dft(img_fft, img_fft, cv.DFT_COMPLEX_OUTPUT)
return img_fft
def AmplitudeSpectrum(img_fft):
real_part = np.power(img_fft[:, :, 0], 2.0)
imaginary_part = np.power(img_fft[:, :, 1], 2.0)
amplitude_part = np.sqrt(real_part + imaginary_part)
return amplitude_part
def graySpectrum(amplitude):
# Contrast stretching
amplitude_log = np.log(amplitude + 1.0)
# Normalization
spectrum_norm = np.zeros(amplitude_log.shape, np.float32)
cv.normalize(amplitude_log, spectrum_norm, 0, 1, cv.NORM_MINMAX)
return spectrum_norm
def phaseSpectrum(fft_img):
rows, cols = fft_img.shape[:2]
# Compute the corresponding phase angle
phase_angle = np.arctan2(fft_img[:, :, 1], fft_img[:, :, 0])
# Keep the phase in radians: np.cos/np.sin in the reconstruction below expect radians
phase_spectrum = phase_angle
return phase_spectrum
# --------------------------
if __name__ == "__main__":
if len(sys.argv) > 1:
image = cv.imread(sys.argv[1], 0)
if image is None:
print("Error: no such file or directory.")
sys.exit()
cv.imshow("OriginImage", image)
# ------------ Saliency detection: spectral residual ------------
# step 1, compute fft of image
fft_img = FFT2Image(image)
# step 2, compute amplitude spectrum of fft
amplitude_spectrum = AmplitudeSpectrum(fft_img)
# compute gray level of amplitude spectrum
amplitude_spectrum_log = graySpectrum(amplitude_spectrum)
# step 3, compute phase spectrum of fft
phase_spectrum = phaseSpectrum(fft_img)
# Cosine spectrum, corresponding to the real part
cos_phase_spectrum = np.cos(phase_spectrum)
# Sine spectrum, corresponding to the imaginary part
sin_phase_spectrum = np.sin(phase_spectrum)
# step 4, mean-smooth the gray-level amplitude spectrum
mean_log_amplitude_spectrum = cv.boxFilter(amplitude_spectrum_log, cv.CV_32FC1, (3, 3))
# step 5, compute the spectral residual
amplitude_spectrum_residual = amplitude_spectrum_log - mean_log_amplitude_spectrum
# step 6, exponentiate the spectral residual
exp_amplitude_spectrum_residual = np.exp(amplitude_spectrum_residual)
# Compute the real and imaginary parts separately
real_part = exp_amplitude_spectrum_residual * cos_phase_spectrum
imaginary_part = exp_amplitude_spectrum_residual * sin_phase_spectrum
# Merge the real and imaginary parts
com_real_imaginary = np.zeros((real_part.shape[0], real_part.shape[1], 2), np.float32)
com_real_imaginary[:, :, 0] = real_part
com_real_imaginary[:, :, 1] = imaginary_part
# step 7, inverse Fourier transform with the new amplitude and phase spectra
ifft_img = np.zeros(com_real_imaginary.shape, np.float32)
cv.dft(com_real_imaginary, ifft_img, cv.DFT_COMPLEX_OUTPUT + cv.DFT_INVERSE)
# step 8, saliency map
saliency_map = np.power(ifft_img[:, :, 0], 2) + np.power(ifft_img[:, :, 1], 2)
# Gaussian-smooth the saliency map
saliency_map = cv.GaussianBlur(saliency_map, (5, 5), 2.5)
# show the saliency map for test
# saliency_map = cv.normalize(saliency_map, saliency_map, 0, 1, cv.NORM_MINMAX)
saliency_map = saliency_map / np.max(saliency_map)
# Use a gamma transform to raise the contrast
saliency_map = np.power(saliency_map, 0.5)
saliency_map = np.round(saliency_map*255)
saliency_map = saliency_map.astype(np.uint8)
cv.imshow("SaliencyMap", saliency_map)
cv.waitKey()
cv.destroyAllWindows()
else:
print("Usage: python spectral_residual_significance.py imageFile")
``` |
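The whole spectral-residual pipeline also fits in a few lines with NumPy's complex FFT; a hedged sketch (the function name and smoothing sizes below are illustrative assumptions):
```python
import numpy as np
import cv2 as cv

def spectral_residual_saliency(gray):
    F = np.fft.fft2(gray.astype(np.float32))
    log_amp = np.log1p(np.abs(F))                  # log amplitude spectrum
    phase = np.angle(F)                            # phase spectrum (radians)
    residual = log_amp - cv.blur(log_amp, (3, 3))  # spectral residual
    sal = np.abs(np.fft.ifft2(np.exp(residual + 1j * phase))) ** 2
    sal = cv.GaussianBlur(sal, (5, 5), 2.5)
    return sal / sal.max()
```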
{
"source": "26ANSH/Pathshaala",
"score": 3
} |
#### File: Pathshaala/teacher/firebase.py
```python
import pyrebase
# Fill your Firebase Details here -
CONFIG = {
'apiKey': "",
'authDomain': "",
'databaseURL': "",
'projectId': "",
'storageBucket': "",
'messagingSenderId': "",
'appId': ""
}
# start firebase authentication
firebase = pyrebase.initialize_app(CONFIG)
auth = firebase.auth()
storage = firebase.storage()
def create_user(email, password):
try:
user = auth.create_user_with_email_and_password(email, password)
return user['localId']
except:
return 400
def teacher_login(email, password):
try:
user = auth.sign_in_with_email_and_password(email, password)
return user['localId']
except:
return 400
def uploadimage(fr, to):
location = storage.child(to)
location.put(fr)
return storage.child(to).get_url()
```
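A minimal usage sketch for the helpers above — the credentials, email, and paths are placeholders, and CONFIG must be filled in before any of this will run:
```python
uid = create_user("teacher@example.com", "s3cret-password")
if uid != 400:
    # Logging in with the same credentials should return the same user id
    assert teacher_login("teacher@example.com", "s3cret-password") == uid
    url = uploadimage("local/avatar.png", f"display_images/{uid}/avatar.png")
    print(url)
```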
#### File: Pathshaala/teacher/models.py
```python
import firebase_admin
from firebase_admin import credentials, firestore, storage
import random
from google.cloud.firestore import ArrayUnion, Increment
import datetime
# Firebase Credentials
# Fill in your Firebase details, which you get from console.firebase.google.com
cred = credentials.Certificate({
"type": "",
"project_id": "",
"private_key_id": "",
"private_key": "",
"client_email": "",
"client_id": "",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": ""
})
firebase_admin.initialize_app(cred)
db = firestore.client()
def _new_user(id, fname, lname,email, country, gender):
num = random.randrange(1, 10**6)  # random verification token
db.collection('teachers').document(id).set({
'id':id,
'email':email,
'first_name':fname,
'last_name':lname,
'country':country,
'gender':gender,
'token': num,
'metadata':{'students':0,'studentLimit':500,'courses':0,'courseLimit':10}
})
return num
def get_token(id):
return db.collection('teachers').document(id).get().to_dict()['token']
def _new_course(name, teacher_id, description, tags, img):
# random_number = str(random.randint((10**(3)), (10**(3))))
time = datetime.datetime.now()
tags = tags.split(',')
tags = list(map(str.strip, tags))
course = db.collection('courses').document()
code = video_code()
details = {'id':course.id, 'code':name.replace(' ', '-'), 'live': code ,'name':name.title(), 'teacher_id':teacher_id, 'description':description, 'tags':tags, 'img':img, 'from':time, 'students':0}
course.set(details)
db.collection('teachers').document(teacher_id).update({'metadata.courses': Increment(1)})
def get_meta(id):
meta = db.collection('teachers').document(id).get().to_dict()['metadata']
meta['total_course_percent'] = (meta['courses']/meta['courseLimit']) * 100
meta['total_students_percent'] = (meta['students']/meta['studentLimit']) * 100
return meta
def get_courses(teacher_id):
courses = db.collection('courses').where('teacher_id', '==', teacher_id).get()
courses = [i.to_dict() for i in courses]
return courses
def add_student(email, c_id):
time = datetime.datetime.now()
checking = db.collection('students').where('email', '==', email).stream()
for check in checking:
id = check.id
students = db.collection('courses').document(c_id).collection('students').where('id', '==' , id).stream()
for student in students:
return 400, student.to_dict()
data = check.to_dict()
data = {'id':id, 'email':email, 'added':time, 'verified':data['verified'], 'name':data['name']}
db.collection('courses').document(c_id).collection('students').document(id).set(data)
db.collection('courses').document(c_id).update({'students': Increment(1)})
return 200, data
new = db.collection('students').document()
id = new.id
db.collection('courses').document(c_id).update({'students': Increment(1)})
new.set({'email':email, 'id':id, 'verified':False, 'created':time})
db.collection('courses').document(c_id).collection('students').document(id).set({'id':id, 'email':email, 'added':time, 'verified':False, 'name':'Invite Sent'})
return 100, id
def get_student_details(code, t_id):
return [student.to_dict() for student in db.collection('courses').where('code', '==', code).where('teacher_id', '==', t_id).get()[0].reference.collection('students').get()]
def video_code():
nums=[8,4,4,4,12]
code = ''
for num in nums:
random_number = str(random.randint((10**(num-1)), (10**(num))))
code += random_number + '-'
return code[:-1]
def get_course(code, t_id):
return list(db.collection('courses').where('code', '==', code).where('teacher_id', '==', t_id).stream())[0].to_dict()
```
#### File: Pathshaala/teacher/views.py
```python
from django.shortcuts import render, redirect
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from .firebase import create_user, teacher_login, uploadimage
from .models import _new_course, _new_user, get_token, get_courses, get_meta, add_student, get_student_details, get_course
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.conf import settings
import asyncio
from asgiref.sync import sync_to_async
from django.core.signing import Signer
import json
from django.views.decorators.csrf import csrf_exempt
def page_not_found_view(request, exception):
return render(request, 'error/404.html', status=404)
decode = Signer()
def is_student(request):
if request.user.username.split('_')[0]=='student':
return True
else:
return False
def teacher_auth(request):
if request.user.is_authenticated and request.user.username.split('_')[0]=='teacher':
return True
else:
return False
def index(request):
if teacher_auth(request):
return render(request, 'student/say.html', {'say': f' Pathshaala ❤️ {request.user.first_name} Details = {request.user}'})
elif is_student(request):
return redirect('/teacher/auth/login/?error=You are already Logged in as a Student')
else:
return redirect('/teacher/auth/login/?error=Login to Access !!!')
async def signin(request):
if not await sync_to_async(teacher_auth)(request):
if request.method == 'POST':
form = request.POST
user = await sync_to_async(create_user)(form['email'], form['password'])
if user == 400:
# print(user)
return HttpResponse('User already exists')
else:
token = await sync_to_async(_new_user)(user,form['fname'],form['lname'], form['email'], form['country'], form['gender'])
await sync_to_async(User.objects.create_user)(first_name = form['fname'], last_name = form['lname'], username='teacher_'+user, password=form['password'], email=form['email'])
authenticated_user = await sync_to_async(authenticate)(username='teacher_'+user, password=form['password'])
# print(authenticated_user)
msg = render_to_string('mail.html', {'header':'Welcome to Pathshaala👨💻🔥', 'name': form['fname'],'link':'https://pathshaala.azurewebsites.net/teacher/auth/verify/?user={}&code={}'.format(decode.sign(user), decode.sign(token)), 'email':form['email']})
email = EmailMessage(
'Welcome to Pathshaala',
msg,
settings.EMAIL_HOST_USER,
[form['email']]
)
email.content_subtype = 'html'
email.fail_silently = False
mail = sync_to_async(email.send)
asyncio.create_task(mail())
authenticated_user.is_active = False
await sync_to_async(authenticated_user.save)()
# await sync_to_async(login)(request, authenticated_user)
return redirect('/teacher/auth/login/?alert=Account Created! Please Verify to continue, Link sent via email')
else:
return render(request,'teacher/signup.html')
else:
return redirect('index')
def User_Login(request):
if not teacher_auth(request):
if request.method == 'POST':
form = request.POST
email=form['email']
password=form['password']
username = teacher_login(email, password)
if username != 400:
id = 'teacher_'+username
user = User.objects.get(username=id)
if user.is_active:
authenticated_user = authenticate(username=id, password=password)
login(request, authenticated_user)
return redirect('dashboard')
else:
return redirect("/teacher/auth/login/?error=Please Verify to continue, Link sent via email at {}".format(email))
else:
# return HttpResponse('Invalid credentials, Try again')
# return render(request,'teacher/login.html', {'error' : 'Wrong Email or Password! Retry'})
return redirect('/teacher/auth/login/?error=Wrong Email or Password! Please retry!')
else:
if request.GET.get('error'):
error = request.GET.get('error')
return render(request,'teacher/login.html', {'error':error})
elif request.GET.get('alert'):
alert = request.GET.get('alert')
return render(request,'teacher/login.html', {'alert':alert})
return render(request,'teacher/login.html')
elif is_student(request):
return redirect('/teacher/auth/login/?error=You are already Logged in as a Student')
else:
return redirect('dashboard')
def my_logout(request):
if teacher_auth(request):
logout(request)
return redirect('indextt')
def verifyemail(request):
if request.GET.get('code') and request.GET.get('user'):
try:
id = decode.unsign(request.GET.get('user'))
code = decode.unsign(request.GET.get('code'))
username='teacher_'+id
user = User.objects.get(username=username)
except:
return redirect('/teacher/auth/login/?error=INVALID LINK')
if user.is_active:
return redirect('/teacher/auth/login/?alert=Email Already Verified')
else:
if int(code) == int(get_token(id)):
user.is_active = True
user.save()
return redirect('/teacher/auth/login/?alert=Email Verified! Now you can Login to your account')
else:
return redirect('/teacher/auth/login/?error=INVALID LINK')
else:
return redirect('/teacher/auth/login/?error=INVALID LINK')
def dashboard(request):
if teacher_auth(request):
return render(request, 'teacher/dashboard/main.html', {'userName':{'fname':request.user.first_name, 'lname':request.user.last_name}, 'data':get_meta(request.user.username.split('_')[1]) })
else:
return redirect('/teacher/auth/login/?error=Login to Access !!!')
@csrf_exempt
def students(request, course_id):
if teacher_auth(request):
if request.method == 'POST':
received_json_data = json.loads(request.body)
print(course_id, received_json_data)
code, response = add_student(received_json_data['mail'], request.session['current_course'])
if code == 400:
return JsonResponse({'code':'exist'})
elif code == 200:
name = response['name']
return JsonResponse({'email':received_json_data['mail'], 'code':'✅','name':name })
else:
msg = render_to_string('mail.html', {'header':'Welcome to Pathshaala 👨💻', 'name': '','link':'https://pathshaala.azurewebsites.net','t_name':request.user.first_name, 'email':received_json_data['mail']})
email = EmailMessage(
f'Invite from {request.user.first_name}',
msg,
settings.EMAIL_HOST_USER,
[received_json_data['mail']]
)
email.content_subtype = 'html'
email.fail_silently = False
email.send()
return JsonResponse({'email':received_json_data['mail'], 'code':'⛔️','name':'Invite Sent' })
else:
students_data = get_student_details(course_id, request.user.username.split('_')[1])
course_info = get_course(course_id, request.user.username.split('_')[1])
return render(request, 'teacher/dashboard/courses/users.html', {'course':course_info ,'userName':{'fname':request.user.first_name, 'lname':request.user.last_name}, 'students':students_data})
else:
return redirect('/teacher/auth/login/?error=Login to Access !!!')
def courses(request):
if teacher_auth(request):
courses = get_courses(request.user.username.split('_')[1])
return render(request, 'teacher/dashboard/course.html',{'courses': courses, 'count':len(courses), 'userName':{'fname':request.user.first_name, 'lname':request.user.last_name}})
else:
return redirect('/teacher/auth/login/?error=Login to Access !!!')
async def new_course(request):
if await sync_to_async(teacher_auth)(request):
if request.method == 'POST':
# return HttpResponse('Post Method')
form = request.POST
file = request.FILES["course-image-upload"]
name = form['course-name']
url = await sync_to_async(uploadimage)(file, "display_images/courses/"+file.name)
# asyncio.create_task(sync_to_async(_new_course)(name, request.user.username.split('_')[1],form['course-description'], form['course-tags'], url))
await sync_to_async(_new_course)(name, request.user.username.split('_')[1],form['course-description'].capitalize(), form['course-tags'], url)
return render(request,'teacher/dashboard/add_course.html', {'alert':f'Course "{name.title()}" Has been Created', 'userName':{'fname':request.user.first_name, 'lname':request.user.last_name}})
else:
return render(request, 'teacher/dashboard/add_course.html', {'userName':{'fname':request.user.first_name, 'lname':request.user.last_name}})
else:
return redirect('/teacher/auth/login/?error=Login to Access !!!')
# https://firebasestorage.googleapis.com/v0/b/pathshaala-e8244.appspot.com/o/display_images%2Fcourses%2FCheers!.png?alt=media&token=ba8852c3-ef5c-4135-98bc-743a8714ce79
def sr(request):
if teacher_auth(request):
return render(request, 'teacher/dashboard/resource.html', {'userName':{'fname':request.user.first_name, 'lname':request.user.last_name}})
else:
return redirect('/teacher/auth/login/?error=Login to Access !!!')
def sv(request):
if teacher_auth(request):
return render(request, 'teacher/dashboard/yt.html', {'userName':{'fname':request.user.first_name, 'lname':request.user.last_name}})
else:
return redirect('/teacher/auth/login/?error=Login to Access !!!')
def course(request, course_id):
if teacher_auth(request):
try:
course_info = get_course(course_id, request.user.username.split('_')[1])
except:
return HttpResponse('No such course')
request.session['current_course'] = course_info['id']
return render(request, 'teacher/dashboard/courses/main.html', {'course':course_info, 'userName':{'fname':request.user.first_name, 'lname':request.user.last_name}})
else:
return redirect('/teacher/auth/login/?error=Login to Access !!!')
def docs(request, course_id):
if teacher_auth(request):
course_info = get_course(course_id, request.user.username.split('_')[1])
print(request.session['current_course'])
return render(request, 'teacher/dashboard/courses/pages.html', {'course':course_info, 'userName':{'fname':request.user.first_name, 'lname':request.user.last_name}})
else:
return redirect('/teacher/auth/login/?error=Login to Access !!!')
``` |
{
"source": "26ANSH/python-docs-hello-django",
"score": 2
} |
#### File: python-docs-hello-django/hello/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
import os
def hello(request):
if "USER_NAME" in os.environ:
return HttpResponse(f"<h1>Hello, {os.environ['USER_NAME']}!</h1><br>This is Ansh from Azure CLI")
else:
return HttpResponse("<h1>Hello, World!</h1><br>This is Ansh from Azure CLI")
``` |
{
"source": "26aseem/Capstone-2021-GUI",
"score": 3
} |
#### File: 26aseem/Capstone-2021-GUI/app.py
```python
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import streamlit as st
import pickle
from pickle import load
from PIL import Image
import seaborn as sns
import statsmodels.api as sm
### Streamlit code starts here
st.title("Time Series Analysis of Disaster Tweets")
st.markdown("The dashboard will help the government and humanitarian aid agencies to plan and coordinate the natural disaster relief efforts, resulting in more people being saved and more effective distribution of emergency supplies during a natural hazard")
st.sidebar.title("Select Visual Charts")
st.sidebar.markdown("Select the Charts/Plots accordingly:")
# Some CSS Markdown for styling
STYLE = """
<style>
img {
max-width: 100%;
}
</style>
"""
st.markdown(STYLE, unsafe_allow_html=True)
### Time Series Code goes here
# Dataset
# Load the Dataset
tweets1 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/nepal_mix_1.csv")[['text','type']]
tweets2 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/italy_mix_1.csv")[['text','type']]
tweets3 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/Covid-19.csv")[['text','type']]
names = [tweets1,tweets2,tweets3]
# Concatenate the datasets
tweets = pd.concat(names,ignore_index = True)
# Reshuffle the dataset
tweets = tweets.sample(frac = 1)
# Reindex the dataset
tweets['index'] = list(range(0,tweets.shape[0],1))
tweets.set_index('index', inplace=True)
tweets['type'] = tweets['type'].map({0: 'Need', 1: 'Availability', 2: 'Other'})
# Change column names for consistency
tweets.columns = ['text', 'type']
print('Shape of the Dataset:',tweets.shape)
# Dataset Description
h = st.sidebar.slider('Select the number of tweets using the slider', 1, tweets.shape[0], 10)
data_tweets = tweets.sample(h)
data_tweets['index'] = list(range(0, h, 1))
data_tweets.set_index('index', inplace=True)
st.table(data_tweets)
# Checking for class balancing and get unique labels:
chart_visual_class_balancing = st.sidebar.checkbox('Class Labels', True)
if chart_visual_class_balancing==True:
fig = plt.figure(figsize=(8, 4))
sns.countplot(y=tweets.loc[:, 'type'],data=tweets).set_title("Count of tweets in each class")
st.pyplot(fig)
tweets['type'] = tweets['type'].map({'Need':0, 'Availability':1,'Other':2})
# Get all the labels used in the labelling column
label = tweets.type.unique()
print("Labels:", label)
# Remove label 2 from the list because not required for time series analysis
label = np.delete(label,np.where(label == 2))
print("Labels:", label)
# Add names to the numerical labels
label_name = []
for i in label:
if i == 0:
label_name.append("Need")
elif i == 1:
label_name.append("Availability")
# Choose interval
interval = 30
start_date = "2021-04-01"
# Create Timestamps with intervals
ds = pd.date_range(start=start_date, periods=interval)
dates = []
for i in ds:
dates.append(i.strftime('%m-%d-%Y'))
del ds
# Divide the Dataset into intervals
# Divide the dataset into the given number of intervals
num_of_tweets_per_interval = math.floor(tweets.shape[0]/interval)
# Create Time Series with intervals
data = []
count_of_data = []
for i in label:
count_of_data.append([])
for i in range(1,interval+1,1):
# Draw a sample from the tweets
tw = tweets.sample(n=num_of_tweets_per_interval, random_state=10, replace=False)
# Append the statistics of the drawn sample to the list
stat = dict()
for j in range(0,len(label)):
stat[label[j]] = list(tw['type']).count(label[j])
count_of_data[j].append(list(tw['type']).count(label[j]))
data.append(stat)
# Remove the already drawn tweets from the dataset
tweets.drop(labels=list(tw.index.values),inplace=True)
# Real Time Series starts here
# Load Dataset
df = pd.DataFrame(count_of_data).T
# Set Index
df['Date'] = pd.to_datetime(dates)
df.set_index('Date', inplace=True)
df.columns = ['Need', 'Availability']
st.title("Twitter Data Description")
chart_visual_tweets = st.sidebar.selectbox('Select Chart/Plot type',
('Stacked Bar Chart', 'Side-by-Side Bar Chart', 'Line Chart'))
# Plot 1
if chart_visual_tweets=='Side-by-Side Bar Chart':
# set width of bars
barWidth = 0.25
# Set position of bar on X axis
r = [np.arange(interval)]
for i in range(1, len(label)):
r1 = [x + barWidth for x in r[-1]]
r.append(r1)
# Plotting a line plot after changing it's width and height
f = plt.figure()
f.set_figwidth(20)
f.set_figheight(8)
# Make the plot
for i,lab in enumerate(label):
plt.bar(r[i], count_of_data[i], width=barWidth, edgecolor='white', label=label_name[i])
# Add xticks on the middle of the group bars
plt.xlabel('Time Series', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(count_of_data[0]))], list(dates))
plt.tick_params(axis ='x', rotation =90)
# Create legend & Show graphic
plt.legend()
plt.show()
st.pyplot(f)
# Plot 2
if chart_visual_tweets=='Stacked Bar Chart':
# Plotting a line plot after changing it's width and height
f = plt.figure()
f.set_figwidth(20)
f.set_figheight(8)
b = np.zeros(interval)
for i,lab in enumerate(label):
plt.bar(dates, count_of_data[i],bottom=b, edgecolor='white', label=label_name[i])
b += np.array(count_of_data[i])
plt.xlabel('Time Series', fontweight='bold')
plt.tick_params(axis ='x', rotation =90)
# Create legend & Show graphic
plt.legend()
plt.show()
st.pyplot(f)
# Plot 3
if chart_visual_tweets=='Line Chart':
# Plotting a line plot after changing it's width and height
f = plt.figure()
f.set_figwidth(20)
f.set_figheight(8)
ls = ['dashed', 'solid']
for i,lab in enumerate(label):
plt.plot(count_of_data[i], label=label_name[i], linestyle=ls[i], marker='o')
plt.xlabel('Time Series', fontweight='bold')
plt.tick_params(axis ='x', rotation =90)
# Create legend & Show graphic
plt.legend()
plt.show()
st.pyplot(f)
################################### Time Series Analysis starts here
st.title("Time Series Analysis of Tweets")
chart_visual_time_series = st.sidebar.selectbox('Select Need/Availability Label for Time series distribution',('Need', 'Availability'))
# y represents the Need Label
# z represents the Availability Label
y = df['Need']
z = df['Availability']
if chart_visual_time_series=='Need':
fig, ax = plt.subplots(figsize=(20, 6))
ax.plot(y, marker='o', linewidth=0.5, label='Daily',ls='solid', c='red')
ax.plot(y.resample('3D').mean(),marker='o', markersize=8, linestyle='dashed', label='Half-Weekly Mean Resample')
ax.set_ylabel('Frequency')
ax.set_xlabel('Date')
ax.legend()
st.pyplot(fig)
if chart_visual_time_series=="Availability":
fig, ax = plt.subplots(figsize=(20, 6))
ax.plot(z, marker='o', linewidth=0.5, label='Daily',ls='solid', c='red')
ax.plot(z.resample('3D').mean(),marker='o', markersize=8, linestyle='dashed', label='Half-Weekly Mean Resample', color='blue')
ax.set_ylabel('Frequency')
ax.set_xlabel('Date')
ax.legend()
st.pyplot(fig)
################################### Seasonal Decomposition starts here
# The next step is to decompose the data to view more of the complexity behind the linear visualization.
# A useful Python function called seasonal_decompose within the 'statsmodels' package can help us to decompose the data
# into four different components:
# Observed
# Trend
# Seasonal
# Residual
st.title("Decompose the Data")
chart_visual_seasonal_decomposition = st.sidebar.selectbox('Select Need/Availability Label for Seasonal decomposition',
('Need of resources', 'Availability of resources'))
def plot_seasonal_decompose(x):
decomposition_x = sm.tsa.seasonal_decompose(x, model='additive', extrapolate_trend='freq')
fig_x = decomposition_x.plot()
fig_x.set_size_inches(14, 7)
plt.show()
st.pyplot(fig_x)
if chart_visual_seasonal_decomposition == "Need of resources":
plot_seasonal_decompose(y)
elif chart_visual_seasonal_decomposition == "Availability of resources":
plot_seasonal_decompose(z)
# Footer
footer="""<style>
a:link , a:visited{
color: blue;
background-color: transparent;
text-decoration: underline;
}
a:hover, a:active {
color: red;
background-color: transparent;
text-decoration: underline;
}
footer {visibility: hidden;}
.footer {
margin:0;
height:5px;
position:relative;
top:140px;
left: 0;
bottom: 0;
width: 100%;
background-color: white;
color: black;
text-align: center;
}
</style>
<div class="footer">
<p>Developed with <span style='color:red;'>❤</span> by <a style='text-align: center;' href="https://github.com/26aseem" target="_blank"><NAME></a></p>
</div>
"""
st.markdown(footer,unsafe_allow_html=True)
``` |
{
"source": "26aseem/TOPSIS",
"score": 3
} |
#### File: TOPSIS/TOPSIS-Aseem-101803469/topsis.py
```python
import sys
import pandas as pd
import time
import numpy as np
import math
class topsisPerformer:
def __init__(self):
self.outputTable = pd.DataFrame()
self.outputFileName = "result" + str(time.strftime("%Y%m%d", time.localtime(time.time()))) + ".csv"
def topsis(self, inputFile, weights, impacts):
# inputFile is the input table for performing TOPSIS
# weights holds the required weight values for each column
# impacts holds the required impact values for each column
try:
weights = list(map(float, weights.split(',')))
impacts = list(map(str, impacts.split(',')))
except:
print('Weights or Impacts are not provided in proper format')
print("Usage: topsis(<InputDataFile>, <Weights>, <Impacts>)")
print('Example: topsis("inputfile.csv", "1,1,1,2", "+,+,-,+")')
sys.exit(0)
for each in impacts:
if each not in ('+', '-'):
print('Impacts are not provided in proper format')
print('Example: "+,+,-,+"')
sys.exit(0)
try:
input_data = pd.read_csv(inputFile)
except:
print(inputFile + ' file not found')
sys.exit(0)
if len(list(input_data.columns)) <= 2:
print('Input file should contain at least 3 columns: ' + inputFile)
sys.exit()
# Input File are read through read_csv() functions
inputTable = pd.read_csv(inputFile)
# Define the Labels for the data
labels = [i for i in inputTable[1:]]
# MAIN PROGRAM STARTS HERE
# Each file is manipulated
outputTable = inputTable.copy()
# Add the Topsis Score and Rank column to the output table
outputTable['Topsis Score'] = [0 for i in range(0,len(inputTable))]
outputTable['Rank'] = [0 for i in range(0,len(inputTable))]
# Parameters for output Table
sumOfSquares = {labels[i] : 0 for i in range(1,len(labels))}
newWeights = {labels[i] : 0 for i in range(1,len(labels))}
# Processing the input table
for index, row in inputTable.iterrows():
for i in labels[1:]:
sumOfSquares[i] += row[i]*row[i]
for i in sumOfSquares:
sumOfSquares[i] = math.sqrt(sumOfSquares[i])
# Perform the TOPSIS Normalization operations on the data
inputTable = outputTable.copy()
for index, row in outputTable.iterrows():
for i in labels[1:]:
inputTable.loc[index,i] = row[i] / sumOfSquares[i]
# Define weights and impact dictionary
newWeights = {i:weights[index]/sum(weights) for index,i in enumerate(labels[1:])}
newImpacts = {i:impacts[index] for index,i in enumerate(labels[1:])}
for index, row in outputTable.iterrows():
for i in labels[1:]:
inputTable.loc[index,i] *= newWeights[i]
v1 = dict()
v2 = dict()
for index, row in outputTable.iterrows():
for i in labels[1:]:
if newImpacts[i] == '+':
v1[i] = max(inputTable[i])
v2[i] = min(inputTable[i])
elif newImpacts[i] == '-':
v1[i] = min(inputTable[i])
v2[i] = max(inputTable[i])
inputTable = inputTable.append(v1,ignore_index=True)
inputTable = inputTable.append(v2,ignore_index=True)
inputTable['S1'] = [0 for i in range(0,len(inputTable))]
inputTable['S2'] = [0 for i in range(0,len(inputTable))]
inputTable['S12'] = [0 for i in range(0,len(inputTable))]
for index, row in inputTable.iterrows():
if index < len(inputTable) - 2:
for i in labels[1:]:
row['S1'] += (float(inputTable.loc[len(inputTable)-2][i]) - float(row[i])) * (float(inputTable.loc[len(inputTable)-2][i]) - float(row[i]))
row['S2'] += (float(inputTable.loc[len(inputTable)-1][i]) - float(row[i])) * (float(inputTable.loc[len(inputTable)-1][i]) - float(row[i]))
row['S1'] = math.sqrt(row['S1'])
row['S2'] = math.sqrt(row['S2'])
row['S12'] = row['S1'] + row['S2']
outputTable.loc[index,'Topsis Score'] = row['S2'] / (row['S1'] + row['S2'])
# Ranking of the different Models
rank_values = sorted(outputTable['Topsis Score'], reverse = True)
for index,row in inputTable.iterrows():
if index < len(inputTable) - 2:
outputTable.loc[index,'Rank'] = rank_values.index(outputTable.loc[index]['Topsis Score']) + 1
self.outputTable = outputTable
print('Topsis Performer was Successful')
print(outputTable)
# Download the Topsis Report
def topsisReport(self, outputFile=None):
# Save DataFrame to a csv file (defaults to the timestamped result file)
if outputFile is None:
outputFile = self.outputFileName
self.outputTable.to_csv(outputFile, index=False)
print('\nResult File successfully created')
``` |
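With the fixes above (instance method, safe default for the report filename), a usage sketch looks like this — the CSV name, weights, and impacts are placeholders:
```python
# data.csv: first column names the alternative, remaining columns are numeric criteria
performer = topsisPerformer()
performer.topsis("data.csv", "1,1,1,2", "+,+,-,+")
performer.topsisReport("topsis-result.csv")  # omit the argument for the timestamped default
```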
{
"source": "26medias/GAN-toolkit",
"score": 3
} |
#### File: GAN-toolkit/detector/iris_detector.py
```python
import numpy as np
import cv2
from pathlib import Path
from .ELG.elg_keras import KerasELG
FILE_PATH = str(Path(__file__).parent.resolve())
NET_INPUT_SHAPE = (108, 180)
class IrisDetector():
def __init__(self, path_elg_weights="/content/eye-detector/elg_keras.h5"):
self.elg = None
self.detector = None
self.path_elg_weights = path_elg_weights
self.build_ELG()
def build_ELG(self):
self.elg = KerasELG()
self.elg.net.load_weights(self.path_elg_weights)
def set_detector(self, detector):
self.detector = detector
def detect_iris(self, im, landmarks=None):
"""
Input:
im: RGB image
Outputs:
output_eye_landmarks: list of eye landmarks having shape (2, 18, 2) with ordering (L/R, landmarks, x/y).
"""
if landmarks == None:
try:
faces, landmarks = self.detector.detect_face(im, with_landmarks=True)
except:
raise NameError("Error occured during face detection. Maybe face detector has not been set.")
left_eye_idx = slice(36, 42)
right_eye_idx = slice(42, 48)
output_eye_landmarks = []
for lm in landmarks:
left_eye_im, left_x0y0 = self.get_eye_roi(im, lm[left_eye_idx])
right_eye_im, right_x0y0 = self.get_eye_roi(im, lm[right_eye_idx])
inp_left = self.preprocess_eye_im(left_eye_im)
inp_right = self.preprocess_eye_im(right_eye_im)
input_array = np.concatenate([inp_left, inp_right], axis=0)
pred_left, pred_right = self.elg.net.predict(input_array)
lms_left = self.elg._calculate_landmarks(pred_left, eye_roi=left_eye_im)
lms_right = self.elg._calculate_landmarks(pred_right, eye_roi=right_eye_im)
eye_landmarks = np.concatenate([lms_left, lms_right], axis=0)
eye_landmarks = eye_landmarks + np.array([left_x0y0, right_x0y0]).reshape(2,1,2)
output_eye_landmarks.append(eye_landmarks)
return output_eye_landmarks
@staticmethod
def get_eye_roi(im, lms, ratio_w=1.5):
def adjust_hw(hw, ratio_w=1.5):
"""
set RoI height and width to the same ratio of NET_INPUT_SHAPE
"""
h, w = hw[0], hw[1]
new_w = w * ratio_w
new_h = NET_INPUT_SHAPE[0] / NET_INPUT_SHAPE[1] * new_w
return np.array([new_h, new_w])
h, w = im.shape[:2]
min_xy = np.min(lms, axis=0)
max_xy = np.max(lms, axis=0)
hw = max_xy - min_xy
hw = adjust_hw(hw, ratio_w=ratio_w)
center = np.mean(lms, axis=0)
x0, y0 = center - (hw) / 2
x1, y1 = center + (hw) / 2
x0, y0, x1, y1 = map(np.int32,[x0, y0, x1, y1])
x0, y0 = np.maximum(x0, 0), np.maximum(y0, 0)
x1, y1 = np.minimum(x1, h), np.minimum(y1, w)
eye_im = im[x0:x1, y0:y1]
return eye_im, (x0, y0)
@staticmethod
def preprocess_eye_im(im):
im = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
im = cv2.equalizeHist(im)
im = cv2.resize(im, (NET_INPUT_SHAPE[1], NET_INPUT_SHAPE[0]))[np.newaxis, ..., np.newaxis]
im = im / 255 * 2 - 1
return im
@staticmethod
def draw_pupil(im, lms, stroke=3):
draw = im.copy()
#draw = cv2.resize(draw, (inp_im.shape[2], inp_im.shape[1]))
pupil_center = np.zeros((2,))
pnts_outerline = []
pnts_innerline = []
for i, lm in enumerate(np.squeeze(lms)):
x, y = int(lm[0]), int(lm[1])
if i < 8:
draw = cv2.circle(draw, (y, x), stroke, (125,255,125), -1)
pnts_outerline.append([y, x])
elif i < 16:
draw = cv2.circle(draw, (y, x), stroke, (125,125,255), -1)
pnts_innerline.append([y, x])
pupil_center += (y,x)
elif i < 17:
pass
#draw = cv2.drawMarker(draw, (y, x), (255,200,200), markerType=cv2.MARKER_CROSS, markerSize=5, thickness=stroke, line_type=cv2.LINE_AA)
else:
pass
#draw = cv2.drawMarker(draw, (y, x), (255,125,125), markerType=cv2.MARKER_CROSS, markerSize=5, thickness=stroke, line_type=cv2.LINE_AA)
pupil_center = (pupil_center/8).astype(np.int32)
draw = cv2.circle(draw, (pupil_center[0], pupil_center[1]), stroke, (255,255,0), -1)
draw = cv2.polylines(draw, [np.array(pnts_outerline).reshape(-1,1,2)], isClosed=True, color=(125,255,125), thickness=stroke//2)
draw = cv2.polylines(draw, [np.array(pnts_innerline).reshape(-1,1,2)], isClosed=True, color=(125,125,255), thickness=stroke//2)
return draw
```
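A usage sketch for the class above; `some_face_detector` is a placeholder for any object exposing `detect_face(im, with_landmarks=True)`, as assumed by `detect_iris`:
```python
import cv2

idet = IrisDetector(path_elg_weights="elg_keras.h5")
idet.set_detector(some_face_detector)  # e.g. an S3FD/MTCNN wrapper

im = cv2.cvtColor(cv2.imread("face.jpg"), cv2.COLOR_BGR2RGB)
eye_landmarks = idet.detect_iris(im)  # one (2, 18, 2) array per detected face
drawn = IrisDetector.draw_pupil(im, eye_landmarks[0][0])  # left eye, first face
```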
#### File: keras-multi-input/pyimagesearch/datasets.py
```python
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import numpy as np
import glob
import cv2
import os
def load_house_attributes(inputPath):
# initialize the list of column names in the CSV file and then
# load it using Pandas
cols = ["bedrooms", "bathrooms", "area", "zipcode", "price"]
df = pd.read_csv(inputPath, sep=" ", header=None, names=cols)
# determine (1) the unique zip codes and (2) the number of data
# points with each zip code
zipcodes = df["zipcode"].value_counts().keys().tolist()
counts = df["zipcode"].value_counts().tolist()
# loop over each of the unique zip codes and their corresponding
# count
for (zipcode, count) in zip(zipcodes, counts):
# the zip code counts for our housing dataset is *extremely*
# unbalanced (some only having 1 or 2 houses per zip code)
# so let's sanitize our data by removing any houses with less
# than 25 houses per zip code
if count < 25:
idxs = df[df["zipcode"] == zipcode].index
df.drop(idxs, inplace=True)
# return the data frame
return df
def process_house_attributes(df, train, test):
# initialize the column names of the continuous data
continuous = ["bedrooms", "bathrooms", "area"]
# performin min-max scaling each continuous feature column to
# the range [0, 1]
cs = MinMaxScaler()
trainContinuous = cs.fit_transform(train[continuous])
testContinuous = cs.transform(test[continuous])
# one-hot encode the zip code categorical data (by definition of
# one-hot encoding, all output features are now in the range [0, 1])
zipBinarizer = LabelBinarizer().fit(df["zipcode"])
trainCategorical = zipBinarizer.transform(train["zipcode"])
testCategorical = zipBinarizer.transform(test["zipcode"])
# construct our training and testing data points by concatenating
# the categorical features with the continuous features
trainX = np.hstack([trainCategorical, trainContinuous])
testX = np.hstack([testCategorical, testContinuous])
# return the concatenated training and testing data
return (trainX, testX)
def load_house_images(df, inputPath):
# initialize our images array (i.e., the house images themselves)
images = []
# loop over the indexes of the houses
for i in df.index.values:
# find the four images for the house and sort the file paths,
# ensuring the four are always in the *same order*
basePath = os.path.sep.join([inputPath, "{}_*".format(i + 1)])
housePaths = sorted(list(glob.glob(basePath)))
# initialize our list of input images along with the output image
# after *combining* the four input images
inputImages = []
outputImage = np.zeros((64, 64, 3), dtype="uint8")
# loop over the input house paths
for housePath in housePaths:
# load the input image, resize it to be 32 x 32, and then
# update the list of input images
image = cv2.imread(housePath)
image = cv2.resize(image, (32, 32))
inputImages.append(image)
# tile the four input images in the output image such that the first
# image goes in the top-left corner, the second image in the
# top-right corner, the third image in the bottom-right corner,
# and the final image in the bottom-left corner
outputImage[0:32, 0:32] = inputImages[0]
outputImage[0:32, 32:64] = inputImages[1]
outputImage[32:64, 32:64] = inputImages[2]
outputImage[32:64, 0:32] = inputImages[3]
# add the tiled image to our set of images the network will be
# trained on
images.append(outputImage)
# return our set of images
return np.array(images)
``` |
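A sketch of how these loaders fit together in the usual train/test workflow; the dataset paths are placeholders:
```python
from sklearn.model_selection import train_test_split

df = load_house_attributes("Houses-dataset/HousesInfo.txt")
images = load_house_images(df, "Houses-dataset") / 255.0

split = train_test_split(df, images, test_size=0.25, random_state=42)
trainAttrX, testAttrX, trainImagesX, testImagesX = split
trainX, testX = process_house_attributes(df, trainAttrX, testAttrX)
```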
{
"source": "26medias/ml-utils",
"score": 3
} |
#### File: 26medias/ml-utils/datasets.py
```python
import os
import glob
import random
import math
import datetime as dt
import json
import ntpath
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class featureReader:
def __init__(self, groups=[], look_back=50, horizon=8, dataRange=(0,1), featureShape="matrix", cache=True, rotate=True):
self.groups = groups
self.look_back = look_back
self.horizon = horizon
self.dataRange = dataRange
self.cache = cache
self.featureShape = featureShape
self.rotate = rotate
self.dfCache = {}
def valmap(self, value, istart, istop, ostart, ostop):
return ostart + (ostop - ostart) * ((value - istart) / (istop - istart))
def dpct(self, start, end):
return (end-start)/start;
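    # e.g. valmap(5, 0, 10, 0, 1) -> 0.5 (linear re-map between ranges) and
    # dpct(100, 110) -> 0.1 (percent change from start to end)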
def load(self, glob_pattern):
self.glob_pattern = glob_pattern
def load_dataset(self, csv_filename):
# Retrieve from the cache if there's one
if self.cache and csv_filename in self.dfCache:
return self.dfCache[csv_filename]
# Open the csv file into a dataframe, remove the invalid rows
df = pd.read_csv(csv_filename)
df = df.apply(pd.to_numeric, errors='coerce')
df = df.dropna()
df = df.reset_index(drop=True)
df['t'] = pd.to_datetime(df['t'], unit='s')
df = df.sort_values('t')
# Cache the data
if self.cache:
self.dfCache[csv_filename] = df
return df
def getFeatures(self):
features = {}
for groupName in self.groups:
for dataset in glob.glob(self.glob_pattern):
_features = self.getFeaturesForGroup(dataset, self.groups[groupName])
if groupName not in features:
features[groupName] = _features
else:
features[groupName] = np.append(features[groupName], _features, axis=0)
return features
def reshapeFeatures(self, features):
        if self.featureShape == "matrix":
            return features
        elif self.featureShape == "img":
            return features.reshape(features.shape[0], features.shape[1], 1)
        # fall back to the untouched features if an unknown shape is requested
        return features
def getFeaturesForGroup(self, dataset, group):
# Load the dataset
df = self.load_dataset(dataset)
# Create the subset dataframe
_df = df[group['columns']].copy()
for col in _df.columns:
if col!='t' and col!='o':
_df[col] = _df[col].transform(lambda x: self.valmap(x, group['inputRange'][0], group['inputRange'][1], self.dataRange[0], self.dataRange[1]))
# Convert the dataframe to numpy
lines = np.asarray(_df);
# Assemble the timesteps
timesteps_X = []
l = len(lines)
for n, v in enumerate(lines):
if n >= self.look_back and n<l-self.horizon:
in_steps = []
for i in range(self.look_back-1, -1, -1):
in_steps.append(np.array(lines[n-i]))
_line = np.array(in_steps)
if self.rotate is True:
_line = np.rot90(np.asarray(_line), 1)
#print(">shape: ", _line.shape)
#_line = _line.reshape(_line.shape[0], _line.shape[1], 1)
_line = self.reshapeFeatures(_line)
timesteps_X.append(_line)
timesteps_X = np.asarray(timesteps_X)
timesteps_X = np.clip(timesteps_X, a_min = self.dataRange[0], a_max = self.dataRange[1])
return timesteps_X
def getFeaturePrices(self):
output = np.asarray([])
for dataset in glob.glob(self.glob_pattern):
df = self.load_dataset(dataset)
_dft = df[['t']].copy()
_dfo = df['o'].tolist()
# Convert the dataframe to numpy
lines = np.asarray(_dfo);
# Assemble the timesteps
timesteps_X = []
l = len(lines)
for n, v in enumerate(lines):
if n >= self.look_back and n<l-self.horizon:
in_steps = []
for i in range(self.look_back-1, -1, -1):
in_steps.append(np.array(lines[n-i]))
_line = np.array(in_steps)
timesteps_X.append(np.asarray(_line))
timesteps_X = np.asarray(timesteps_X)
if len(output)==0:
output = timesteps_X
else:
output = np.append(output, timesteps_X, axis=0)
return output
def getHorizonOutput(self, in_steps, outputType):
if outputType=="range":
outputLine = np.array([self.dpct(in_steps[0], min(in_steps)), self.dpct(in_steps[0], max(in_steps))])
elif outputType=="signs":
outputLine = np.array([1 if self.dpct(in_steps[0], min(in_steps))<0 else 0, 1 if self.dpct(in_steps[0], max(in_steps))>0 else 0])
elif outputType=="diff":
outputLine = ((np.array(in_steps)-in_steps[0])/in_steps[0])[1:]
elif outputType=="count":
_p = in_steps[0]
outputLine = np.asarray([np.count_nonzero(np.array(in_steps)<_p)/(self.horizon-1), np.count_nonzero(np.array(in_steps)>_p)/(self.horizon-1)])
else:
outputLine = np.array(in_steps)
return outputLine
# range: Range in percent
# signs: Signs within range
def getTargets(self, outputType="range"):
output = np.asarray([])
for dataset in glob.glob(self.glob_pattern):
df = self.load_dataset(dataset)
_dfo = df['o'].tolist()
# Convert the dataframe to numpy
lines = np.asarray(_dfo);
# Assemble the timesteps
timesteps_X = []
l = len(lines)
for n, v in enumerate(lines):
if n >= self.look_back and n<l-self.horizon:
in_steps = []
for i in range(0, self.horizon, 1):
in_steps.append(lines[n+i])
outputLine = self.getHorizonOutput(in_steps, outputType)
timesteps_X.append(outputLine)
timesteps_X = np.asarray(timesteps_X)
if len(output)==0:
output = timesteps_X
else:
output = np.append(output, timesteps_X, axis=0)
return output
def previewFeature(self, features, prices, idx=0):
fig, axs = plt.subplots(len(features.keys())+1, figsize=(15,5))
plt.autoscale(tight=True)
fig.suptitle('Features Preview')
for i, (k, v) in enumerate(features.items()):
axs[i].imshow(features[k][idx].reshape(features[k][idx].shape[0], features[k][idx].shape[1]), cmap='RdBu', aspect="auto")
axs[len(axs)-1].plot(prices[idx])
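# A minimal usage sketch (the group definition, ranges and glob pattern are
# illustrative assumptions, not taken from the repo):
#
#   reader = featureReader(
#       groups={"ohlc": {"columns": ["o", "h", "l", "c"],
#                        "inputRange": (0, 1)}},
#       look_back=50, horizon=8)
#   reader.load("data/*.csv")
#   X = reader.getFeatures()         # dict of feature tensors, one per group
#   y = reader.getTargets("range")   # min/max percent move over the horizon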
``` |
{
"source": "26medias/opencv-machine-vision-pipeline",
"score": 3
} |
#### File: 26medias/opencv-machine-vision-pipeline/events_01_people.py
```python
from vision_pipeline import framework
# Event handler: When a new face is detected
def onNewFace(framework, object_id):
print("\nNew face detected!\n", framework.tracker.objects[object_id])
# Event handler: When a new person is detected
def onNewPerson(framework, object_id):
print("\nNew person detected!\n", framework.tracker.objects[object_id])
# Event handler: When we lose track of a face
def onFaceLost(framework, object_id):
print("\nWe lost track of this face:\n", framework.tracker.objects[object_id])
# Setup the pipeline on
visionPipeline = framework.VisionFramework(settings="settings/person_face.json")
# Trigger an event when a new face is detected
visionPipeline.on("object.create", onNewFace, {
"type": {
"$eq": "face"
},
"score": {
"$gt": 0.5
}
})
# Trigger an event when a new person is detected
visionPipeline.on("object.create", onNewPerson, {
"type": {
"$eq": "objects"
},
"label": {
"$eq": "person"
},
"object_score": {
"$gt": 0.7
}
})
# Trigger an event when we lose track of a face
visionPipeline.on("object.deactivate", onFaceLost, {
"type": {
"$eq": "face"
}
})
visionPipeline.capture(src=0, fps=30)
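# Note: the filter dicts above use a MongoDB-style matcher -- "$eq" tests
# equality and "$gt" tests greater-than against fields of the tracked
# object, so a callback fires only when every condition matches (this is a
# reading of the pipeline code, not documented API).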
```
#### File: plugins/emotions/emotions.py
```python
from keras.preprocessing.image import img_to_array
import imutils
import cv2
from keras.models import load_model
import numpy as np
import sys
import os
import uuid
file_path = os.path.dirname(os.path.abspath(__file__)) + os.sep
emotion_model_path = file_path + 'models/_mini_XCEPTION.102-0.66.hdf5'
emotion_classifier = load_model(emotion_model_path, compile=True)
EMOTIONS = ["angry" ,"disgust","scared", "happy", "sad", "surprised", "neutral"]
def getEmotionsFromFace(face):
height = face.shape[0]
width = face.shape[1]
_face = cv2.resize(face, (64, 64))
_face = cv2.cvtColor(_face, cv2.COLOR_BGR2GRAY)
_face = _face.astype("float") / 255.0
_face = img_to_array(_face)
_face = np.expand_dims(_face, axis=0)
preds = emotion_classifier.predict(_face)[0]
emotion_probability = np.max(preds)
emotion = EMOTIONS[preds.argmax()]
output = {
"emotion": emotion,
"emotion_label": "{}: {:.2f}%".format(emotion, emotion_probability * 100),
"emotion_probability": emotion_probability
}
for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):
output["emotion_"+str(i)] = "{}: {:.2f}%".format(emotion, prob * 100),
return output
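# Example usage (assumes `face` is a BGR crop as returned by cv2 or a face
# detector; the file path is hypothetical):
#
#   face = cv2.imread("face_crop.jpg")
#   result = getEmotionsFromFace(face)
#   print(result["emotion_label"])   # e.g. "happy: 93.10%"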
```
#### File: opencv-machine-vision-pipeline/services/networks.py
```python
import os
import socket
import multiprocessing
import subprocess
from urllib.request import *
from socket import timeout
import threading
import json
import time
def pinger(job_q, results_q):
"""
Do Ping
:param job_q:
:param results_q:
:return:
"""
DEVNULL = open(os.devnull, 'w')
while True:
ip = job_q.get()
if ip is None:
break
try:
subprocess.check_call(['ping', '-c1', ip],
stdout=DEVNULL)
results_q.put(ip)
except:
pass
def get_my_ip():
"""
Find my IP address
:return:
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
s.close()
return ip
def map_network(pool_size=255):
"""
Maps the network
:param pool_size: amount of parallel ping processes
:return: list of valid ip addresses
"""
ip_list = list()
# get my IP and compose a base like 192.168.1.xxx
ip_parts = get_my_ip().split('.')
base_ip = ip_parts[0] + '.' + ip_parts[1] + '.' + ip_parts[2] + '.'
# prepare the jobs queue
jobs = multiprocessing.Queue()
results = multiprocessing.Queue()
pool = [multiprocessing.Process(target=pinger, args=(jobs, results)) for i in range(pool_size)]
for p in pool:
p.start()
    # queue the ping processes
for i in range(1, 255):
jobs.put(base_ip + '{0}'.format(i))
for p in pool:
jobs.put(None)
for p in pool:
p.join()
    # collect the results
while not results.empty():
ip = results.get()
ip_list.append(ip)
return ip_list
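# Example: on a /24 home LAN, map_network() returns something like
# ['192.168.1.1', '192.168.1.42', ...] -- every host that answered a ping.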
output = {}
def download_index(url, lock):
#print("Trying ", url)
text = 'Fail'
try:
response = urlopen(Request(url))
#response = requests.get(url, timeout=20)
#text = response.json()
text = response.read()
r_json = json.loads(text)
#print(url, "Found", r_json)
output[url] = r_json
#except socket.timeout:
# print(url, "timeout")
except:
pass
#print(url, "failed")
#indexing
#with lock:
#access shared resources
#output[url] = text
def scanCandidates(ips):
urls = ["http://"+ip+"/info" for ip in ips if ip not in '192.168.0.101']
#print(urls)
n = 5 #number of parallel connections
chunks = [urls[i * n:(i + 1) * n] for i in range((len(urls) + n - 1) // n )]
lock = threading.Lock()
for chunk in chunks:
threads = []
for url in chunk:
thread = threading.Thread(target=download_index, args=(url, lock,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
#print("End")
return output
if __name__ == '__main__':
start = time.time()
print('Mapping...')
lst = map_network()
#lst = ['192.168.0.1', '192.168.0.100', '192.168.0.101', '192.168.0.103', '192.168.0.112', '192.168.0.104', '192.168.0.105']
#print(lst)
print("Scanning candidates...")
output = scanCandidates(lst)
end = time.time()
duration = end-start
d_len = len(output.keys())
print("Scanned in ", duration, "sec")
print(d_len, "services found.")
for k in output.keys():
print(output[k]['service_name']+" ("+output[k]['service_id']+")")
#print(output)
```
#### File: opencv-machine-vision-pipeline/services/ThreadStack.py
```python
import threading
import time
class ThreadStack:
def __init__(self, threads=5):
print("States started")
self.stack = []
self.threads = []
self.length = 0
for i in range(0,threads):
t = threading.Thread(target=self.processor)
self.threads.append(t)
t.start()
#for t in self.threads:
# t.join()
def processor(self):
while True:
#if len(self.stack)>0:
if self.length>0:
item = self.stack.pop(0)
self.length = self.length - 1
item[0](*item[1])
time.sleep(0.5)
def add(self, fn, args):
self.stack.append([fn, args])
self.length = self.length + 1
return len(self.stack)
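# A minimal usage sketch (the worker function and its argument are
# illustrative):
#
#   def fetch(url):
#       print("fetching", url)
#
#   pool = ThreadStack(threads=3)
#   pool.add(fetch, ("http://192.168.0.10/info",))
#   # each worker thread pops a [fn, args] item off the shared stack and
#   # calls fn(*args); note the workers loop forever, so the process must
#   # be terminated externally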
``` |
{
"source": "26medias/reddit-stock-monitoring",
"score": 3
} |
#### File: reddit-stock-monitoring/main/symbols.py
```python
import os
import sys
import praw
import spacy
nlp = spacy.load('en_core_web_sm',disable=['ner','textcat'])
import nltk
from nltk.tokenize import word_tokenize
import glob
import pandas as pd
import re
from datetime import datetime
import threading
dev_mode = False
def fix_path(name):
if dev_mode == True:
return name
return sys.path[0]+'/'+name
# Get the symbols
class Tickers:
def __init__(self):
df = pd.DataFrame()
for filename in glob.glob(fix_path('datasets/symbols/*')):
_df = pd.read_csv(filename, sep='\t')
_df['source'] = re.findall(r"symbols\/([a-zA-Z]+)\.txt", filename)[0]
df = df.append(_df)
self.df = df.dropna()
tickers = Tickers()
df = tickers.df
# Symbols to match & ignore
real_symbols = df['Symbol'].unique()
false_symbol = ['ON','IN','AT','FOR','BY','DD','YOLO','CORP','ONE','SUB','MOON','CEO','OUT','INTO','MAN','POST','BRO','LIFE','CALL','DUDE','IDEA']
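# Matching strategy: a token only counts as a ticker if spaCy tags it as
# ADP/NOUN/PROPN, it appears in the exchange symbol lists above, and it is
# not a common English word that collides with a ticker (false_symbol).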
# Get the credentials & settings for PRAW
if dev_mode != True:
from auth import reddit_client_id, reddit_client_secret, reddit_password, reddit_useragent, reddit_username
##reddit_client_id=os.environ['reddit_client_id']
#reddit_client_secret=os.environ['reddit_client_secret']
#reddit_password=os.environ['reddit_password']
#reddit_useragent=os.environ['reddit_useragent']
#reddit_username=os.environ['reddit_username']
# Monitor Reddit
class Monitor:
def __init__(self):
print("Monitoring")
self.df = False
self.df_name = False
if os.path.exists(fix_path('datasets/datasets.pkl')):
self.datasets = pd.read_pickle(fix_path('datasets/datasets.pkl'))
else:
self.datasets = pd.DataFrame()
# PRAW setup
self.praw = praw.Reddit(
client_id=reddit_client_id,
client_secret=reddit_client_secret,
password=<PASSWORD>,
user_agent=reddit_useragent,
username=reddit_username
)
def start(self, subreddit="wallstreetbets", thread=True):
sub = self.praw.subreddit(subreddit)
if thread is True:
commentThread = threading.Thread(name='comments', target=self.monitorComments, args=(sub,subreddit))
submissionThread = threading.Thread(name='submissions', target=self.monitorSubmissions, args=(sub,subreddit))
commentThread.start()
submissionThread.start()
else:
self.monitorComments(sub,subreddit)
self.monitorSubmissions(sub,subreddit)
def monitorSubmissions(self, sub, subreddit):
for submission in sub.stream.submissions():
self.process_submission(submission, subreddit)
def monitorComments(self, sub, subreddit):
for comment in sub.stream.comments():
self.process_comment(comment, subreddit)
def process_submission(self, submission, subreddit):
NER = nlp(submission.title.lower())
NER2 = nlp(submission.selftext.lower())
found = []
has_rocket = '🚀' in submission.title.lower()
for token in NER:
if '.' in token.text:
w = token.text.upper().split('.')[0]
else:
w = token.text.upper()
if token.pos_ in ['ADP','NOUN','PROPN'] and w in real_symbols and w not in false_symbol:
found.append(w)
for token in NER2:
if '.' in token.text:
w = token.text.upper().split('.')[0]
else:
w = token.text.upper()
if token.pos_ in ['ADP','NOUN','PROPN'] and w in real_symbols and w not in false_symbol:
found.append(w)
if (len(found)>0):
#print('\n\n----------------')
#print(has_rocket, submission.title)
#print(found)
self.record(source='submission', has_rocket=has_rocket, symbols=list(set(found)), title=submission.title, subreddit=subreddit)
def process_comment(self, comment, subreddit):
NER = nlp(comment.body.lower())
found = []
has_rocket = '🚀' in comment.body.lower()
for token in NER:
if '.' in token.text:
w = token.text.upper().split('.')[0]
else:
w = token.text.upper()
if token.pos_ in ['ADP','NOUN','PROPN'] and w in real_symbols and w not in false_symbol:
found.append(w)
if (len(found)>0):
self.record(source='comment', has_rocket=has_rocket, symbols=list(set(found)), title=comment.body, subreddit=subreddit)
def get_df(self):
d = datetime.now()
dname = '{}-{}-{}_{}_{}'.format(d.year,d.month,d.day,d.hour,d.minute)
filename = fix_path("datasets/data/"+dname+".pkl")
if self.df_name != False:
filename_prev = fix_path("datasets/data/"+self.df_name+".pkl")
if self.df_name != dname:
# New timestep, move on to a new dataset
# Save to the index
self.datasets.at[datetime.timestamp(d), 'filename'] = filename.replace('/home/julien/mk2/main/','')
self.datasets.to_pickle(fix_path('datasets/datasets.pkl'))
print("#### New DF: ", filename)
            # Not the first run? There was a previous timestep buffer?
if self.df_name != False:
self.df.to_pickle(filename_prev)
# Create/recover a new df
if os.path.exists(filename):
# Recover existing file
self.df = False
self.df = pd.read_pickle(filename)
self.df_name = dname
else:
# Create a new DF
self.df = False
self.df = pd.DataFrame(columns=['comment', 'submission', 'rockets'])
self.df_name = dname
self.df.to_pickle(filename)
return self.df
def record(self, source, has_rocket, symbols, subreddit, title=''):
print(subreddit, source, has_rocket, symbols)
df = self.get_df()
for symbol in symbols:
if symbol in df.index:
df.at[symbol, source] = df.at[symbol, source]+1
if has_rocket:
df.at[symbol, 'rockets'] = df.at[symbol, 'rockets']+1
else:
df.at[symbol, "submission"] = 0
df.at[symbol, "comment"] = 0
df.at[symbol, source] = 1
if has_rocket:
df.at[symbol, 'rockets'] = 1
else:
df.at[symbol, 'rockets'] = 0
reddit = Monitor()
if dev_mode == True:
reddit.start(subreddit="wallstreetbets", thread=False)
else:
reddit.start(subreddit="wallstreetbets", thread=True)
reddit.start(subreddit="pennystocks", thread=True)
reddit.start(subreddit="Baystreetbets", thread=True)
``` |
{
"source": "26medias/rpi-zero-epaper",
"score": 3
} |
#### File: 26medias/rpi-zero-epaper/env.py
```python
from Adafruit_CCS811 import Adafruit_CCS811
import epd2in9
import time
import Image
import ImageDraw
import ImageFont
ccs = Adafruit_CCS811()
while not ccs.available():
pass
def main():
epd = epd2in9.EPD()
epd.init(epd.lut_full_update)
image = Image.new('1', (epd2in9.EPD_WIDTH, epd2in9.EPD_HEIGHT), 255) # 255: clear the frame
draw = ImageDraw.Draw(image)
font = ImageFont.truetype('/home/pi/demo/raspberrypi/python/open-sans.ttf', 16)
draw.rectangle((0, 10, 128, 34), fill = 0)
draw.text((10, 12), 'Air Quality', font = font, fill = 255)
draw.text((8, 36), 'loading...', font = font, fill = 0)
#draw.line((16, 60, 56, 60), fill = 0)
#draw.line((56, 60, 56, 110), fill = 0)
#draw.line((16, 110, 56, 110), fill = 0)
#draw.line((16, 110, 16, 60), fill = 0)
#draw.line((16, 60, 56, 110), fill = 0)
#draw.line((56, 60, 16, 110), fill = 0)
#draw.arc((60, 90, 120, 150), 0, 360, fill = 0)
#draw.rectangle((16, 130, 56, 180), fill = 0)
#draw.chord((60, 160, 120, 220), 0, 360, fill = 0)
epd.clear_frame_memory(0xFF)
epd.set_frame_memory(image, 0, 0)
epd.display_frame()
epd.delay_ms(1000)
##
# there are 2 memory areas embedded in the e-paper display
# and once the display is refreshed, the memory area will be auto-toggled,
# i.e. the next action of SetFrameMemory will set the other memory area
# therefore you have to set the frame memory twice.
##
epd.clear_frame_memory(0xFF)
epd.display_frame()
epd.clear_frame_memory(0xFF)
epd.display_frame()
# for partial update
epd.init(epd.lut_partial_update)
##
# there are 2 memory areas embedded in the e-paper display
# and once the display is refreshed, the memory area will be auto-toggled,
# i.e. the next action of SetFrameMemory will set the other memory area
# therefore you have to set the frame memory twice.
##
epd.set_frame_memory(image, 0, 0)
epd.display_frame()
epd.set_frame_memory(image, 0, 0)
epd.display_frame()
time_image = Image.new('1', (epd2in9.EPD_WIDTH, epd2in9.EPD_HEIGHT), 255) # 255: clear the frame
draw = ImageDraw.Draw(time_image)
font = ImageFont.truetype('/home/pi/demo/raspberrypi/python/open-sans.ttf', 12)
image_width, image_height = time_image.size
while (True):
if ccs.available():
temp = ccs.calculateTemperature()
if not ccs.readData():
print "CO2: ", ccs.geteCO2(), "ppm, TVOC: ", ccs.getTVOC(), " temp: ", temp
draw.rectangle((0, 0, image_width, image_height), fill = 0)
draw.text((5, 0), time.strftime('Time: %H:%M:%S'), font = font, fill = 255)
draw.text((5, 20), 'CO2: %sppm'%(ccs.geteCO2()), font = font, fill = 255)
draw.text((5, 40), 'Gas/Particulate: %s'%(ccs.getTVOC()), font = font, fill = 255)
draw.text((5, 60), 'Temperature: %s'%(temp), font = font, fill = 255)
epd.set_frame_memory(time_image, 0, 10)
epd.display_frame()
else:
print "ERROR!"
while(1):
pass
        time.sleep(2)
if __name__ == '__main__':
main()
``` |
{
"source": "26medias/TF-Face-Angle-Translation",
"score": 2
} |
#### File: TF-Face-Angle-Translation/databuilder/builder.py
```python
from IPython.display import HTML, display
import time
import requests
import ntpath
import cv2
import math
import os, sys
from matplotlib import pyplot
from PIL import Image
import numpy as np
from numpy import asarray
from scipy.spatial.distance import cosine
from mtcnn.mtcnn import MTCNN
import keras_vggface
from keras_vggface.vggface import VGGFace
from keras_vggface.utils import preprocess_input
import glob
import mtcnn
from pathlib import Path
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from scipy.cluster import hierarchy
from bs4 import BeautifulSoup
from selenium import webdriver
import csv
from models.detector import face_detector
from models.parser import face_parser
from utils.visualize import show_parsing_with_annos
from threading import Thread
class builder():
def __init__(self, VIDEO_QUALITY="720", FRAME_PERCENTAGE=40, DIR_VIDEOS="Videos", DIR_FACES="Faces"):
# The variables
self.VIDEO_QUALITY = VIDEO_QUALITY # The trailer quality we'll download: 480, 720 or 1080
self.FRAME_PERCENTAGE = FRAME_PERCENTAGE # from 0.1 to 100: The percentage of frames that will be analyzed in the video
self.DIR_VIDEOS = DIR_VIDEOS
self.DIR_FACES = DIR_FACES
if not os.path.isdir(self.DIR_VIDEOS):
os.mkdir(self.DIR_VIDEOS, 755);
if not os.path.isdir(self.DIR_FACES):
os.mkdir(self.DIR_FACES, 755);
# Create the detector, using default weights
print("Creating the detector model")
self.detector = MTCNN()
# Create a vggface model
print("Creating the face embedding model")
self.embedding_model = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3), pooling='avg')
# Create a face detector
print("Creating the face detector model")
self.fd = face_detector.FaceAlignmentDetector(lmd_weights_path="models/detector/FAN/2DFAN-4_keras.h5")
# Create a face parser (segmentation)
print("Creating the face segmentation model")
self.prs = face_parser.FaceParser()
# The methods
# ===========
# Colab progress bar
def progress(self, value, max=100):
return HTML('<progress value="{value}" max="{max}" style="width: 50%"> {value}</progress>'.format(value=value, max=max))
# Convert a value from one range to another
def rangeConvert(self, x, in_min, in_max, out_min, out_max):
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
# Get the directory of a filename
def getDir(self, filename):
p = Path(filename);
return p.parts[len(p.parts)-2]
# Dowload a video from a url
def downloadFile(self, url):
print("Downloading ", url)
filename = self.DIR_VIDEOS+"/"+ntpath.basename(url)
if os.path.exists(filename):
return filename
myfile = requests.get(url)
open(filename, 'wb').write(myfile.content)
print(filename," downloaded.")
return filename
# Resize an image
def resize_image(self, im, max_size=768):
if np.max(im.shape) > max_size:
ratio = max_size / np.max(im.shape)
print(f"Resize image to ({str(int(im.shape[1]*ratio))}, {str(int(im.shape[0]*ratio))}).")
return cv2.resize(im, (0,0), fx=ratio, fy=ratio)
return im
def imageFilesToGrid(self, directory, outputFilename):
filenames = glob.glob(directory+'/*.jpg')
#print(directory, ": ", len(filenames), " images")
if len(filenames) < 4:
return False
result_figsize_resolution = 10 # 1 = 100px
images_count = len(filenames)
# Calculate the grid size:
grid_size = math.ceil(math.sqrt(images_count))
# Create plt plot:
fig, axes = pyplot.subplots(grid_size, grid_size, figsize=(result_figsize_resolution, result_figsize_resolution))
current_file_number = 0
for image_filename in filenames:
x_position = current_file_number % grid_size
y_position = current_file_number // grid_size
plt_image = pyplot.imread(image_filename)
axes[x_position, y_position].imshow(plt_image)
current_file_number += 1
pyplot.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0)
pyplot.savefig(outputFilename)
#pyplot.show()
def exportImageGrids(self, directory, outputDirectory):
print("Exporting image grids...")
dirs = os.listdir(directory)
dirs.sort()
ndirs = len(dirs)
for n,dir in enumerate(dirs):
            if dir != "ALL":
self.imageFilesToGrid(directory+"/"+dir, outputDirectory+"/"+dir+".jpg");
self.progress(n, ndirs)
# Extract the faces from an image, return an array of numpy faces
def extractFacesFromImage(self, pixels, required_size=(224, 224), limit=50):
results = self.detector.detect_faces(pixels)
faces = []
errors = 0
for i,faceData in enumerate(results):
if len(faces) > limit:
break
x1, y1, width, height = faceData['box']
x2, y2 = x1 + width, y1 + height
# extract the face
face = pixels[y1:y2, x1:x2]
# resize pixels to the model size
try:
image = Image.fromarray(face)
image = image.resize(required_size)
face_array = asarray(image)
faces.append(face_array)
if limit==1:
return face_array
except:
errors+=1
if limit==1 and len(faces)==0:
return False
return faces;
# Extract the faces from an image, return an array of numpy faces & landmarks
def extractFacesAndLandmarksFromImage(self, pixels, required_size=(224, 224), limit=50):
rw, rh = required_size
results, landmarks = self.fd.detect_face(pixels, with_landmarks=True)
nResults = len(results)
faces = []
errors = 0
for i,bbox in enumerate(results):
if len(faces) > limit:
break
# Get the face
x0, y0, x1, y1, score = bbox
# Find the center of the face
w = x1-x0
h = y1-y0
xCenter = x0+int(w/2)
yCenter = y0+int(h/2)
if w>h:
y0 = yCenter-int(w/2)
y1 = yCenter+int(w/2)
if h>w:
x0 = xCenter-int(h/2)
x1 = xCenter+int(h/2)
x0, y0, x1, y1 = map(int, [x0, y0, x1, y1])
face = pixels[x0:x1, y0:y1, :]
# Recalculate the landmarks coordinates
for li in range(len(landmarks[i])):
landmark = landmarks[i][li]
lx, ly = landmark
landmarks[i][li] = (self.rangeConvert(lx-x0, 0, face.shape[1], 0, rw), self.rangeConvert(ly-y0, 0, face.shape[0], 0, rh))
# Resize pixels to the model size
try:
image = Image.fromarray(face)
image = image.resize(required_size)
face_array = asarray(image)
faces.append(face_array)
if limit==1:
return face_array
except:
errors+=1
if limit==1 and len(faces)==0:
return False
return faces, landmarks
# Extract the faces from an image, return an array of numpy faces & landmarks
def extractFacesLandmarksAndSegmentationFromImage(self, pixels, required_size=(224, 224), limit=50):
rw, rh = required_size
results, landmarks = self.fd.detect_face(pixels, with_landmarks=True)
nResults = len(results)
faces = []
segmentations = []
errors = 0
for i,bbox in enumerate(results):
if len(faces) > limit:
break
# Get the face
x0, y0, x1, y1, score = bbox
# Find the center of the face
w = x1-x0
h = y1-y0
xCenter = x0+int(w/2)
yCenter = y0+int(h/2)
if w>h:
y0 = yCenter-int(w/2)
y1 = yCenter+int(w/2)
if h>w:
x0 = xCenter-int(h/2)
x1 = xCenter+int(h/2)
x0, y0, x1, y1 = map(int, [x0, y0, x1, y1])
face = pixels[x0:x1, y0:y1, :]
# Recalculate the landmarks coordinates
for li in range(len(landmarks[i])):
landmark = landmarks[i][li]
lx, ly = landmark
landmarks[i][li] = (self.rangeConvert(lx-x0, 0, face.shape[1], 0, rw), self.rangeConvert(ly-y0, 0, face.shape[0], 0, rh))
# Resize pixels to the model size
try:
image = Image.fromarray(face)
image = image.resize(required_size)
face_array = asarray(image)
faces.append(face_array)
# Get the segmentation on the resized image
segmentation = self.prs.parse_face(face_array)
segmentations.append(segmentation)
if limit==1:
return face_array
except:
errors+=1
if limit==1 and len(faces)==0:
return False
return faces, landmarks, segmentations
# Export the frames out of a video at a specific fps
def videoToFaces(self, filename, maxFrame=0):
print("Extracting faces from the video frames...")
basename = os.path.splitext(ntpath.basename(filename))[0]
#print("basename:", basename)
cap = cv2.VideoCapture(filename)
# Get the video's FPS
fps = cap.get(cv2.CAP_PROP_FPS)
nframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
processFrames = int(nframes*self.FRAME_PERCENTAGE/100)
skipFrame = int(nframes/processFrames)
print(basename, "fps:", fps, "skipFrame:",skipFrame,"Frames:", str(processFrames)+"/"+str(nframes))
out = display(self.progress(0, processFrames), display_id=True)
i = 0
c = 0
faces = []
landmarks = []
segmentations = []
while(cap.isOpened()):
ret, frame = cap.read()
if ret == False:
break
i+=1
if maxFrame>0 and i > maxFrame:
break;
#print(i, "-", i % skipFrame)
if (i % skipFrame == 0):
c+=1
#print("Checking faces in frame #"+str(i))
#frameFaces = self.extractFacesFromImage(frame)
frameFaces, frameLandmarks, frameSegmentations = self.extractFacesLandmarksAndSegmentationFromImage(frame)
out.update(self.progress(c, processFrames))
for nf, f in enumerate(frameFaces):
faces.append(f)
landmarks.append(frameLandmarks[nf])
segmentations.append(frameSegmentations[nf])
else:
continue
#cv2.imwrite(DIR_IMAGES+"/"+basename+'/'+str(round((i-1)/fps,2))+'sec.jpg',frame)
cap.release()
cv2.destroyAllWindows()
print(basename, " processed.")
print(processFrames,"/",nframes," frames analyzed.")
print(len(faces), " faces found.")
return faces, landmarks, segmentations
# Show a few images
def showImages(self, images, width=4):
fig = pyplot.figure(figsize=(width, math.ceil(len(images)/width)))
for i in range(len(images)):
pyplot.subplot(width, math.ceil(len(images)/width), i+1)
pyplot.imshow(images[i])
pyplot.axis('off')
pyplot.savefig('preview.png')
pyplot.show()
# Save an array of images to files
def saveImages(self, images, dest, names=False, prefix="", showProgress=True):
if not os.path.isdir(dest):
os.mkdir(dest, 755);
nImages = len(images)
if showProgress is True:
print("Saving ",nImages," images to ", dest)
out = display(self.progress(0, nImages), display_id=True)
filenames = []
for n, image in enumerate(images):
if names is False:
filename = dest+"/"+prefix+('{:04d}'.format(n))+'.jpg'
else:
filename = dest+"/"+prefix+str(names[n])+'.jpg'
cv2.imwrite(filename, image)
filenames.append(filename)
if showProgress is True:
out.update(self.progress(n, nImages))
return filenames
# Save Numpy Arrays to files
def saveNpArrays(self, npArrays, dest, names=False, prefix="", showProgress=True):
if not os.path.isdir(dest):
os.mkdir(dest, 755);
nArrays = len(npArrays)
if showProgress is True:
print("Saving ",nArrays," numpy arrays to ", dest)
out = display(self.progress(0, nArrays), display_id=True)
filenames = []
for n, npArray in enumerate(npArrays):
if names is False:
filename = dest+"/"+prefix+('{:04d}'.format(n))+'.npy'
else:
filename = dest+"/"+prefix+str(names[n])+'.npy'
np.save(filename, npArray)
filenames.append(filename)
if showProgress is True:
out.update(self.progress(n, nArrays))
return filenames
# Extract faces and calculate face embeddings for a list of photo files
def get_embeddings(self, faces):
print("Calculating the embeddings...")
# convert into an array of samples
samples = asarray(faces, 'float32')
# prepare the face for the model, e.g. center pixels
samples = preprocess_input(samples, version=2)
# perform prediction
embeddings = self.embedding_model.predict(samples)
return embeddings
# Determine if a candidate face is a match for a known face
def is_match(self, known_embedding, candidate_embedding, threshold=0.5):
        # calculate the cosine distance between embeddings; a lower distance
        # means more similar faces, so a match is at or below the threshold
        score = cosine(known_embedding, candidate_embedding)
        return score <= threshold
# Cluster the faces by cosine distance
def clusterFaces(self, faces, embeddings, landmarks, segmentations, minFaces=2):
groups = [] # Array of dict {faces:[], embeddings: []}
nFaces = len(faces)
print("Clustering ",nFaces," faces...")
out = display(self.progress(0, nFaces), display_id=True)
        # For each face
for n, face in enumerate(faces):
out.update(self.progress(n, nFaces))
if len(groups)==0:
groups.append({
"faces": [face],
"names": [n],
"embeddings": [embeddings[n]],
"landmarks": [landmarks[n]],
"segmentations": [segmentations[n]]
})
else:
                # Not the first face: match it against all the groups to see
                # if the average cosine distance matches an existing group
scores = [] # array of dict {group: n, embeddings: []}
for g, group in enumerate(groups):
groupScores = []
for embedding in group["embeddings"]:
groupScores.append(cosine(embedding, embeddings[n]))
score = np.mean(groupScores)
scores.append({
"group": g,
"score": score
})
# Sort the scores for each group by lowest score, check if that score is below the threshold
scores = sorted(scores, key = lambda i: i["score"], reverse=False)
if scores[0]["score"] <= 0.5:
# Add to the existing group the face matches
groups[scores[0]["group"]]["landmarks"].append(landmarks[n])
groups[scores[0]["group"]]["embeddings"].append(embeddings[n])
groups[scores[0]["group"]]["segmentations"].append(segmentations[n])
groups[scores[0]["group"]]["faces"].append(face)
groups[scores[0]["group"]]["names"].append(n)
#print("[Matched] face #", n, " to group #", scores[0]["group"], "score:", scores[0]["score"])
else:
groups.append({
"faces": [face],
"names": [n],
"embeddings": [embeddings[n]],
"landmarks": [landmarks[n]],
"segmentations": [segmentations[n]]
})
#print("[New face] face #", n, " / Best score:", scores[0]["score"])
# Filter out the groups that don't have enough faces
return [item for item in groups if len(item["faces"]) >= minFaces]
#return groups;
# Cluster all the faces from a remote video
def clusterFacesOnVideo(self, url):
print("Processing ", url);
# Download the video
videoFilename = self.downloadFile(url)
# Get the directories name for that video
# /Faces/[dirname]/Faces
# /Faces/[dirname]/Embeddings
# /Faces/[dirname]/Landmarks
# /Faces/[dirname]/Segmentations
# /Faces/[dirname]/Previews
dirname = os.path.splitext(ntpath.basename(videoFilename))[0]
dirClustered = self.DIR_FACES+"/"+dirname
dirFaces = dirClustered+"/Faces/"
dirEmbeddings = dirClustered+"/Embeddings/"
dirLandmarks = dirClustered+"/Landmarks/"
dirSegmentations = dirClustered+"/Segmentations/"
dirPreviews = dirClustered+"/Previews/"
if os.path.exists(dirPreviews):
# Video already processed, go to the next one
print("Video already processed.")
#return False
# Create the directories
if not os.path.isdir(dirClustered):
os.mkdir(dirClustered, 755);
if not os.path.isdir(dirFaces):
os.mkdir(dirFaces, 755);
if not os.path.isdir(dirEmbeddings):
os.mkdir(dirEmbeddings, 755);
if not os.path.isdir(dirLandmarks):
os.mkdir(dirLandmarks, 755);
if not os.path.isdir(dirSegmentations):
os.mkdir(dirSegmentations, 755);
if not os.path.isdir(dirPreviews):
os.mkdir(dirPreviews, 755);
# Open a CSV to save the datasets
with open(dirClustered+"/"+dirname+".csv", "w") as csvfile:
fieldnames = ["video_name", "face_group", "image_filename", "embeddings_filename", "landmarks_filename", "segmentations_filename"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
# Find the faces on the video
faces, landmarks, segmentations = self.videoToFaces(videoFilename)
nFaces = len(faces)
print(nFaces," faces detected")
# Get the embedding for all the faces
embeddings = self.get_embeddings(faces)
# Cluster the faces using cosine distance
clusters = self.clusterFaces(faces, embeddings, landmarks, segmentations, minFaces=5)
nClusters = len(clusters)
# Export each face group
print("Saving ",nClusters," face clusters...")
for n, group in enumerate(clusters):
ngImg = len(group["faces"])
ngEbd = len(group["embeddings"])
ngldk = len(group["landmarks"])
# Save the face as an image
images_filenames = self.saveImages(group["faces"], dirFaces+('{:04d}'.format(n)), showProgress=False)
# Save the embedding as a numpy array
embeddings_filenames = self.saveNpArrays(group["embeddings"], dirEmbeddings+('{:04d}'.format(n)), showProgress=False)
# Save the landmarks as a numpy array
landmarks_filenames = self.saveNpArrays(group["landmarks"], dirLandmarks+('{:04d}'.format(n)), showProgress=False)
# Save the segmentations as a numpy array
segmentations_filenames = self.saveNpArrays(group["segmentations"], dirSegmentations+('{:04d}'.format(n)), showProgress=False)
# Update the CSV
for i, image_filename in enumerate(images_filenames):
writer.writerow({
"video_name": dirname,
"face_group": n,
"image_filename": image_filename,
"embeddings_filename": embeddings_filenames[i],
"landmarks_filename": landmarks_filenames[i],
"segmentations_filename": segmentations_filenames[i]
})
# Build grids to show each face groups
self.exportImageGrids(dirFaces, dirPreviews)
def clusterFacesFromVideos(self, urls):
nUrls = len(urls)
for n,url in enumerate(urls):
self.clusterFacesOnVideo(url)
def fetchAllHDVideos(self, url):
response = requests.get(url)
soup = BeautifulSoup(response.content, "html5lib")
links = soup.find_all('a')
videos = []
for tag in links:
link = tag.get('href', None)
if link is not None and 'h'+str(self.VIDEO_QUALITY)+'p' in link:
videos.append(link)
return videos
class threadedBuilder():
def __init__(self, VIDEO_QUALITY="720", FRAME_PERCENTAGE=40, DIR_VIDEOS="Videos", DIR_FACES="Faces"):
# The variables
self.VIDEO_QUALITY = VIDEO_QUALITY # The trailer quality we'll download: 480, 720 or 1080
self.FRAME_PERCENTAGE = FRAME_PERCENTAGE # from 0.1 to 100: The percentage of frames that will be analyzed in the video
self.DIR_VIDEOS = DIR_VIDEOS
self.DIR_FACES = DIR_FACES
def fetchAllHDVideos(self, url):
response = requests.get(url)
soup = BeautifulSoup(response.content, "html5lib")
links = soup.find_all('a')
videos = []
for tag in links:
link = tag.get('href', None)
if link is not None and 'h'+str(self.VIDEO_QUALITY)+'p' in link:
videos.append(link)
return videos
def processVideo(self, url):
datasetBuilder = builder(FRAME_PERCENTAGE=2)
#urls = datasetBuilder.fetchAllHDVideos("https://www.davestrailerpage.co.uk/")
datasetBuilder.clusterFacesFromVideos([url])
def process(self, website):
videos = self.fetchAllHDVideos(website)
print("videos", videos)
for video in videos:
print("video", video)
            Thread(target=self.processVideo, args=(video,)).start()
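# A minimal usage sketch (quality/percentage values are illustrative):
#
#   tb = threadedBuilder(VIDEO_QUALITY="720", FRAME_PERCENTAGE=2)
#   tb.process("https://www.davestrailerpage.co.uk/")
#   # spawns one thread per trailer found on the page, each running the
#   # full download -> face clustering pipeline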
``` |
{
"source": "26prajval98/bilingual-translator",
"score": 3
} |
#### File: bilingual-translator/kannadautils/__init__.py
```python
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
from nltk.corpus import words
import json
import os
from collections import defaultdict
def get_kannada_word(search_word):
vals = []
dict_page = 'https://www.shabdkosh.com/search-dictionary?e='+search_word+'&lc=kn&sl=en&tl=kn'
req = Request(dict_page, headers={'User-Agent': 'Mozilla/5.0'})
page = urlopen(req)
soup = BeautifulSoup(page, 'html.parser')
res_list = soup.find_all('ol', {'class': 'eirol'})
for ol in res_list:
li = ol.find_all('li')
for a in li:
for x in a.find_all('a'):
t = x.text
if len(t.split(' ')) <= 1:
vals.append(t)
return vals
def return_english(sentence):
for i in range(len(sentence)):
if sentence[i] in words.words():
return i, sentence[i]
return -1, None
def load_json(path, file):
os.chdir(path)
b = defaultdict(lambda: defaultdict(lambda: 0.2))
if os.path.isfile(file):
f = open(file, "r", encoding="utf-8")
json_data = json.loads(f.read())
b = defaultdict(lambda: defaultdict(lambda: 10 ** -20))
for t in json_data:
string = t[:]
t1, t2 = string.split(":", 1)
if t1 == "None":
t1 = None
if t2 == "None":
t2 = None
b[(t1, t2)] = json_data[string]
return b
def load_json_bi(path, file):
os.chdir(path)
b = defaultdict(lambda: defaultdict(lambda: 0.2))
if os.path.isfile(file):
f = open(file, "r", encoding="utf-8")
json_data = json.loads(f.read())
b = defaultdict(lambda: defaultdict(lambda: 10 ** -20))
for t in json_data:
if t == "null" or t == "None":
b[None] = json_data[t]
else:
b[t] = json_data[t]
return b
def calculate_prob(t_a, m):
t = [None, None]
prob = 1
for ww in t_a:
prob_t = m[tuple(t[-2:])].get(ww, 10 ** -20)
prob *= prob_t
t.append(ww)
return prob
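# e.g. with a trigram model m, calculate_prob(["a", "b", "c"], m) multiplies
# P(a | None, None) * P(b | None, a) * P(c | a, b), falling back to 1e-20
# for unseen transitions.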
def calculate_prob_bi(t_a, m):
t = None
prob = 1
for ww in t_a:
prob_t = m[t].get(ww, 10 ** -20)
prob *= prob_t
t = ww
return prob
def get_sentence(sentence, model, n=3):
sentence_array = sentence.split(" ")
idx, word = return_english(sentence_array)
if idx != -1:
wws = get_kannada_word(word)
if len(wws) == 0:
return []
p = - 100
chosen_word = ""
t_array = sentence_array[:]
for w in wws:
t_array[idx] = w
if n == 3:
t = calculate_prob(t_array, model)
else:
t = calculate_prob_bi(t_array, model)
if t > p:
p = t
chosen_word = w
t_array[idx] = chosen_word
return t_array
if __name__ == "__main__":
sw = "net"
print(get_kannada_word(sw))
mo = load_json("E:/NITK/6th Sem/Computer Graphics/kannada-rocks", "data.json")
ss = "ಆಗ ನಮ್ಮೂರಿನ crow ನೆಂಟರು ಬೇರೆ ಬಂದಿದ್ದಾರೆ ಎಂದು ಆಗುತ್ತದೆ"
updated = get_sentence(ss, mo)
``` |
{
"source": "27149chen/pyds8k",
"score": 2
} |
#### File: auth/ds8k/base.py
```python
from pyds8k.dataParser.ds8k import RequestParser, ResponseParser
AUTH_URL = '/tokens'
DEFAULT_BASE_URL = '/api/v1'
class Auth(object):
base_url = DEFAULT_BASE_URL
auth_url = AUTH_URL
def __init__(self):
pass
@classmethod
def authenticate(self, client):
"""
The main authenticate method. Mandatory
"""
params = {}
params['username'] = client.user
params['password'] = <PASSWORD>
if client.hostname:
params['hmc1'] = client.hostname
req_p = RequestParser(params)
        _, body = client.post(cls.get_auth_url(),
body=req_p.get_request_data()
)
token = _get_data(body).get('token', '')
if token:
client.set_defaultHeaders('X-Auth-Token', token)
# client.set_defaultQuerystrings('token', token)
@classmethod
    def get_auth_url(cls):
"""
Return the auth url. Mandatory
"""
        return cls.base_url + cls.auth_url
def _get_data(response_body):
res_p = ResponseParser(response_body, 'token')
return res_p.get_representations()[0]
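# A minimal usage sketch (`client` is the pyds8k HTTP client being
# authenticated; attribute names follow the code above):
#
#   Auth.authenticate(client)  # POSTs credentials and stores the returned
#                              # token in the X-Auth-Token default header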
```
#### File: pyds8k/pyds8k/base.py
```python
import json
from importlib import import_module
from . import messages
from pyds8k.utils import get_response_parser_class, \
get_request_parser_class
from pyds8k.utils import is_absolute_url
from pyds8k.utils import HTTP200, HTTP204, POSTA, POST
from pyds8k.exceptions import URLNotSpecifiedError, \
FieldReadOnly, \
URLParseError, \
ResponseBodyMissingError
from pyds8k import PYDS8K_DEFAULT_LOGGER
from logging import getLogger
logger = getLogger(PYDS8K_DEFAULT_LOGGER)
get_resource_by_route = None
class URLBuilderMixin(object):
def one(self, route, resource_id):
pass
def all(self, route):
pass
class UtilsMixin(object):
def _update_list_field(self, field_name, value_list, operator='+'):
if not isinstance(value_list, list):
value_list = [value_list]
field = getattr(self, field_name)
if operator == '+':
for item in value_list:
if item in field:
raise KeyError(
messages.ITEM_IN_LIST.format(field_name, item)
)
field.append(item)
if operator == '-':
for item in value_list:
if item not in field:
raise KeyError(
messages.ITEM_NOT_IN_LIST.format(field_name, item)
)
field.pop(field.index(item))
return field
def _get_id(self):
return self.id if hasattr(self, 'id') else id(self)
def remove_None_fields_from_dict(self, input_dict):
new_dict = {}
for (key, value) in input_dict.items():
if value is not None:
new_dict[key] = value
return new_dict
class BaseResource(object):
pass
class Resource(UtilsMixin, BaseResource):
"""
A resource represents a particular representation of
a resource state or a application state.
:param client: HTTPClient object
:param manager: Manager object
:param url: A resource or a resource collection's url
:param info: Dictionary representing resource attributes
:param resource_id: The resource's id
:param parent: The parent resource object
:param loaded: All details is loaded if set to True
"""
id_field = 'id'
url_field = 'link'
# The HTTP method when creating new resource
create_method = 'put'
# base_url must to set to empty string in this base class
base_url = ''
# Set the value to None if the field is not required when creation.
_template = {}
related_resource = {}
alias = {}
def __init__(self, client, manager=None, url='', info={},
resource_id=None, parent=None, loaded=False):
self.set_loaded(loaded)
self._start_init()
self._init_updating()
self.ResponseParser = get_response_parser_class(client.service_type)
self.manager = manager
if self.manager:
self.manager.managed_object = self
self.client = client
self.representation = {}
self.url = self._add_base_to_url(url)
if resource_id:
self._id = resource_id
self.parent = parent
self._custom_url = ''
self._set_modified_info_dict()
if info:
self._add_details(info)
self._finish_init()
def one(self, route, resource_id, rebuild_url=False):
global get_resource_by_route
if not get_resource_by_route:
get_resource_by_route = \
import_module('{}.resources.utils'.format(__package__)
).get_resource_by_route
url = self._set_url(route, resource_id, rebuild_url=rebuild_url)
return get_resource_by_route(route, self.client,
url, self, resource_id)
def all(self, route, rebuild_url=False):
global get_resource_by_route
if not get_resource_by_route:
get_resource_by_route = \
import_module('{}.resources.utils'.format(__package__)
).get_resource_by_route
url = self._set_url(route, rebuild_url=rebuild_url)
return get_resource_by_route(route, self.client, url, self)
def toUrl(self, method, body={}):
"""
To send non-standard rest request, like /attach
"""
self._set_custom_url(method)
if body:
resp, res_body = self.post(body=body)
else:
resp, res_body = self.get_response()
self._reverse_custom_url()
return resp, res_body
def custom_method(self, para1, para2):
"""
        Like customUrl(), but uses a particular method name instead of a url.
"""
result = None
if not self._custom_url:
result = self.manager.custom_method(para1, para2)
else:
result = self.get(para1, para2) # or post, put, ...
self._reverse_custom_url()
return result
def create(self, **kwargs):
custom_info = {}
# for (k, v) in six.iteritems(info):
for (k, v) in kwargs.items():
if k in list(self._template.keys()):
custom_info[k] = v
return self.create_from_template(custom_info)
def create_from_template(self, custom_info={}):
_url = self._rm_id_in_url()
_info = self._template.copy()
if self.id_field in _info:
del _info[self.id_field]
_info.update(custom_info)
data = self.remove_None_fields_from_dict(_info)
res = self.__class__(client=self.client,
manager=self.manager.__class__(self.client),
url=_url,
info=data,
parent=self.parent,
# Set loaded=True to avoid lazy-loading
loaded=True)
for key, value in data.items():
if value:
res._set_modified_info_dict(key, value)
res._is_new = True
return res
def _update_alias(self, res):
for key, alias in self.alias.items():
if key in res:
res[alias] = res.pop(key)
return res
def _set_url(self, route, resource_id='', rebuild_url=False):
url = self.url if not rebuild_url else ''
if resource_id:
url += '/{}/{}'.format(route, resource_id)
else:
url += '/{}'.format(route)
return url
def _add_id_to_url(self, resource_id):
if not self.url.endswith('/{}'.format(resource_id)):
self.url += '/{}'.format(resource_id)
def _rm_id_in_url(self, resource_id=''):
if not hasattr(self, 'id'):
return self.url
res_id = resource_id or self.id
if self.url.endswith('/{}'.format(res_id)):
return self.url[:len(self.url) - len(self.id) - 1]
return self.url
def _add_base_to_url(self, url):
if (self.base_url not in url) and (not is_absolute_url(url)):
return self.base_url + url
return url
def _set_custom_url(self, url):
self._custom_url = self.url + '/' + url
self._url_backup = self.url
self.url = self._custom_url
def _reverse_custom_url(self):
if self._custom_url:
self._custom_url = ''
self.url = self._url_backup
def _add_details(self, info, force=False):
self._start_updating()
info = self._update_alias(info)
try:
# set id field first.
self._id = info[self.id_field]
except KeyError:
pass
self_url = self.ResponseParser.get_link_from_representation(info)
if self_url:
self.url = self_url
# for (k, v) in six.iteritems(info):
for (k, v) in info.items():
if not force and k in list(self._modified_info_dict.keys()):
continue
if not k == self.id_field:
setattr(self, k, v)
self.representation[k] = v
for related_key in list(self.related_resource.keys()):
if related_key[1:] in self.representation:
self._set_related_resource(related_key[1:])
self._stop_updating()
def _set_related_resource(self, res_key):
# Related resources is set during loading details, and are readonly,
# If you want to change it, you should set the attr with the same
# name(without '_'), then update it to server and load details again.
try:
res_info = self.representation[res_key]
res_class, res_manager = self.related_resource.get('_' + res_key)
res_id = res_info[res_class.id_field]
self.representation[res_key] = res_id
setattr(self, res_key, res_id)
setattr(self,
'_' + res_key,
res_class(self.client,
manager=res_manager(self.client),
resource_id=res_id,
info=res_info,
loaded=False,
)
)
except Exception:
logger.debug(
messages.SET_RELATED_RESOURCE_FAILED.format(res_key, self)
)
self.representation[res_key] = res_info
setattr(self, res_key, res_info)
setattr(self, '_' + res_key, None)
def _get_url(self, urls):
if isinstance(urls, str):
return urls
elif isinstance(urls, dict):
urls = [urls]
elif isinstance(urls, list):
pass
else:
raise URLParseError()
for url in urls:
if url.get('rel') == 'self':
return url.get('href', '')
return ''
def __getattr__(self, k):
if k == 'id' or k == self.id_field:
if '_id' not in self.__dict__:
raise AttributeError(k)
else:
return self._id
# If we can get the attr from a resource collection
# we don't need to get the resource details.
# So we don't load the details until an attr which is
# not in the resource collection is required.
if k not in self.__dict__:
if k in self._template and not self.is_loaded():
self.get()
return getattr(self, k)
raise AttributeError(k)
else:
return self.__dict__[k]
def __setattr__(self, key, value):
if key == '_id':
self._add_id_to_url(value)
if key.startswith('_'):
super(Resource, self).__setattr__(key, value)
return
if self._is_init():
return super(Resource, self).__setattr__(key, value)
if key == 'id' or key == self.id_field:
raise FieldReadOnly(key)
if not self.is_updating() and (key in self._template or key in self.representation): # noqa
self.representation[key] = value
self._set_modified_info_dict(key, value)
super(Resource, self).__setattr__(key, value)
def __repr__(self):
reprkeys = \
sorted(k for k in self.__dict__ if not str(k).startswith('_') and
k not in ('manager', 'client')
)
info = ", ".join("{0}={1}".format(k, getattr(self, k))
for k in reprkeys)
return "<{0} {1}>".format(self.__class__.__name__, info)
def get(self, resource_id='', force=False, **kwargs):
self.set_loaded(True)
if resource_id:
return self.manager.get(resource_id, **kwargs)
else:
_, info = self.manager.get(**kwargs)
self._add_details(info, force)
return self
def get_response(self):
return self.manager.get()
def list(self, **kwargs):
return self.manager.list(**kwargs)
def put(self):
resp, data = self.manager.put()
self._set_modified_info_dict()
return resp, data
def patch(self):
resp, data = self.manager.patch()
self._set_modified_info_dict()
return resp, data
def post(self, body, url=None):
resp, data = self.manager.post(body=body)
return resp, data
def posta(self, body=None):
_url = self._rm_id_in_url()
resp, resources = self.manager.posta(url=_url, body=body)
self._set_modified_info_dict()
return resp, resources
def delete(self):
resp, data = self.manager.delete()
return resp, data
def update(self, info={}):
resp = None
data = None
if info:
resp, data = self.manager.patch(body=info)
self._del_modified_info_dict_keys(info)
else:
resp, data = self.manager.patch(body=self._get_modified_info_dict()
)
self._set_modified_info_dict()
return resp, data
def save(self):
resp = None
data = None
if hasattr(self, 'id'):
# TODO: I don't like _is_new and create_method, need a better idea.
if (not hasattr(self, '_is_new')) or (not self._is_new):
resp, data = self.put()
else:
if self.create_method.lower() not in ('posta', 'put'):
raise Exception(
"You should use POSTA or PUT method to create new resources" # noqa
)
resp, data = getattr(self, self.create_method.lower())()
if self.create_method.lower() == 'posta':
if isinstance(data[0], Resource):
# re-init the res object according to the returned data
self.__init__(
client=self.client,
manager=self.manager,
url=data[0].url,
resource_id=data[0].id,
info=data[0].representation
)
self._is_new = False
else:
resp, data = self.posta()
if isinstance(data[0], Resource):
# re-init the res object according to the returned res
self.__init__(
client=self.client,
manager=self.manager,
url=data[0].url,
resource_id=data[0].id,
info=data[0].representation
)
# self.set_loaded(False) # Set to false in order to use lazy loading.
return resp, data
def _get_modified_info_dict(self):
return self._modified_info_dict
def _set_modified_info_dict(self, key=None, value=None):
if not key:
self._modified_info_dict = {}
else:
self._modified_info_dict[key] = value
def _del_modified_info_dict_key(self, key):
if key in list(self._modified_info_dict.keys()):
del self._modified_info_dict[key]
def _del_modified_info_dict_keys(self, info=None):
if not info:
self._modified_info_dict = {}
else:
for key in list(info.keys()):
self._del_modified_info_dict_key(key)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if hasattr(self, 'id') and hasattr(other, 'id'):
return self.id == other.id
try:
return self._info == other._info and self.url == other.url
except Exception:
return False
def is_loaded(self):
return self._loaded
def set_loaded(self, is_loaded):
self._loaded = is_loaded
def _is_init(self):
return self._initing
def _start_init(self):
self._initing = True
def _finish_init(self):
self._initing = False
def _start_updating(self):
self._updating = True
def _stop_updating(self):
self._updating = False
def _init_updating(self):
self._updating = False
def is_updating(self):
return self._updating
@classmethod
def set_id_field(cls, _id_field):
cls.id_field = _id_field
@classmethod
def set_base_url(cls, base):
url = base
if url.endswith('/'):
url = url[:-1]
if not url.startswith('/'):
url = '/' + url
cls.base_url = url
@classmethod
def get_template(cls):
return cls._template.copy()
def get_template_from_server(self):
pass
class BaseManager(object):
pass
class Manager(UtilsMixin, BaseManager):
"""
Managers interact with a particular type of API (systems, pools, volumes,
etc.) and provide CRUD operations for them.
:param client: HTTPClient object
:param managed_object: The related resource object
:param url: A resource or a resource collection's url
"""
resource_class = Resource
response_key = 'data'
resource_type = ''
def __init__(self, client, managed_object=None, url=''):
self.client = client
self.managed_object = managed_object
self.url = url
self.ResponseParser = get_response_parser_class(client.service_type)
self.RequestParser = get_request_parser_class(client.service_type)
def _get_data(self, response_body, method='', response=None):
if not method: # get or list
if not response_body:
raise ResponseBodyMissingError()
res_p = self.ResponseParser(response_body, self.resource_type)
return res_p.get_representations()
elif method == POSTA:
if not response_body:
raise ResponseBodyMissingError()
res_p = self.ResponseParser(response_body, self.resource_type)
return res_p.get_posta_response_data()
else:
if response_body:
try:
data = self._get_status_body(response_body)
except Exception:
logger.debug(
messages.CAN_NOT_GET_STATUS_BODY.format(
method,
self.resource_class.__name__
)
)
data = response_body
elif response.status_code in (HTTP200, HTTP204):
data = messages.DEFAULT_SUCCESS_BODY_DICT
else:
res_id = ''
try:
res_id = self.managed_object.id
except Exception:
res_id = ''
data = json.loads(
messages.DEFAULT_FAIL_BODY_JSON.format(
action=method,
res_class=self.resource_class.__name__,
res_id=res_id
)
)
return data
def _get_status_body(self, response_body):
res_p = self.ResponseParser(response_body, self.resource_type)
return res_p.get_status_body()
def _get_request_data(self, data):
req = self.RequestParser(data)
return req.get_request_data()
def _return_new_resource_by_response_data(self, resp, data, url):
resource_uri = url or resp.headers.get('Location')
if resource_uri:
resource_id = self.ResponseParser.get_resource_id_from_url(
url=resource_uri,
resource_type=self.resource_type,
)
else:
resource_id = None
return self.resource_class(
client=self.client,
manager=self.__class__(self.client),
url=resource_uri,
resource_id=resource_id,
info=data
)
def _get(self, resource_id='', url='', obj_class=None, **kwargs):
new = False
parent = None
if not url:
if self.managed_object is not None:
self.url = self.managed_object.url
parent = self.managed_object.parent
if resource_id:
self.url += '/' + resource_id
new = True
else:
raise URLNotSpecifiedError()
else:
self.url = url
new = True
resp, body = self.client.get(self.url, **kwargs)
data = self._get_data(body)[0]
if not new:
return resp, data
else:
if obj_class is None:
obj_class = self.resource_class
return obj_class(client=self.client,
manager=self.__class__(self.client),
url=self.url,
info=data,
parent=parent,
loaded=True)
# if url and obj_class is not none, list the sub collection
# of current resource.
def _list(self, url='', obj_class=None, body=None, **kwargs):
parent = None
if not url:
if self.managed_object is not None:
self.url = self.managed_object.url
parent = self.managed_object.parent
else:
raise URLNotSpecifiedError()
else:
self.url = url
if body:
_, body = self.client.post(self.url, body=body)
else:
_, body = self.client.get(self.url, **kwargs)
if obj_class is None:
obj_class = self.resource_class
data = self._get_data(body)
return [obj_class(client=self.client,
manager=self.__class__(self.client),
url=self.url,
parent=parent,
info=res) for res in data if res]
def _post(self, body, url=None):
if not url:
if self.managed_object is not None:
url = self.managed_object.url
else:
raise URLNotSpecifiedError()
resp, res_body = self.client.post(url,
body=self._get_request_data(body)
)
data = self._get_data(res_body, method=POST, response=resp)
return resp, data
def _posta(self, url='', body=None):
post_body = None
if not url:
if self.managed_object is not None:
self.url = self.managed_object.url
post_body = body or self.managed_object.representation
else:
raise URLNotSpecifiedError()
else:
self.url = url
post_body = body or self.managed_object.representation
post_body = self.remove_None_fields_from_dict(post_body)
resp, body = self.client.post(self.url,
body=self._get_request_data(post_body)
)
data = self._get_data(body, method=POSTA, response=resp)
if not isinstance(data, list):
raise Exception("The parsed posta response data should be a list.")
res_list = []
for s_data in data:
res_data, res_url = s_data
if self.ResponseParser.error_status_key in res_data:
res = res_data.get(self.ResponseParser.error_status_key)
else:
res = self._return_new_resource_by_response_data(
resp,
res_data.get(self.ResponseParser.resource_data_key),
res_url
)
res_list.append(res)
return resp, res_list
def _put(self, url='', body=None):
put_body = None
if not url:
if self.managed_object is not None:
self.url = self.managed_object.url
put_body = body or self.managed_object.representation
else:
raise URLNotSpecifiedError()
else:
self.url = url
put_body = body
resp, body = self.client.put(self.url,
body=self._get_request_data(put_body)
)
data = self._get_data(body, method='PUT', response=resp)
return resp, data
def _patch(self, url='', body=None):
patch_body = None
if not url:
if self.managed_object is not None:
self.url = self.managed_object.url
patch_body = body if body else \
self.managed_object._get_modified_info_dict()
else:
raise URLNotSpecifiedError()
else:
self.url = url
patch_body = body
resp, body = self.client.patch(self.url,
body=self._get_request_data(patch_body)
)
data = self._get_data(body, method='PATCH', response=resp)
return resp, data
def _delete(self, url=''):
if not url:
if self.managed_object is not None:
self.url = self.managed_object.url
else:
raise URLNotSpecifiedError()
else:
self.url = url
resp, body = self.client.delete(self.url)
data = self._get_data(body, method='DELETE', response=resp)
return resp, data
class DefaultManager(Manager):
"""
Default resource manager.
"""
resource_class = Resource
resource_type = 'default'
def get(self, resource_id='', url='', obj_class=None, **kwargs):
return self._get(resource_id=resource_id, url=url,
obj_class=obj_class, **kwargs)
def list(self, url='', obj_class=None, body=None, **kwargs):
return self._list(url=url, obj_class=obj_class, body=body, **kwargs)
def post(self, url='', body=None):
return self._post(url=url, body=body)
def posta(self, url='', body=None):
return self._posta(url=url, body=body)
def put(self, url='', body=None):
return self._put(url=url, body=body)
def patch(self, url='', body=None):
return self._patch(url=url, body=body)
def delete(self, url=''):
return self._delete(url=url)
```
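A minimal usage sketch of the `Manager`/`DefaultManager` pair above (hostname and credentials are placeholders, and a reachable DS8K REST endpoint is assumed):

```python
# Sketch only: assumes pyds8k is installed and the placeholder endpoint
# answers the DS8K REST API.
from pyds8k.httpclient import HTTPClient
from pyds8k.base import DefaultManager

client = HTTPClient('localhost', 'admin', 'admin', service_type='ds8k')
manager = DefaultManager(client)

# list() turns each item of the parsed response into a resource_class
# instance; get() with a url returns a single loaded resource.
volumes = manager.list(url='/api/v1/volumes')
system = manager.get(url='/api/v1/systems')
```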
#### File: ds8k/v1/client.py
```python
from logging import getLogger
from pyds8k import PYDS8K_DEFAULT_LOGGER
from pyds8k.httpclient import HTTPClient
from pyds8k.base import Resource, DefaultManager
from pyds8k.resources.ds8k.v1.common import types
from pyds8k.resources.ds8k.v1.systems import System, \
SystemManager
logger = getLogger(PYDS8K_DEFAULT_LOGGER)
DEFAULT_PORT = 8452
class Client(object):
"""
Top-level object to access all the DS8K resources.
:param service_address: Hostname/IP address of the REST server if it is
standalone or the hostname/IP address of the DS8K
system. Required.
:type service_address: string
    :param user: Username for logging in to the DS8K system. Required.
    :type user: string
    :param password: Password for logging in to the DS8K system. Required.
:type password: string
:param port: Port number of the server.
:type port: int
:param hostname: Hostname/IP address of the DS8K system. It is required if
the server is standalone.
:type hostname: string
:param timeout: How long to wait for the server to send data before giving
up. In seconds.
Default is 25, and 0 means no limitation.
:type timeout: float
"""
def __init__(self, service_address, user, password,
port=DEFAULT_PORT,
hostname='',
service_type='ds8k',
service_version='v1',
timeout=None,
):
logger.info('================== logger is enabled ==================')
client = HTTPClient(service_address, user, password,
port=port,
hostname=hostname,
service_type=service_type,
service_version=service_version,
timeout=timeout,
)
self.client = client
self.resource = Resource(self.client, DefaultManager(self.client))
self.system = System(self.client, SystemManager(self.client))
def __getattr__(self, k):
try:
# if not self.system.is_loaded():
# self.system = self.system.get_system()
method = getattr(self.system, k)
if not callable(method):
raise AttributeError(k)
else:
return method
except Exception:
raise AttributeError(k)
def create_volume(self, name, cap, pool, tp='', lss=''):
"""
        Create an fb volume.
        name, cap (in GiB) and pool id are mandatory.
        tp has three optional values: 'none', 'ese' and 'tse'.
        The default is 'none', which creates a standard volume.
"""
return self.system.create_volume_fb(
name=name, cap=cap,
pool=pool, tp=tp,
captype=types.DS8K_CAPTYPE_GIB,
lss=lss,
)
def create_volumes(self, name_col, cap, pool, tp='', lss=''):
"""
Create fb volumes.
        name_col, cap (in GiB) and pool id are mandatory.
        name_col is a list of volume names.
        tp has three optional values: 'none', 'ese' and 'tse'.
        The default is 'none', which creates standard volumes.
"""
return self.create_volumes_without_same_prefix(name_col, cap,
pool, tp, lss
)
def create_volumes_with_same_prefix(self, name, cap, pool,
quantity='', tp='', lss=''
):
"""
        Create fb volumes with the same name prefix.
        name, cap (in GiB), pool id and quantity are mandatory.
        name is the volume name prefix; the final volume names will
        be something like 'prefix_1', 'prefix_2', ...
        tp has three optional values: 'none', 'ese' and 'tse'.
        The default is 'none', which creates standard volumes.
"""
return self.system.create_volumes_with_same_prefix(
cap=cap, pool=pool,
stgtype=types.DS8K_VOLUME_TYPE_FB,
name=name, quantity=quantity,
captype=types.DS8K_CAPTYPE_GIB, tp=tp,
lss=lss,
)
def create_volumes_without_same_prefix(self, name_col, cap, pool,
tp='', lss=''):
return self.system.create_volumes_without_same_prefix(
cap=cap, pool=pool,
stgtype=types.DS8K_VOLUME_TYPE_FB,
name_col=name_col,
captype=types.DS8K_CAPTYPE_GIB, tp=tp,
lss=lss,
)
```
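A short usage sketch of the `Client` above (address, credentials and the pool id are placeholders; a live DS8K REST service is required):

```python
from pyds8k.client.ds8k.v1.client import Client

client = Client('localhost', 'admin', 'admin')

# Unknown attributes are proxied to the System resource via __getattr__,
# so a call such as get_volumes() is resolved on client.system.
volumes = client.get_volumes()

# Convenience wrapper defined above: one thin-provisioned (ese) fb volume.
created = client.create_volume(name='vol_1', cap=10, pool='P0', tp='ese')
```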
#### File: pyds8k/pyds8k/exceptions.py
```python
from pyds8k.utils import get_subclasses, \
get_response_parser_class
from pyds8k import messages
class BaseRestError(Exception):
pass
class InvalidArgumentError(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return messages.INVALID_ARGUMENT.format(
self.reason
)
class OperationNotAllowed(Exception):
"""
The operation performed on the resource is not allowed.
"""
def __init__(self, operation_name, resource_name=''):
self.operation_name = operation_name
self.resource_name = resource_name
def __str__(self):
return messages.OPERATION_NOT_ALLOWED.format(
self.operation_name,
self.resource_name
)
class URLNotSpecifiedError(Exception):
"""
The URL is not specified.
"""
def __str__(self):
return messages.URL_NOT_SPECIFIED
class URLMissingError(Exception):
"""
The URL is missing.
"""
def __str__(self):
return messages.URL_MISSING
class IDMissingError(Exception):
"""
The id field is missing or None.
"""
def __str__(self):
return messages.ID_MISSING
class ResponseBodyMissingError(Exception):
"""
The response body is missing.
"""
def __str__(self):
return messages.RESPONSE_BODY_MISSING
class URLParseError(Exception):
"""
Can not get the URL
"""
def __str__(self):
return messages.CAN_NOT_GET_URL
class RepresentationParseError(Exception):
"""
Can not get the representation
"""
def __str__(self):
return messages.CAN_NOT_GET_REPRESENTATION
class FieldReadOnly(Exception):
"""
Field is read only.
"""
def __init__(self, field_name):
self.field_name = field_name
def __str__(self):
return messages.FIELD_READONLY.format(self.field_name)
class ConnectionError(Exception):
"""
Could not open a connection to the API service.
"""
pass
class Timeout(Exception):
"""
The request timed out.
"""
def __init__(self, url):
self.url = url
def __str__(self):
return messages.REQUEST_TIMED_OUT.format(self.url)
class ClientException(Exception):
"""
The base exception class for all HTTP client or server errors.
"""
def __init__(self, code, message=None, detail='', origin_data=None):
self.code = code
self.message = message
self.detail = detail
self.error_data = origin_data
if self.message and self.detail:
self.details = '[{}] {}'.format(self.message, self.detail)
elif self.message or self.detail:
self.details = self.message or self.detail
else:
self.details = ''
def __str__(self):
return "HTTP {0} {1}. {2}".format(
self.code,
self.reason_phrase,
self.details
)
class ClientError(ClientException):
"""
HTTP 4xx - Client Error
"""
status_code = '4xx'
reason_phrase = "Client Error"
class ServerError(ClientException):
"""
HTTP 5xx - Server Error
"""
status_code = '5xx'
reason_phrase = "Server Error"
class BadRequest(ClientError):
"""
HTTP 400 - Bad request: you sent some malformed data.
"""
status_code = '400'
reason_phrase = "Bad Request"
class Unauthorized(ClientError):
"""
HTTP 401 - Unauthorized: bad credentials.
"""
status_code = '401'
reason_phrase = "Unauthorized"
class Forbidden(ClientError):
"""
HTTP 403 - Forbidden: your credentials don't give you access to this
resource.
"""
status_code = '403'
reason_phrase = "Forbidden"
class NotFound(ClientError):
"""
HTTP 404 - Not found
"""
status_code = '404'
reason_phrase = "Not Found"
class MethodNotAllowed(ClientError):
"""
HTTP 405 - Method Not Allowed
"""
status_code = '405'
reason_phrase = "Method Not Allowed"
class Conflict(ClientError):
"""
HTTP 409 - Conflict
"""
status_code = '409'
reason_phrase = "Conflict"
class UnsupportedMediaType(ClientError):
"""
HTTP 415 - Unsupported Media Type
"""
status_code = '415'
reason_phrase = "Unsupported Media Type"
class InternalServerError(ServerError):
"""
HTTP 500 - Internal Server Error: The server encountered an unexpected
condition which prevented it from fulfilling the request.
"""
status_code = '500'
reason_phrase = "Internal Server Error"
class ServiceUnavailable(ServerError):
"""
HTTP 503 - Service Unavailable
"""
status_code = '503'
reason_phrase = "Service Unavailable"
class GatewayTimeout(ServerError):
"""
HTTP 504 - Gateway Timeout
"""
status_code = '504'
reason_phrase = "Gateway Timeout"
_error_dict = dict((c.status_code, c) for c in get_subclasses(ClientException))
def raise_error(response, body, service_type=''):
"""
Return an instance of an ClientException or subclass
based on an requests response.
"""
ResponseParser = get_response_parser_class(service_type)
cls = _error_dict.get(str(response.status_code), ClientException)
if body:
res_p = ResponseParser(body)
message = res_p.get_error_code()
details = res_p.get_error_msg()
data = res_p.get_status_body()
return cls(code=response.status_code,
message=message,
detail=details,
origin_data=data
)
else:
return cls(code=response.status_code,
message=response.reason,
origin_data=body
)
```
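The mapping in `raise_error` lets callers catch errors at any granularity. A sketch of the intended pattern (assuming `client` is a connected `Client` instance; `get_volume` is a hypothetical accessor on it):

```python
from pyds8k.exceptions import NotFound, InternalServerError, ClientException

try:
    client.get_volume('0000')   # hypothetical accessor on a connected client
except NotFound as e:
    print('volume missing:', e.details)          # HTTP 404
except InternalServerError as e:
    print('server-side failure:', e.error_data)  # HTTP 500
except ClientException as e:                     # any other 4xx/5xx
    print('HTTP {}: {}'.format(e.code, e.details))
```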
#### File: v1/common/base.py
```python
from logging import getLogger
from pyds8k.messages import INVALID_TYPE
from pyds8k import PYDS8K_DEFAULT_LOGGER
from pyds8k.base import Resource, Manager
from .mixins import RootResourceMixin
from pyds8k.exceptions import OperationNotAllowed, \
URLNotSpecifiedError, \
FieldReadOnly
from ....utils import get_resource_class_by_name
logger = getLogger(PYDS8K_DEFAULT_LOGGER)
class Base(RootResourceMixin, Resource):
    # If there is a field named "id" in the response data,
    # the id_field can't be set to a value other than "id".
id_field = 'id'
url_field = 'link'
base_url = '/api/v1'
create_method = 'posta'
    # Required only in writable resources; fields are from _template.
    # The resource id is excluded.
readonly_fileds = ()
    # Unlike related_resource, related_resources_collection is not set
    # during loading; its keys use lazy loading to get details.
related_resources_collection = ()
def _add_details(self, info, force=False):
super(Base, self)._add_details(info, force=force)
self._start_updating()
self._set_related_resources_collection()
self._stop_updating()
def _set_related_resources_collection(self):
for key in self.related_resources_collection:
res = self.representation.get(key)
            # If the related resources (should be a list) are not in info,
            # empty them and wait for lazy loading.
if not isinstance(res, list):
self.representation[key] = ''
try:
delattr(self, key)
except AttributeError:
pass
            # If the related resources (should be a list) are in info, set them.
else:
re_class, re_manager = self._get_resource_class_by_name(key)
res_list = [re_class(self.client,
manager=re_manager(self.client),
info=r)
for r in res]
setattr(self, key, res_list)
def __setattr__(self, key, value):
if key in self.readonly_fileds and not self.is_updating():
raise FieldReadOnly(key)
super(Base, self).__setattr__(key, value)
try:
if key in self.related_resources_collection:
ids = [getattr(item, item.id_field) for item in value]
self.representation[key] = ids
if not self.is_updating():
self._set_modified_info_dict(key, ids)
except AttributeError:
pass
def __getattr__(self, key):
if key in self.related_resources_collection:
try:
return getattr(self, 'get_{}'.format(key))()
except Exception as e:
logger.debug(
"Can not get {} from {}, reason is: {}".format(
key, self, type(e)
)
)
raise AttributeError(key)
return super(Base, self).__getattr__(key)
def __repr__(self):
return "<{0}: {1}>".format(self.__class__.__name__, self._get_id())
def _get_resource_class_by_name(self, resource_type):
prefix = '{}.{}'.format(self.client.service_type,
self.client.service_version
)
return get_resource_class_by_name(resource_type, prefix)
def _verify_type(self, new_type, valid_type_list):
if new_type and not (new_type in valid_type_list):
raise ValueError(
INVALID_TYPE.format(', '.join(valid_type_list))
)
class SingletonBase(Base):
# A singleton resource has no id field by default
id_field = '*'
class BaseManager(Manager):
resource_class = Base
response_key = 'data'
resource_type = ''
def _post(self, url='', body=None):
post_body = None
if not body:
if self.managed_object is not None:
post_body = self.managed_object._get_modified_info_dict()
# repre = self.managed_object.representation
# post_body = {key: value
# for key, value in repre.iteritems()
# if key not in self.managed_object.readonly_fileds
# }
else:
raise URLNotSpecifiedError()
else:
post_body = body
return super(BaseManager, self)._post(url=url, body=post_body)
    # DS8K uses PUT in a PATCH-like way and does not use PATCH.
def _put(self, url='', body=None):
put_body = None
if not url:
if self.managed_object is not None:
self.url = self.managed_object.url
# use modified info here
put_body = body if body else \
self.managed_object._get_modified_info_dict()
else:
raise URLNotSpecifiedError()
else:
self.url = url
put_body = body
resp, body = self.client.put(self.url,
body=self._get_request_data(put_body)
)
data = self._get_data(body, method='PUT', response=resp)
return resp, data
def _patch(self, url='', body=None):
return self._put(url=url, body=body)
def get(self, resource_id='', url='', obj_class=None, **kwargs):
raise OperationNotAllowed('get', self.resource_class.__name__)
def list(self, url='', obj_class=None, body=None, **kwargs):
raise OperationNotAllowed('list', self.resource_class.__name__)
def post(self, url='', body=None):
raise OperationNotAllowed('post', self.resource_class.__name__)
def posta(self, url='', body=None):
raise OperationNotAllowed('posta', self.resource_class.__name__)
def put(self, url='', body=None):
raise OperationNotAllowed('put', self.resource_class.__name__)
def patch(self, url='', body=None):
raise OperationNotAllowed('patch', self.resource_class.__name__)
def delete(self, url=''):
raise OperationNotAllowed('delete', self.resource_class.__name__)
class ReadOnlyManager(BaseManager):
def get(self, resource_id='', url='', obj_class=None, **kwargs):
return self._get(resource_id=resource_id,
url=url, obj_class=obj_class, **kwargs)
def list(self, url='', obj_class=None, body=None, **kwargs):
return self._list(url=url, obj_class=obj_class, body=body, **kwargs)
class SingletonBaseManager(BaseManager):
def get(self, url='', obj_class=None, **kwargs):
return self._get(url=url, obj_class=obj_class, **kwargs)
def list(self, url='', obj_class=None, body=None, **kwargs):
return self._list(url=url, obj_class=obj_class, body=body, **kwargs)
```
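A hypothetical subclass sketch showing the read-only field guard in `Base.__setattr__` (assumes a connected `HTTPClient` bound to `client`; `Demo` and its field are made up for illustration):

```python
from pyds8k.exceptions import FieldReadOnly
from pyds8k.resources.ds8k.v1.common.base import Base, BaseManager

class Demo(Base):
    readonly_fileds = ('state',)   # the attribute name is spelled this way upstream

demo = Demo(client, BaseManager(client))
try:
    demo.state = 'online'          # rejected outside the internal updating context
except FieldReadOnly as e:
    print(e)
```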
#### File: pyds8k/pyds8k/size_converter.py
```python
GiB = 2**30 # Gibibyte = GiB = 2^30 B = 1,073,741,824 bytes
MiB = 2**20 # Mebibyte = MiB = 2^20 B = 1,048,576 bytes
KiB = 2**10 # Kibibyte = kiB = 2^10 B = 1,024 bytes
GB = 10**9 # Gigabyte = GB = 10^9 B = 1,000,000,000 bytes
MB = 10**6 # Megabyte = MB = 10^6 B = 1,000,000 bytes
KB = 10**3 # Kilobyte = kB = 10^3 B = 1,000 bytes
def validate_number(number):
if not isinstance(number, (int, float)):
        raise ValueError("Expected types are (int, float)")
# =============================================================================
# Methods converting to bytes.
# =============================================================================
def convert_size_gib_to_bytes(size_in_gib):
""":rtype: int / long"""
validate_number(size_in_gib)
return size_in_gib * GiB
def convert_size_mib_to_bytes(size_in_mib):
""":rtype: int / long"""
validate_number(size_in_mib)
return size_in_mib * MiB
def convert_size_kib_to_bytes(size_in_kib):
""":rtype: int / long"""
validate_number(size_in_kib)
return size_in_kib * KiB
def convert_size_gb_to_bytes(size_in_gb):
""":rtype: int / long"""
validate_number(size_in_gb)
return size_in_gb * GB
def convert_size_mb_to_bytes(size_in_mb):
""":rtype: int / long"""
validate_number(size_in_mb)
return size_in_mb * MB
def convert_size_kb_to_bytes(size_in_kb):
""":rtype: int / long"""
validate_number(size_in_kb)
return size_in_kb * KB
# =============================================================================
# Methods converting from bytes.
# =============================================================================
def convert_size_bytes_to_gib(size_in_bytes):
""":rtype: float"""
return float(size_in_bytes) / GiB
def convert_size_bytes_to_mib(size_in_bytes):
""":rtype: float"""
return float(size_in_bytes) / MiB
def convert_size_bytes_to_kib(size_in_bytes):
""":rtype: float"""
return float(size_in_bytes) / KiB
def convert_size_bytes_to_gb(size_in_bytes):
""":rtype: float"""
return float(size_in_bytes) / GB
def convert_size_bytes_to_mb(size_in_bytes):
""":rtype: float"""
return float(size_in_bytes) / MB
def convert_size_bytes_to_kb(size_in_bytes):
""":rtype: float"""
return float(size_in_bytes) / KB
```
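Since the two unit families differ by powers of 2 versus powers of 10, a quick sanity check makes the gap concrete (assuming the module is importable as `pyds8k.size_converter`, its path in this repo):

```python
from pyds8k import size_converter as sc

assert sc.convert_size_gib_to_bytes(1) == 1073741824   # 2**30
assert sc.convert_size_gb_to_bytes(1) == 1000000000    # 10**9
assert sc.convert_size_bytes_to_mib(2**20) == 1.0      # exact round trip

# 1 GiB expressed in decimal gigabytes shows the ~7% difference:
print(sc.convert_size_bytes_to_gb(sc.convert_size_gib_to_bytes(1)))
# -> 1.073741824
```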
#### File: test_client/test_ds8k/test_client.py
```python
from pyds8k.resources.ds8k.v1.common.types import DS8K_SYSTEM, \
DS8K_VOLUME
from ...base import TestCaseWithConnect
from pyds8k.resources.ds8k.v1.common.base import Base
from ...data import get_response_list_json_by_type, \
get_response_list_data_by_type
from pyds8k.client.ds8k.v1.client import Client
from pyds8k.resources.ds8k.v1.volumes import Volume
import httpretty
system_list_response_json = get_response_list_json_by_type(DS8K_SYSTEM)
volume_list_response_json = get_response_list_json_by_type(DS8K_VOLUME)
volume_list_response = get_response_list_data_by_type(DS8K_VOLUME)
class TestClient(TestCaseWithConnect):
def setUp(self):
super(TestClient, self).setUp()
self.base_url = Base.base_url
self.rest_client = Client('localhost', 'admin', 'admin', '8088')
@httpretty.activate
def test_get_array_method(self):
domain = self.client.domain
vol_url = '/volumes'
sys_url = '/systems'
httpretty.register_uri(httpretty.GET,
domain + self.base_url + vol_url,
body=volume_list_response_json,
content_type='application/json')
httpretty.register_uri(httpretty.GET,
domain + self.base_url + sys_url,
body=system_list_response_json,
content_type='application/json')
vol_list = self.rest_client.get_volumes()
self.assertIsInstance(vol_list, list)
self.assertIsInstance(vol_list[0], Volume)
self.assertEqual(
len(vol_list),
len(volume_list_response['data']['volumes'])
)
with self.assertRaises(AttributeError):
# 'base_url' is an attr from System, not a method
self.rest_client.base_url
```
#### File: pyds8k/test/test_httpclient.py
```python
from pyds8k.exceptions import URLParseError
from . import base
import httpretty
import json
import time
from nose.tools import nottest
from pyds8k.httpclient import HTTPClient
from pyds8k.base import Resource, DefaultManager
from .data import get_response_list_json_by_type, \
get_response_list_data_by_type, \
get_response_data_by_type, \
get_response_json_by_type
info = {'id': 'v1', 'name': 'vol1'}
custom_method_get = {'data': 'custom_method_get'}
custom_method_get_json = json.dumps(custom_method_get)
DEFAULT = 'default'
default_a_response = get_response_data_by_type(DEFAULT)
default_a_response_json = get_response_json_by_type(DEFAULT)
default_list_response = get_response_list_data_by_type(DEFAULT)
default_list_response_json = get_response_list_json_by_type(DEFAULT)
class TestHTTPClient(base.TestCaseWithConnect):
def setUp(self):
super(TestHTTPClient, self).setUp()
self.base_url = ''
def test_parse_url(self):
url1 = self.domain + '/new'
url2 = '/new'
_, url3 = url1.split('//')
url4 = 'http://new_domain' + '/new'
self.assertEqual('/new', self.client._parse_url(url1))
self.assertEqual('/new', self.client._parse_url(url2))
self.assertEqual('/new', self.client._parse_url(url3))
with self.assertRaises(URLParseError):
self.client._parse_url(url4)
new_client = HTTPClient('172.16.58.3', 'admin', 'admin',
service_type='ds8k',
secure=True)
with self.assertRaises(URLParseError):
new_client._parse_url(url3)
@httpretty.activate
def test_redirect(self):
url = '/default/old'
new_url = '/default/a'
httpretty.register_uri(httpretty.GET,
self.domain + self.base_url + url,
content_type='application/json',
adding_headers={'Location': new_url},
status=301)
httpretty.register_uri(httpretty.GET,
self.domain + self.base_url + new_url,
body=default_a_response_json,
content_type='application/json',
status=200)
de = self.resource.one(DEFAULT, 'old').get(allow_redirects=False)
self.assertEqual(new_url, de.url)
    # Does not work in this way.
@nottest
@httpretty.activate
def test_timeout(self):
url = '/default/a'
new_client = HTTPClient('localhost', 'admin', 'admin',
service_type='ds8k',
timeout=0.01)
def _verify_request(request, uri, headers):
time.sleep(10)
return (200, headers, default_a_response_json)
httpretty.register_uri(httpretty.GET,
new_client.domain + self.base_url + url,
body=_verify_request,
content_type='application/json',
)
resource = Resource(new_client, DefaultManager(new_client))
resource.one(DEFAULT, 'a').get()
```
#### File: test_resources/test_ds8k/test_host_port.py
```python
import httpretty
import json
# import warnings
from pyds8k.exceptions import InternalServerError, FieldReadOnly
from pyds8k.messages import DEFAULT_SUCCESS_BODY_DICT
from pyds8k.resources.ds8k.v1.common.types import DS8K_HOST_PORT
from pyds8k.resources.ds8k.v1.host_ports import HostPort, \
HostPortManager
from pyds8k.resources.ds8k.v1.ioports import IOPort
from pyds8k.resources.ds8k.v1.hosts import Host
from .base import TestDS8KWithConnect
from ...data import get_response_json_by_type, get_response_data_by_type
from ...data import action_response, action_response_json, \
action_response_failed, action_response_failed_json, \
create_host_port_response_json
from pyds8k.dataParser.ds8k import RequestParser
response_a = get_response_data_by_type(DS8K_HOST_PORT)
response_a_json = get_response_json_by_type(DS8K_HOST_PORT)
class TestHostPort(TestDS8KWithConnect):
def setUp(self):
super(TestHostPort, self).setUp()
self.host_port = HostPort(self.client, HostPortManager(self.client))
self.wwpn = self._get_resource_id_from_resopnse(DS8K_HOST_PORT,
response_a,
HostPort.id_field
)
@httpretty.activate
def test_delete_host_port(self):
url = '/host_ports/{}'.format(self.wwpn)
httpretty.register_uri(
httpretty.GET,
self.domain + self.base_url + url,
body=response_a_json,
content_type='application/json',
status=200,
)
httpretty.register_uri(
httpretty.DELETE,
self.domain + self.base_url + url,
body=action_response_json,
content_type='application/json',
status=204,
)
# Way 1
_ = self.system.delete_host_port(self.wwpn)
self.assertEqual(httpretty.DELETE, httpretty.last_request().method)
# self.assertEqual(resp1, action_response['server'])
# Way 2
host_port = self.system.get_host_port(self.wwpn)
self.assertIsInstance(host_port, HostPort)
resp2, _ = host_port.delete()
self.assertEqual(resp2.status_code, 204)
self.assertEqual(httpretty.DELETE, httpretty.last_request().method)
# self.assertEqual(resp2.text, action_response['server'])
# self.assertEqual(data2, action_response['server'])
# warnings.warn("TestHostPort.test_delete_host_port: do not know why \
# requests can not get DELETE response's body. Maybe httpretty can \
# not set DELETE response's body correctly")
@httpretty.activate
def test_delete_host_port_without_resp_body(self):
url = '/host_ports/{}'.format(self.wwpn)
httpretty.register_uri(httpretty.DELETE,
self.domain + self.base_url + url,
content_type='application/json',
status=204,
)
resp1 = self.system.delete_host_port(self.wwpn)
self.assertEqual(httpretty.DELETE, httpretty.last_request().method)
self.assertEqual(resp1, DEFAULT_SUCCESS_BODY_DICT)
@httpretty.activate
def test_delete_host_port_failed(self):
url = '/host_ports/{}'.format(self.wwpn)
httpretty.register_uri(
httpretty.DELETE,
self.domain + self.base_url + url,
body=action_response_failed_json,
content_type='application/json',
status=500,
)
with self.assertRaises(InternalServerError) as cm:
self.system.delete_host_port(self.wwpn)
self.assertEqual(action_response_failed['server'],
cm.exception.error_data
)
self.assertEqual(httpretty.DELETE, httpretty.last_request().method)
@httpretty.activate
def test_update_host_port(self):
url = '/host_ports/{}'.format(self.wwpn)
new_host_name = 'new_host'
def _verify_request(request, uri, headers):
self.assertEqual(uri, self.domain + self.base_url + url)
resq = RequestParser({'host': new_host_name})
self.assertEqual(json.loads(request.body), resq.get_request_data())
return (200, headers, action_response_json)
httpretty.register_uri(
httpretty.GET,
self.domain + self.base_url + url,
body=response_a_json,
content_type='application/json',
status=200,
)
httpretty.register_uri(httpretty.PUT,
self.domain + self.base_url + url,
body=_verify_request,
content_type='application/json',
)
# Way 1
resp1 = self.system.update_host_port_change_host(self.wwpn,
new_host_name
)
self.assertEqual(httpretty.PUT, httpretty.last_request().method)
self.assertEqual(resp1, action_response['server'])
host_port = self.system.get_host_port(self.wwpn)
# Way 2
host_port.host = new_host_name
resp2, data2 = host_port.update()
self.assertEqual(httpretty.PUT, httpretty.last_request().method)
self.assertEqual(data2, action_response['server'])
self.assertEqual(resp2.status_code, 200)
# Way 3 in DS8K, save works the same as update
host_port.host = new_host_name
resp3, data3 = host_port.save()
self.assertEqual(httpretty.PUT, httpretty.last_request().method)
self.assertEqual(data3, action_response['server'])
self.assertEqual(resp3.status_code, 200)
# Way 4
host_port.host = new_host_name
resp4, data4 = host_port.patch()
self.assertEqual(httpretty.PUT, httpretty.last_request().method)
self.assertEqual(data4, action_response['server'])
self.assertEqual(resp4.status_code, 200)
# Way 5 in DS8K, put works the same as patch
host_port.host = new_host_name
resp5, data5 = host_port.put()
self.assertEqual(httpretty.PUT, httpretty.last_request().method)
self.assertEqual(data5, action_response['server'])
self.assertEqual(resp5.status_code, 200)
@httpretty.activate
def test_update_host_port_failed(self):
url = '/host_ports/{}'.format(self.wwpn)
new_host_name = 'new_host'
httpretty.register_uri(httpretty.PUT,
self.domain + self.base_url + url,
body=action_response_failed_json,
content_type='application/json',
status=500
)
with self.assertRaises(InternalServerError) as cm:
self.system.update_host_port_change_host(self.wwpn, new_host_name)
self.assertEqual(action_response_failed['server'],
cm.exception.error_data
)
def test_set_readonly_field(self):
with self.assertRaises(FieldReadOnly):
self.host_port.state = 'new_state'
with self.assertRaises(FieldReadOnly):
self.host_port.wwpn = 'new_wwpn'
def test_update_host_field(self):
host_info = get_response_data_by_type(
DS8K_HOST_PORT
)['data'][DS8K_HOST_PORT][0]
host_name = host_info['host']['name']
self.host_port._add_details(host_info)
self.assertEqual(
self.host_port.host,
host_name
)
self.assertEqual(
self.host_port.representation['host'],
host_name
)
self.assertIsInstance(self.host_port._host, Host)
self.assertEqual(
self.host_port._host.id,
host_name
)
self.host_port.host = 'new_host'
self.assertEqual(
self.host_port.host,
'new_host'
)
self.assertEqual(
self.host_port.representation['host'],
'new_host'
)
@httpretty.activate
def test_create_host_port(self):
url = '/host_ports'
host_name = 'host1'
def _verify_request(request, uri, headers):
self.assertEqual(uri, self.domain + self.base_url + url)
req = RequestParser({'wwpn': self.wwpn, 'host': host_name})
self.assertDictContainsSubset(
req.get_request_data().get('request').get('params'),
json.loads(request.body).get('request').get('params'),
)
return (200, headers, create_host_port_response_json)
httpretty.register_uri(httpretty.POST,
self.domain + self.base_url + url,
body=_verify_request,
content_type='application/json',
)
# Way 1
resp1 = self.system.create_host_port(self.wwpn, host_name)
self.assertEqual(httpretty.POST, httpretty.last_request().method)
self.assertIsInstance(resp1[0], HostPort)
# Way 2
host_port = self.system.all(DS8K_HOST_PORT, rebuild_url=True)
new_host_port2 = host_port.create(wwpn=self.wwpn, host=host_name)
resp2, data2 = new_host_port2.posta()
self.assertEqual(httpretty.POST, httpretty.last_request().method)
self.assertIsInstance(data2[0], HostPort)
self.assertEqual(resp2.status_code, 200)
# Way 3
host_port = self.system.all(DS8K_HOST_PORT, rebuild_url=True)
new_host_port3 = host_port.create(wwpn=self.wwpn, host=host_name)
resp3, data3 = new_host_port3.save()
self.assertEqual(httpretty.POST, httpretty.last_request().method)
self.assertIsInstance(data3[0], HostPort)
self.assertEqual(resp3.status_code, 200)
# Way 4
        # Don't init a resource instance by yourself when creating a new
        # one; use .create() instead.
@httpretty.activate
def test_create_host_port_failed(self):
url = '/host_ports'
host_name = 'host1'
httpretty.register_uri(httpretty.POST,
self.domain + self.base_url + url,
body=action_response_failed_json,
content_type='application/json',
status=500
)
with self.assertRaises(InternalServerError) as cm:
self.system.create_host_port(self.wwpn, host_name)
self.assertEqual(action_response_failed['server'],
cm.exception.error_data
)
def test_related_resource_field(self):
self._test_related_resource_field(DS8K_HOST_PORT)
def test_occupied_ioports(self):
OCCUPIED_IOPORTS = 'login_ports'
info = get_response_data_by_type(
DS8K_HOST_PORT
)['data'][DS8K_HOST_PORT][0]
host_port = HostPort(self.client,
HostPortManager(self.client),
info=info
)
ioport_ids = [port.get(IOPort.id_field)
for port in info[OCCUPIED_IOPORTS]
]
self.assertCountEqual(ioport_ids,
host_port.representation.get(OCCUPIED_IOPORTS)
)
self.assertIsInstance(getattr(host_port, OCCUPIED_IOPORTS)[0], IOPort)
self.assertIn(getattr(host_port, OCCUPIED_IOPORTS)[0].id, ioport_ids)
```
#### File: test_resources/test_ds8k/test_pprc.py
```python
from pyds8k.resources.ds8k.v1.common.types import DS8K_PPRC
from ...data import get_response_data_by_type
from .base import TestDS8KWithConnect
from pyds8k.resources.ds8k.v1.volumes import Volume
from pyds8k.resources.ds8k.v1.pprc import PPRC
from pyds8k.resources.ds8k.v1.systems import System
class TestPPRC(TestDS8KWithConnect):
def test_related_resource_field(self):
pprc_info = get_response_data_by_type(
DS8K_PPRC
)['data'][DS8K_PPRC][0]
sourcevolume_id = pprc_info['sourcevolume'][Volume.id_field]
targetvolume_id = pprc_info['targetvolume'][Volume.id_field]
targetsystem_id = pprc_info['targetsystem'][System.id_field]
pprc = PPRC(self.client, info=pprc_info)
self.assertEqual(pprc.sourcevolume, sourcevolume_id)
self.assertEqual(pprc.representation['sourcevolume'], sourcevolume_id)
self.assertIsInstance(pprc._sourcevolume, Volume)
self.assertEqual(pprc._sourcevolume.id, sourcevolume_id)
self.assertEqual(pprc.targetvolume, targetvolume_id)
self.assertEqual(pprc.representation['targetvolume'], targetvolume_id)
self.assertIsInstance(pprc._targetvolume, Volume)
self.assertEqual(pprc._targetvolume.id, targetvolume_id)
self.assertEqual(pprc.targetsystem, targetsystem_id)
self.assertEqual(pprc.representation['targetsystem'], targetsystem_id)
self.assertIsInstance(pprc._targetsystem, System)
self.assertEqual(pprc._targetsystem.id, targetsystem_id)
```
#### File: pyds8k/test/test_utils.py
```python
from . import base
from nose.tools import nottest
from pyds8k import utils
class TestUtils(base.TestCaseWithoutConnect):
def test_get_subclasses(self):
class A(object):
pass
class B(A):
pass
class C(A):
pass
class D(B):
pass
class E(C):
pass
self.assertIn(B, utils.get_subclasses(A))
self.assertIn(C, utils.get_subclasses(A))
self.assertIn(D, utils.get_subclasses(A))
self.assertIn(E, utils.get_subclasses(A))
def test_is_absolute_url(self):
url1 = 'http://www.example.com/test'
url2 = 'https://www.example.com/test'
url3 = 'ftps://www.example.com/test'
url4 = 'ssh://www.example.com/test'
url5 = 'www.example.com/test'
url6 = 'example.com/test'
url7 = 'localhost/test'
url8 = '/test'
for url in (url1, url2, url3, url4, url5, url6, url7):
self.assertTrue(utils.is_absolute_url(url))
self.assertFalse(utils.is_absolute_url(url8))
def test_get_request_parser_class(self):
from pyds8k.dataParser.ds8k import RequestParser
self.assertEqual(RequestParser, utils.get_request_parser_class('ds8k'))
def test_get_response_parser_class(self):
from pyds8k.dataParser.ds8k import ResponseParser
self.assertEqual(ResponseParser,
utils.get_response_parser_class('ds8k'))
# def test_get_default_service_type(self):
# self.assertEqual('ds8k', utils.get_default_service_type())
@nottest
def test_get_config_settings(self):
settings_dict = utils.get_config_settings()
self.assertEqual(5, len(list(settings_dict.keys())))
self.assertIsNotNone(settings_dict.get('debug'))
self.assertIsNotNone(settings_dict.get('log_path'))
self.assertIsNotNone(settings_dict.get('default_service_type'))
self.assertIsNotNone(settings_dict.get('runtime_service_type'))
@nottest
def test_get_config_all_items(self):
config_dict = utils.get_config_all_items()
self.assertEqual(5, len(list(config_dict.keys())))
self.assertIsNotNone(config_dict.get('debug'))
self.assertIsNotNone(config_dict.get('log_path'))
self.assertIsNotNone(config_dict.get('default_service_type'))
self.assertIsNotNone(config_dict.get('runtime_service_type'))
@nottest
def test_get_config_all(self):
config_dict = utils.get_config_all()
self.assertEqual(1, len(list(config_dict.keys())))
settings_dict = config_dict.get('settings')
self.assertIsNotNone(settings_dict)
self.assertEqual(5, len(list(settings_dict.keys())))
self.assertIsNotNone(settings_dict.get('debug'))
self.assertIsNotNone(settings_dict.get('log_path'))
self.assertIsNotNone(settings_dict.get('default_service_type'))
self.assertIsNotNone(settings_dict.get('runtime_service_type'))
'''
class TestSetConfig(unittest.TestCase):
RUNTIME_SERVICE_TYPE = ''
def setUp(self):
self.RUNTIME_SERVICE_TYPE = utils.get_runtime_service_type()
def tearDown(self):
utils.set_runtime_service_type(self.RUNTIME_SERVICE_TYPE)
def test_get_runtime_service_type(self):
self.assertEqual(
self.RUNTIME_SERVICE_TYPE,
utils.get_runtime_service_type()
)
def test_set_runtime_service_type(self):
utils.set_runtime_service_type('test')
self.assertEqual('test', utils.get_runtime_service_type())
def test_get_service_type(self):
if utils.get_runtime_service_type():
self.assertEqual(
self.RUNTIME_SERVICE_TYPE,
utils.get_service_type()
)
else:
self.assertEqual(
utils.get_default_service_type(),
utils.get_service_type()
)
utils.set_runtime_service_type('test')
self.assertEqual('test', utils.get_service_type())
'''
``` |
{
"source": "2715816772/fzutils",
"score": 3
} |
#### File: lib/fzutils/safe_utils.py
```python
"""
Security-related helpers.
"""
__all__ = [
    'encrypt',  # encryption algorithm
    'decrypt',  # decryption algorithm
]
def encrypt(key, tmp_str):
'''
    Encryption algorithm.
    :param key: key used for the encryption
    :param tmp_str: the str to encrypt
    :return:
'''
b = bytearray(str(tmp_str).encode("gbk"))
    n = len(b)  # number of bytes in b
c = bytearray(n*2)
j = 0
for i in range(0, n):
b1 = b[i]
        b2 = b1 ^ key  # b1 = b2 ^ key
c1 = b2 % 16
c2 = b2 // 16 # b2 = c2*16 + c1
c1 = c1 + 65
        c2 = c2 + 65  # c1 and c2 are both in 0..15; adding 65 maps them to the character codes of 'A'-'P'
c[j] = c1
c[j+1] = c2
j = j+2
return c.decode("gbk")
def decrypt(key, tmp_str):
'''
    Decryption algorithm.
    :param key: key used for the decryption
    :param tmp_str: the str to decrypt
    :return: '' on decoding failure | 'xxx' on success
'''
c = bytearray(str(tmp_str).encode("gbk"))
    n = len(c)  # number of bytes in c
    if n % 2 != 0:
return ''
n = n // 2
b = bytearray(n)
j = 0
for i in range(0, n):
c1 = c[j]
c2 = c[j+1]
j = j+2
c1 = c1 - 65
c2 = c2 - 65
b2 = c2*16 + c1
        b1 = b2 ^ key
b[i]= b1
try:
return b.decode("gbk")
except:
return ''
```
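A round-trip check of the pair above (assuming the module is importable as `fzutils.safe_utils`):

```python
from fzutils.safe_utils import encrypt, decrypt

key = 88                            # 0-255 keeps the output in the A-P alphabet
token = encrypt(key, 'hello')       # two A-P characters per input byte
assert len(token) == 2 * len('hello')
assert decrypt(key, token) == 'hello'
assert decrypt(key, 'ABC') == ''    # odd-length input fails fast
```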
#### File: 2715816772/fzutils/setup.py
```python
from __future__ import print_function
from setuptools import (
setup,
find_packages,
)
import sys
import os
import codecs
from requires import install_requires
"""
Steps to publish a new package:
1. Run this command from the directory where setup.py is located
$ python3 setup.py sdist bdist_wheel
2. upload
$ twine upload dist/* --skip-existing
3. Update locally (the release only becomes available a while after publishing) [note: PyCharm may need a separate update]
$ pip3 install fzutils -U
4. Install fzutils on a server (append '-U' to upgrade)
$ pip3 install -i http://pypi.douban.com/simple/ fzutils --trusted-host pypi.douban.com
Errors and their fixes:
1. error: invalid command 'bdist_wheel'
$ pip3 install pip setuptools -U && pip3 install wheel
"""
def read(f_name):
"""
    Reads the long description from a file in this directory.
    We usually read the contents of the README file as the long description,
    which is shown on this package's page on PyPI. You can also skip this
    helper and write the content by hand. PyPI supports .rst files; .md files
    are not supported yet. PyPI automatically renders an .rst file as HTML on
    your package's info page.
"""
return codecs.open(os.path.join(os.path.dirname(__file__), f_name)).read()
long_description = read('README.md')
classifiers = [
'Programming Language :: Python :: 3 :: Only',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
]
# Importable packages (listing the outermost one is enough)
py_modules = [
'fzutils',
]
setup(
name="fzutils",
version="0.0.1.1.2",
author="super_fazai",
author_email="<EMAIL>",
description="A Python utils for spider",
py_modules=py_modules,
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
url="https://www.github.com/superonesfazai",
packages=find_packages(),
platforms=['linux/Windows/Mac'],
classifiers=classifiers,
install_requires=install_requires,
include_package_data=True,
python_requires='>=3',
zip_safe=True,
)
``` |
{
"source": "2720851545/GitHubPoster",
"score": 3
} |
#### File: GitHubPoster/loader/github_loader.py
```python
import requests
from html_parser import GitHubParser
from .base_loader import BaseLoader
from .config import GITHUB_CONTRIBUCTIONS_URL
class GitHubLoader(BaseLoader):
def __init__(self, from_year, to_year, **kwargs) -> None:
super().__init__()
assert to_year >= from_year
self.from_year = from_year
self.to_year = to_year
self.user_name = kwargs.get("github_user_name", "")
self._make_years_list()
def make_track_dict(self):
for y in self.year_list:
p = GitHubParser()
try:
r = requests.get(
GITHUB_CONTRIBUCTIONS_URL.format(
user_name=self.user_name,
start_day=f"{y}-01-01",
end_day=f"{y}-12-31",
)
)
self.number_by_date_dict.update(p.make_contribution_dict(r.text))
except Exception as e:
raise Exception(f"Can not get GitHub contribuctions error: {str(e)}")
for _, v in self.number_by_date_dict.items():
self.number_list.append(v)
def get_all_track_data(self):
self.make_track_dict()
self.make_special_number()
return self.number_by_date_dict, self.year_list
``` |
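A hypothetical invocation (the loader scrapes the public contributions page, so network access is required; 'octocat' is a placeholder user name):

```python
# Assumes BaseLoader supplies _make_years_list() and the accumulator
# attributes (number_by_date_dict, number_list) used above.
from loader.github_loader import GitHubLoader

loader = GitHubLoader(2020, 2021, github_user_name='octocat')
number_by_date, year_list = loader.get_all_track_data()
print(year_list, len(number_by_date))
```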
{
"source": "2742195759/Paddle",
"score": 2
} |
#### File: paddle/cost_model/cost_model.py
```python
import paddle
import paddle.static as static
import numpy as np
from paddle.fluid import core
class CostModel():
def __init__(self):
pass
def build_program(self):
paddle.enable_static()
main_program = static.Program()
startup_program = static.Program()
with static.program_guard(
main_program=main_program, startup_program=startup_program):
data = paddle.static.data(
name='X', shape=[None, 1], dtype='float32')
hidden = paddle.static.nn.fc(data, 10)
loss = paddle.mean(hidden)
paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)
print("main program is: {}".format(main_program))
#print("start up program is: {}".format(startup_program))
return startup_program, main_program
def profile_measure(self,
startup_program,
main_program,
device='gpu',
fetch_cost_list=['time', 'memory']):
place = paddle.set_device('gpu')
x = np.random.random(size=(10, 1)).astype('float32')
exe = paddle.static.Executor(place)
exe.run(startup_program)
paddle.fluid.profiler.start_profiler("All")
exe.run(main_program, feed={"X": x}, fetch_list=[])
# core.CostModel.ProfileMeasure(main_program, device)
print("core:<<<<<<<")
cost_model = core.CostModel()
cost_data = cost_model.ProfileMeasure(device)
# cost_list = self.stop_cost_model()
# return cost_list
cost_model = CostModel()
startup_program, main_program = cost_model.build_program()
cost_model.profile_measure(startup_program, main_program)
```
#### File: device/cuda/graphs.py
```python
from paddle.fluid.core import is_compiled_with_cuda, is_compiled_with_rocm, CUDAPlace
if is_compiled_with_cuda() and not is_compiled_with_rocm():
from paddle.fluid.core import CUDAGraph as CoreCUDAGraph
class CUDAGraph:
def __init__(self, place=None, mode="thread_local"):
ALL_MODES = ["global", "thread_local", "relaxed"]
self._graph = None
if place is None:
place = CUDAPlace(0)
self._place = place
assert mode in ALL_MODES
self._mode = ALL_MODES.index(mode)
def capture_begin(self):
CoreCUDAGraph.begin_capture(self._place, self._mode)
def capture_end(self):
self._graph = CoreCUDAGraph.end_capture()
def replay(self):
self._graph.replay()
def reset(self):
self._graph.reset()
else:
class CUDAGraph:
def __init__(self, place=None, mode="thread_local"):
raise NotImplementedError()
def capture_begin(self):
raise NotImplementedError()
def capture_end(self):
raise NotImplementedError()
def replay(self):
raise NotImplementedError()
def reset(self):
raise NotImplementedError()
```
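A minimal capture/replay sketch (assumes a CUDA build of Paddle with a visible GPU; the captured computation is a placeholder):

```python
import paddle
from paddle.device.cuda.graphs import CUDAGraph

paddle.set_device('gpu')
x = paddle.ones([4, 4])

g = CUDAGraph(mode="thread_local")
g.capture_begin()
y = x * 2            # kernels issued here are recorded into the graph
g.capture_end()

g.replay()           # re-launches the captured kernel sequence
g.reset()            # frees the graph's resources
```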
#### File: paddle/distributed/elastic.py
```python
import argparse
import six
import os
class Command(object):
def __init__(self, server, name):
import etcd3
srv, port = server.split(':')
self.etcd = etcd3.client(host=srv, port=port)
self.prefix = "/paddle/" + name
self.node_prefix = self.prefix + '/nodes'
self.np_path = self.prefix + '/np'
def set_np(self, np):
self.etcd.put(self.np_path, six.b('{}'.format(np)))
def scale_np(self, np):
if self.etcd.get(self.np_path)[0] != None:
self.set_np(np)
return True
return False
def clean(self):
self.etcd.delete_prefix(self.prefix)
def close(self):
self.etcd.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Elastic Command')
parser.add_argument(
"--elastic_server", type=str, help="etcd server host:port")
parser.add_argument("--job_id", type=str, help="job unique id")
parser.add_argument("--np", type=int, help="job pod/node number")
parser.add_argument("action", type=str, help="action to take")
args = parser.parse_args()
server = args.elastic_server or os.getenv('PADDLE_ELASTIC_SERVER')
name = args.job_id or os.getenv('PADDLE_ELASTIC_JOB_ID')
np = args.np or int(os.getenv('PADDLE_ELASTIC_NP', 0))
cmd = Command(server, name)
if args.action == "scale":
cmd.scale_np(np)
if args.action == "clean":
cmd.clean()
print("action {} done".format(args.action))
cmd.close()
```
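The same actions can be driven programmatically (assumes a reachable etcd server and the `etcd3` package; host, port and job id are placeholders):

```python
from paddle.distributed.elastic import Command

cmd = Command('127.0.0.1:2379', 'job-1')
cmd.set_np(4)               # writes the target count under /paddle/job-1/np
assert cmd.scale_np(8)      # scaling only succeeds once np has been set
cmd.clean()                 # removes every key under /paddle/job-1
cmd.close()
```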
#### File: meta_optimizers/sharding/offload_helper.py
```python
import copy
from ..common import is_optimizer_op, OP_ROLE_KEY, OpRole, is_update_op
from paddle.fluid import core, unique_name
from .shard import Shard
__all__ = []
class OffloadHelper(object):
cpu_place_type = 0
cuda_place_type = 1
cuda_pinned_place_type = 2
def __init__(self, mp_ring_id=None, dp_ring_id=None):
self.mp_ring_id = mp_ring_id
self.dp_ring_id = dp_ring_id
def _insert_cast_op(self, block, idx, src_name, dst_name):
src_var = block.var(src_name)
if not block.has_var(dst_name):
block.create_var(
name=dst_name,
shape=src_var.shape,
dtype=core.VarDesc.VarType.FP16,
persistable=True)
dst_var = block.var(dst_name)
assert dst_var.dtype == core.VarDesc.VarType.FP16
block._insert_op_without_sync(
idx,
type='cast',
inputs={'X': src_var},
outputs={'Out': dst_var},
attrs={
'in_dtype': src_var.dtype,
'out_dtype': dst_var.dtype,
OP_ROLE_KEY: OpRole.Optimize
})
def _insert_broadcast_op(self, block, idx, param_name):
rings = []
if self.dp_ring_id is not None:
rings.append(self.dp_ring_id)
# need sync non distributed param in mp group
if self.mp_ring_id is not None:
param = block.var(param_name)
if not hasattr(param, 'is_distributed') or not param.is_distributed:
rings.append(self.mp_ring_id)
# the insert op order is: mp, dp
for ring in rings:
block._insert_op_without_sync(
idx,
type="c_broadcast",
inputs={'X': param_name},
outputs={'Out': param_name},
attrs={
'ring_id': ring,
'root': 0,
'use_calc_stream': True,
OP_ROLE_KEY: OpRole.Forward,
})
def _insert_memcpy_op(self, block, idx, src_name, dst_name, dst_place_type):
src_var = block.var(src_name)
dst_var = block.var(dst_name)
block._insert_op_without_sync(
idx,
type='memcpy',
inputs={'X': src_var},
outputs={'Out': dst_var},
attrs={
'dst_place_type': dst_place_type,
OP_ROLE_KEY: OpRole.Optimize,
})
def _insert_fetch_op(self, block, idx, src_name, dst_name):
self._insert_memcpy_op(block, idx, src_name, dst_name,
OffloadHelper.cuda_place_type)
def _insert_offload_op(self, block, idx, src_name, dst_name):
self._insert_memcpy_op(block, idx, src_name, dst_name,
OffloadHelper.cuda_pinned_place_type)
def _get_offload_var_name(self, name):
return unique_name.generate(name + '@offload')
def _create_offload_var(self, var_name, offload_var_name, blocks):
for block in blocks:
var = block.var(var_name)
var.persistable = False
offload_var = block.create_var(
name=offload_var_name,
shape=var.shape,
dtype=var.dtype,
persistable=True)
def offload_fp32param(self, block, startup_block, offload=True):
"""
(p_fp16) = cast(p)
(p_fp16_recompute) = cast(p)
(pout,) = adam(p)
===========================>
rename(p_fp16_recompute, p_fp16)
(p,) = prefetch(p@offload)
(pout,) = adam(p)
(p_fp16) = cast(p)
(p@offload) = memcpy(p)
"""
param_to_idx = dict()
param_to_fp16 = dict()
# recompute_var which need rename to fp16_param
fp16_param_to_recompute = dict()
recompute_to_fp16 = dict()
def remove_param(input_name):
param_to_idx.pop(input_name)
if input_name in param_to_fp16:
fp16_param = param_to_fp16.pop(input_name)
if fp16_param in fp16_param_to_recompute:
recompute = fp16_param_to_recompute.pop(fp16_param)
recompute_to_fp16.pop(recompute)
# step1: record param
for idx, op in reversed(list(enumerate(block.ops))):
if is_update_op(op):
param = op.desc.input("Param")[0]
param_to_idx[param] = idx
# step2: remove param which can't offload and
# record param->fp16param, fp16param->recompute_var
for idx, op in enumerate(block.ops):
if is_optimizer_op(op):
break
# TODO (<NAME>): tmp solution for fuse_grad_merge + optimize_cast
if not offload and op.type == 'coalesce_tensor':
continue
for input_name in op.desc.input_arg_names():
if input_name not in param_to_idx:
continue
# param which will be used by fp32 op
if op.type != 'cast':
remove_param(input_name)
continue
# param is only used by cast op,
# which to cast fp32_param to fp16_param
output_name = op.output_arg_names[0]
if 'cast_fp16' not in output_name:
remove_param(input_name)
continue
if 'subprog' not in output_name:
assert output_name == input_name + '.cast_fp16'
assert input_name not in param_to_fp16, \
"There must be only one cast op from fp32 param to fp16 param."
param_to_fp16[input_name] = output_name
else:
# fp16-->recompute_var
assert input_name in param_to_fp16, \
"param must first be cast to fp16"
fp16_param = param_to_fp16[input_name]
fp16_param_to_recompute[fp16_param] = output_name
recompute_to_fp16[output_name] = fp16_param
param_name_to_offload_name = dict()
# step3: main_block add offload, cast op
# change recompute to fp16, remove cast(param) to fp16
for idx, op in reversed(list(enumerate(block.ops))):
if is_update_op(op):
param = op.desc.input("Param")[0]
if param not in param_to_idx: continue
# step3.1: create offload_var
offload_var_name = self._get_offload_var_name(param)
param_name_to_offload_name[param] = offload_var_name
if offload:
self._create_offload_var(param, offload_var_name,
[block, startup_block])
# step3.2: insert cast op and offload op
self._insert_offload_op(block, idx + 1, param,
offload_var_name)
assert param in param_to_fp16
fp16_param_name = param_to_fp16[param]
fp16_param_var = block.var(fp16_param_name)
fp16_param_var.persistable = True
self._insert_cast_op(block, idx + 1, param,
param_to_fp16[param])
if offload:
# step3.3: insert fetch op
self._insert_fetch_op(block, idx, offload_var_name, param)
continue
# step3.4: remove cast op
if op.type == 'cast':
input_name = op.desc.input_arg_names()[0]
if input_name in param_to_idx:
block._remove_op(idx, sync=False)
continue
# step3.5: change recompute_param to fp16_param
for input_name in op.desc.input_arg_names():
if input_name in recompute_to_fp16:
op._rename_input(input_name, recompute_to_fp16[input_name])
for output_name in op.desc.output_arg_names():
if output_name in recompute_to_fp16:
op._rename_output(output_name,
recompute_to_fp16[output_name])
# step4: remove recompute_param
for name in recompute_to_fp16.keys():
block._remove_var(name, sync=False)
# step5: startup_block add offload
visited_vars = set()
# FIXME(wangxi): should insert in idx, need move comm init to the head.
insert_idx = len(startup_block.ops)
for idx, op in reversed(list(enumerate(startup_block.ops))):
for out_name in op.output_arg_names:
if out_name in visited_vars:
continue
if out_name in param_name_to_offload_name:
var_name = out_name
if offload:
offload_var_name = param_name_to_offload_name[var_name]
self._insert_offload_op(startup_block, insert_idx,
var_name, offload_var_name)
self._insert_cast_op(startup_block, insert_idx, var_name,
param_to_fp16[var_name])
# NOTE(wangxi): cast and offload should insert after broadcast param.
# the insert op order is: {mp, dp}broadcast, cast, offload
self._insert_broadcast_op(startup_block, insert_idx,
var_name)
visited_vars.add(out_name)
block._sync_with_cpp()
startup_block._sync_with_cpp()
def cast_fp32param_in_optimize(self, block, startup_block):
"""
(p_fp16) = cast(p)
(p_fp16_recompute) = cast(p)
(pout,) = adam(p)
===========================>
rename(p_fp16_recompute, p_fp16)
(pout,) = adam(p)
(p_fp16) = cast(p)
"""
self.offload_fp32param(block, startup_block, offload=False)
def offload(self, block, startup_block):
"""
(m1, m2) = prefetch(m1@offload, m2@offload)
(m1out, m2out, pout) = adam(m1, m2, p)
(m1@offload, m2@offload) = memcpy(m1, m2)
"""
vars_name_to_offload_name = dict()
# main_block add offload
for idx, op in reversed(list(enumerate(block.ops))):
if not is_optimizer_op(op):
break
vars_name = []
if op.type == "adam" or op.type == "adamw":
# {Moment1Out = [''], Moment2Out = [''], ParamOut = ['']} =
# adam(inputs={Moment1 = [''], Moment2 = [''], Param = ['']})
vars_name.append(op.desc.input("Moment1")[0])
vars_name.append(op.desc.input("Moment2")[0])
elif op.type == 'momentum':
pass
elif op.type == 'lars':
pass
elif op.type == 'lamb':
pass
# step1: create and init offload_var
for var_name in vars_name:
assert var_name not in vars_name_to_offload_name
offload_var_name = self._get_offload_var_name(var_name)
vars_name_to_offload_name[var_name] = offload_var_name
self._create_offload_var(var_name, offload_var_name,
[block, startup_block])
# step2: insert offload op
for var_name in vars_name:
offload_var_name = vars_name_to_offload_name[var_name]
self._insert_offload_op(block, idx + 1, var_name,
offload_var_name)
# step3: insert fetch op
for var_name in vars_name:
offload_var_name = vars_name_to_offload_name[var_name]
self._insert_fetch_op(block, idx, offload_var_name, var_name)
# startup_block add offload
visited_vars = set()
for idx, op in reversed(list(enumerate(startup_block.ops))):
for out_name in op.output_arg_names:
if out_name in visited_vars:
continue
if out_name in vars_name_to_offload_name:
var_name = out_name
offload_var_name = vars_name_to_offload_name[var_name]
# insert offload op after var is generated
self._insert_offload_op(startup_block, idx + 1, var_name,
offload_var_name)
visited_vars.add(out_name)
block._sync_with_cpp()
startup_block._sync_with_cpp()
def opt_sharding_cast_fp32param(self,
block,
startup_block,
params,
offload=False):
"""
(p_fp16) = cast(p)
(p_fp16_recompute) = cast(p)
(pout,) = adam(p)
===========================>
rename(p_fp16_recompute, p_fp16)
(pout,) = adam(p)
(p_fp16) = cast(p)
broadcast(p_fp16)
"""
global_params = set()
local_params = set()
param_to_fp16 = dict()
# recompute_var which need rename to fp16_param
fp16_param_to_recompute = dict()
recompute_to_fp16 = dict()
def remove_param(input_name):
global_params.remove(input_name)
if input_name in local_params:
local_params.remove(input_name)
if input_name in param_to_fp16:
fp16_param = param_to_fp16.pop(input_name)
if fp16_param in fp16_param_to_recompute:
recompute = fp16_param_to_recompute.pop(fp16_param)
recompute_to_fp16.pop(recompute)
# step1: record param
global_params = set(params)
for idx, op in reversed(list(enumerate(block.ops))):
if is_update_op(op):
param = op.desc.input("Param")[0]
local_params.add(param)
# step2: remove param which can't offload and
# record param->fp16param, fp16param->recompute_var
for idx, op in enumerate(block.ops):
if is_optimizer_op(op):
break
# TODO (<NAME>): tmp solution for fuse_grad_merge + optimize_cast
if op.type == 'coalesce_tensor':
continue
for input_name in op.desc.input_arg_names():
if input_name not in global_params:
continue
# param which will be used by fp32 op
if op.type != 'cast':
remove_param(input_name)
continue
# param is only used by cast op,
# which to cast fp32_param to fp16_param
output_name = op.output_arg_names[0]
if 'cast_fp16' not in output_name:
remove_param(input_name)
continue
if 'subprog' not in output_name:
assert output_name == input_name + '.cast_fp16'
assert input_name not in param_to_fp16, \
"There must be only one cast op from fp32 param to fp16 param."
param_to_fp16[input_name] = output_name
else:
# fp16-->recompute_var
assert input_name in param_to_fp16, \
"param must first be cast to fp16"
fp16_param = param_to_fp16[input_name]
fp16_param_to_recompute[fp16_param] = output_name
recompute_to_fp16[output_name] = fp16_param
param_name_to_offload_name = dict()
# step3: main_block add offload, cast op
# change recompute to fp16, remove cast(param) to fp16
for idx, op in reversed(list(enumerate(block.ops))):
if is_update_op(op):
param = op.desc.input("Param")[0]
if param not in global_params:
continue
# step3.1: create offload_var
offload_var_name = self._get_offload_var_name(param)
param_name_to_offload_name[param] = offload_var_name
if offload:
self._create_offload_var(param, offload_var_name,
[block, startup_block])
# step3.2: insert cast op and offload op
self._insert_offload_op(block, idx + 1, param,
offload_var_name)
assert param in param_to_fp16
fp16_param_name = param_to_fp16[param]
fp16_param_var = block.var(fp16_param_name)
fp16_param_var.persistable = True
self._insert_cast_op(block, idx + 1, param,
param_to_fp16[param])
if offload:
# step3.3: insert fetch op
self._insert_fetch_op(block, idx, offload_var_name, param)
continue
# step3.4: remove cast op
if op.type == 'cast':
input_name = op.desc.input_arg_names()[0]
if input_name in global_params:
block._remove_op(idx, sync=False)
continue
# step3.5: change recompute_param to fp16_param
for input_name in op.desc.input_arg_names():
if input_name in recompute_to_fp16:
op._rename_input(input_name, recompute_to_fp16[input_name])
for output_name in op.desc.output_arg_names():
if output_name in recompute_to_fp16:
op._rename_output(output_name,
recompute_to_fp16[output_name])
# step4: remove recompute_param
for name in recompute_to_fp16.keys():
block._remove_var(name, sync=False)
# step5: remove fp32 param which not need
for idx, op in enumerate(block.ops):
if op.type not in ['coalesce_tensor', 'c_broadcast']:
continue
for input_name in op.desc.input_arg_names():
if input_name in param_to_fp16:
op._rename_input(input_name, param_to_fp16[input_name])
for output_name in op.desc.output_arg_names():
if output_name in param_to_fp16:
op._rename_output(output_name, param_to_fp16[output_name])
for param in global_params:
assert param in param_to_fp16
fp16_param_name = param_to_fp16[param]
fp16_param_var = block.var(fp16_param_name)
fp16_param_var.persistable = True
if param not in local_params:
block._remove_var(param, sync=False)
# step6: startup_block add offload
visited_vars = set()
insert_idx = len(startup_block.ops)
for idx, op in reversed(list(enumerate(startup_block.ops))):
for out_name in op.output_arg_names:
if out_name in visited_vars: continue
if out_name in param_to_fp16:
var_name = out_name
if offload:
self._insert_offload_op(
startup_block, idx + 1, var_name,
param_name_to_offload_name[var_name])
self._insert_cast_op(startup_block, insert_idx, var_name,
param_to_fp16[var_name])
# NOTE(wangxi): cast and offload should be inserted after the param broadcast.
# the insert op order is: {mp, dp}broadcast, cast, offload
self._insert_broadcast_op(startup_block, insert_idx,
var_name)
if var_name not in local_params:
param = startup_block.var(out_name)
param.persistable = False
visited_vars.add(out_name)
block._sync_with_cpp()
startup_block._sync_with_cpp()
```
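For orientation, here is a minimal NumPy sketch of the master-weight pattern the pass above wires into the program: an fp32 master parameter kept off the compute path (offloaded), an fp16 cast used for compute, and the optimizer update applied in fp32. All names are illustrative; none of this is Paddle API.
```python
import numpy as np

master_w = np.random.randn(4, 4).astype(np.float32)  # fp32 master weight, "offloaded"

def training_step(lr=0.1):
    w_fp16 = master_w.astype(np.float16)          # fetch + cast before use
    x = np.random.randn(4, 4).astype(np.float16)
    grad_fp16 = x @ w_fp16                        # stand-in for forward/backward
    master_w[...] -= lr * grad_fp16.astype(np.float32)  # update the fp32 master
```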
#### File: ir/inference/test_trt_convert_multihead_matmul.py
```python
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import unittest
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
class TrtConvertMultiHeadMatmulTest(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
return True
def sample_program_configs(self):
def generate_input1(batch, dim1):
return np.random.random((batch, dim1, 768)).astype(np.float32)
def generate_input2(shape):
return np.random.random(shape).astype(np.float32)
def generate_weight1():
return np.random.random((768, 768)).astype(np.float32)
def generate_weight2():
return np.random.random(768).astype(np.float32)
for batch in [1, 2, 4]:
self.batch = batch
for reshape_shape in [[0, 0, 12, 64]]:
for dim1 in [128]:
input2_shapes = [[batch, reshape_shape[2], dim1, dim1],
[batch, 1, 1, dim1]]
for input2_shape in input2_shapes:
for axis in [0]:
dics = [{
"x_num_col_dims": 2,
"y_num_col_dims": 1
}, {
"axis": 2
}, {
"shape": reshape_shape
}, {
"axis": [0, 2, 1, 3]
}, {
"x_num_col_dims": 2,
"y_num_col_dims": 1
}, {
"axis": 2
}, {
"shape": reshape_shape
}, {
"axis": [0, 2, 1, 3]
}, {
"x_num_col_dims": 2,
"y_num_col_dims": 1
}, {
"axis": 2
}, {
"shape": reshape_shape
}, {
"axis": [0, 2, 1, 3]
}, {
"scale": 0.125,
"bias": 0.0,
"bias_after_scale": True
}, {
"alpha": 1.0,
"transpose_X": False,
"transpose_Y": True,
"fused_reshape_X": [],
"fused_reshape_Y": [],
"fused_transpose_X": [],
"fused_transpose_Y": [],
"fused_reshape_Out": [],
"fused_transpose_Out": []
}, {
"axis": axis
}, {
"axis": -1,
"is_test": True
}, {
"seed": 0,
"dropout_prob": 0.10000000149011612,
"dropout_implementation": "upscale_in_train",
"fix_seed": False,
"is_test": True
}, {
"alpha": 1.0,
"transpose_X": False,
"transpose_Y": False,
"fused_reshape_X": [],
"fused_reshape_Y": [],
"fused_transpose_X": [],
"fused_transpose_Y": [],
"fused_reshape_Out": [],
"fused_transpose_Out": []
}, {
"axis": [0, 2, 1, 3]
}, {
"shape": [0, 0, 768]
}, {
"x_num_col_dims": 2,
"y_num_col_dims": 1
}]
ops_config = [
{
"op_type": "mul",
"op_inputs": {
"X": ["input_data1"],
"Y": ["mul1_weight"]
},
"op_outputs": {
"Out": ["mul1_output"]
},
"op_attrs": dics[0]
},
{
"op_type": "elementwise_add",
"op_inputs": {
"X": ["mul1_output"],
"Y": ["elementwise_add1_weight"]
},
"op_outputs": {
"Out": ["elementwise_add1_output"]
},
"op_attrs": dics[1]
},
{
"op_type": "reshape2",
"op_inputs": {
"X": ["elementwise_add1_output"],
},
"op_outputs": {
"Out": ["reshape21_output"],
"XShape": ["reshape21_output_xshape"]
},
"op_attrs": dics[2]
},
{
"op_type": "transpose2",
"op_inputs": {
"X": ["reshape21_output"]
},
"op_outputs": {
"Out": ["transpose21_output"],
"XShape":
["transpose21_output_xshape"]
},
"op_attrs": dics[3]
},
{
"op_type": "mul",
"op_inputs": {
"X": ["input_data1"],
"Y": ["mul2_weight"]
},
"op_outputs": {
"Out": ["mul2_output"]
},
"op_attrs": dics[4]
},
{
"op_type": "elementwise_add",
"op_inputs": {
"X": ["mul2_output"],
"Y": ["elementwise_add2_weight"]
},
"op_outputs": {
"Out": ["elementwise_add2_output"]
},
"op_attrs": dics[5]
},
{
"op_type": "reshape2",
"op_inputs": {
"X": ["elementwise_add2_output"]
},
"op_outputs": {
"Out": ["reshape22_output"],
"XShape": ["reshape22_output_xshape"]
},
"op_attrs": dics[6]
},
{
"op_type": "transpose2",
"op_inputs": {
"X": ["reshape22_output"]
},
"op_outputs": {
"Out": ["transpose22_output"],
"XShape":
["transpose22_output_xshape"]
},
"op_attrs": dics[7]
},
{
"op_type": "mul",
"op_inputs": {
"X": ["input_data1"],
"Y": ["mul3_weight"]
},
"op_outputs": {
"Out": ["mul3_output"]
},
"op_attrs": dics[8]
},
{
"op_type": "elementwise_add",
"op_inputs": {
"X": ["mul3_output"],
"Y": ["elementwise_add3_weight"]
},
"op_outputs": {
"Out": ["elementwise_add3_output"]
},
"op_attrs": dics[9]
},
{
"op_type": "reshape2",
"op_inputs": {
"X": ["elementwise_add3_output"]
},
"op_outputs": {
"Out": ["reshape23_output"],
"XShape": ["reshape23_output_xshape"]
},
"op_attrs": dics[10]
},
{
"op_type": "transpose2",
"op_inputs": {
"X": ["reshape23_output"]
},
"op_outputs": {
"Out": ["transpose23_output"],
"XShape":
["transpose23_output_xshape"]
},
"op_attrs": dics[11]
},
{
"op_type": "scale",
"op_inputs": {
"X": ["transpose23_output"],
},
"op_outputs": {
"Out": ["scale_output"]
},
"op_attrs": dics[12]
},
{
"op_type": "matmul",
"op_inputs": {
"X": ["scale_output"],
"Y": ["transpose22_output"],
},
"op_outputs": {
"Out": ["matmul1_output"]
},
"op_attrs": dics[13]
},
{
"op_type": "elementwise_add",
"op_inputs": {
"X": ["matmul1_output"],
"Y": ["input_data2"]
},
"op_outputs": {
"Out": ["elementwise_add4_output"]
},
"op_attrs": dics[14]
},
{
"op_type": "softmax",
"op_inputs": {
"X": ["elementwise_add4_output"]
},
"op_outputs": {
"Out": ["softmax_output"]
},
"op_attrs": dics[15]
},
{
"op_type": "dropout",
"op_inputs": {
"X": ["softmax_output"],
},
"op_outputs": {
"Out": ["dropout3_output"]
},
"op_attrs": dics[16]
},
{
"op_type": "matmul",
"op_inputs": {
"X": ["dropout3_output"],
"Y": ["transpose21_output"],
},
"op_outputs": {
"Out": ["matmul2_output"]
},
"op_attrs": dics[17]
},
{
"op_type": "transpose2",
"op_inputs": {
"X": ["matmul2_output"]
},
"op_outputs": {
"Out": ["transpose24_output"],
"XShape":
["transpose24_output_xshape"]
},
"op_attrs": dics[18]
},
{
"op_type": "reshape2",
"op_inputs": {
"X": ["transpose24_output"]
},
"op_outputs": {
"Out": ["reshape24_output"],
"XShape": ["reshape24_output_xshape"]
},
"op_attrs": dics[19]
},
# In order to fuse ops with
# multihead_matmul_fuse_pass_v2, the last op
# must be mul.
{
"op_type": "mul",
"op_inputs": {
"X": ["reshape24_output"],
"Y": ["mul4_weight"]
},
"op_outputs": {
"Out": ["mul4_output"]
},
"op_attrs": dics[20]
}
]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights={
"mul1_weight": TensorConfig(
data_gen=partial(generate_weight1)),
"mul2_weight": TensorConfig(
data_gen=partial(generate_weight1)),
"mul3_weight": TensorConfig(
data_gen=partial(generate_weight1)),
"mul4_weight": TensorConfig(
data_gen=partial(generate_weight1)),
"elementwise_add1_weight": TensorConfig(
data_gen=partial(generate_weight2)),
"elementwise_add2_weight": TensorConfig(
data_gen=partial(generate_weight2)),
"elementwise_add3_weight": TensorConfig(
data_gen=partial(generate_weight2)),
},
inputs={
"input_data1": TensorConfig(
data_gen=partial(generate_input1, batch,
dim1)),
"input_data2": TensorConfig(
data_gen=partial(generate_input2,
input2_shape)),
},
outputs=["mul4_output"])
yield program_config
def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
# The last dim of input1 and input2 should be static.
self.dynamic_shape.min_input_shape = {
"input_data1": [1, 8, 768],
"input_data2": [1, 1, 1, 128],
"reshape24_output": [1, 128, 768]
}
self.dynamic_shape.max_input_shape = {
"input_data1": [16, 512, 768],
"input_data2": [16, 256, 512, 128],
"reshape24_output": [1, 128, 768]
}
self.dynamic_shape.opt_input_shape = {
"input_data1": [8, 128, 768],
"input_data2": [8, 32, 64, 128],
"reshape24_output": [1, 128, 768]
}
def clear_dynamic_shape():
self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.opt_input_shape = {}
attrs = [
program_config.ops[i].attrs
for i in range(len(program_config.ops))
]
# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), (1, 4), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), (1, 4), (1e-5, 1e-5)
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), (1, 3), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), (1, 3), (1e-5, 1e-5)
def add_skip_trt_case(self):
def teller1(program_config, predictor_config):
if self.trt_param.precision == paddle_infer.PrecisionType.Half:
return True
return False
self.add_skip_case(
teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
"The output has diff between gpu and trt in fp16 mode.")
def teller2(program_config, predictor_config):
if self.trt_param.precision == paddle_infer.PrecisionType.Float32 and len(
self.dynamic_shape.min_input_shape) != 0 and self.batch > 2:
return True
return False
self.add_skip_case(
teller2, SkipReasons.TRT_NOT_IMPLEMENTED,
"The output has diff between gpu and trt when dynamic fp32 mode and batch size > 2."
)
def test(self):
self.add_skip_trt_case()
self.run_test()
if __name__ == "__main__":
unittest.main()
```
#### File: tests/unittests/test_cost_model.py
```python
from __future__ import print_function
import unittest
import paddle
import paddle.fluid.core as core
paddle.enable_static()
device = "gpu" if core.is_compiled_with_cuda() else "cpu"
class TestCostModel(unittest.TestCase):
def test_profiler_measure_empty_program(self):
cost_model = core.CostModel()
empty_program = paddle.static.Program()
startup_program = paddle.static.Program()
cost_data = cost_model.profile_measure(empty_program, startup_program,
device, ["time"])
self.assertEqual(cost_data.get_whole_time_ms(), 0)
def test_profiler_measure_program(self):
main_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(main_program, startup_program):
# TODO(zhhsplendid): support paddle.static.data, which is uninitialized data
data = paddle.ones(name='X', shape=[16, 100], dtype='float32')
hidden = paddle.static.nn.fc(data, 10)
loss = paddle.mean(hidden)
cost_model = core.CostModel()
cost_data = cost_model.profile_measure(main_program, startup_program,
device, ["time"])
fc_op_time = cost_data.get_op_time_ms(0)
mean_op_time = cost_data.get_op_time_ms(1)
self.assertGreater(fc_op_time, 0)
self.assertGreater(mean_op_time, 0)
self.assertGreaterEqual(cost_data.get_whole_time_ms(),
fc_op_time + mean_op_time)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "2757571500/sdk",
"score": 3
} |
#### File: blender/scripts/import_fbx.py
```python
import bpy
# Imports a file using importer
def import_file(file_path):
# Import the model
bpy.ops.import_scene.fbx(filepath = file_path)
# Clear existing objects.
def clear_scene():
scene = bpy.context.scene
scene.camera = None
for obj in scene.objects:
scene.objects.unlink(obj)
# Save current scene as .blend file
def save_file(save_path):
# Check if output file exists already
ok = False
try:
f = open(save_path, 'w')
f.close()
ok = True
except:
print("Cannot save to path %r" % save_path)
import traceback
traceback.print_exc()
# Save .blend file
if ok:
bpy.ops.wm.save_as_mainfile(filepath=save_path)
def main():
import sys # to get command line args
import argparse # to parse options for us and print a nice help message
# get the args passed to blender after "--", all of which are ignored by
# blender so scripts may receive their own arguments
argv = sys.argv
if "--" not in argv:
argv = [] # as if no args are passed
else:
argv = argv[argv.index("--") + 1:] # get all args after "--"
# When --help or no args are given, print this help
usage_text = (
"Run blender in background mode with this script:"
" blender --background --factory-startup --python " + __file__ + " -- [options]"
)
parser = argparse.ArgumentParser(description=usage_text)
# Possible types are: string, int, long, choice, float and complex.
parser.add_argument("-i", "--input", dest="file_path", metavar='FILE',
help="Import the specified file")
parser.add_argument("-o", "--output", dest="save_path", metavar='FILE',
help="Save the generated file to the specified path")
args = parser.parse_args(argv)  # parse everything passed after "--"
if not argv:
parser.print_help()
return
# Run the conversion
clear_scene()
import_file(args.file_path)
save_file(args.save_path)
print("batch job finished, exiting")
if __name__ == "__main__":
main()
``` |
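A small usage sketch for the script above, driving Blender headless from Python. This assumes `blender` is on PATH; the file names are hypothetical.
```python
import subprocess

# Convert model.fbx to model.blend via the import script (hypothetical paths).
subprocess.run([
    "blender", "--background", "--factory-startup",
    "--python", "import_fbx.py", "--",
    "--input", "model.fbx",
    "--output", "model.blend",
], check=True)
```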
{
"source": "276793422/attack-website",
"score": 2
} |
#### File: modules/clean/__init__.py
```python
from . import clean
from . import clean_config
def get_priority():
return clean_config.priority
def run_module():
return (clean.clean_website_build(), clean_config.module_name)
```
#### File: modules/resources/resources.py
```python
import json
import os
from . import resources_config
import modules
from modules import site_config
from modules import util
from datetime import datetime
import mitreattack.attackToExcel.attackToExcel as attackToExcel
def generate_resources():
"""Responsible for generating the resources pages"""
# Create content pages directory if does not already exist
util.buildhelpers.create_content_pages_dir()
# Verify if resources directory exists
if not os.path.isdir(site_config.resources_markdown_path):
os.mkdir(site_config.resources_markdown_path)
# Verify if resources directory exists
if not os.path.isdir(resources_config.updates_markdown_path):
os.mkdir(resources_config.updates_markdown_path)
# Move templates to templates directory
util.buildhelpers.move_templates(resources_config.module_name, resources_config.resources_templates_path)
util.buildhelpers.move_docs(resources_config.docs_path)
generate_working_with_attack()
generate_general_information()
generate_training_pages()
generate_attackcon_page()
check_menu_versions_module()
generate_static_pages()
def generate_general_information():
"""Responsible for compiling resources json into resources markdown files
for rendering in the HTML
"""
# load papers and presentations list
with open(os.path.join(site_config.data_directory, "resources.json"), "r", encoding='utf8') as f:
resources = json.load(f)
# get papers and presentations in sorted date order
papers = sorted(resources["papers"], key=lambda p: datetime.strptime(p["date"], "%B %Y"), reverse=True)
presentations = sorted(resources["presentations"], key=lambda p: datetime.strptime(p["date"], "%B %Y"), reverse=True)
# get markdown
resources_content = resources_config.general_information_md + json.dumps({
"papers": papers,
"presentations": presentations
})
# write markdown to file
with open(os.path.join(site_config.resources_markdown_path, "general_information.md"), "w", encoding='utf8') as md_file:
md_file.write(resources_content)
def generate_training_pages():
""" Responsible for generating the markdown pages of the training pages """
data = {}
# Side navigation for training
data['menu'] = resources_config.training_navigation
# Training Overview
training_md = resources_config.training_md + json.dumps(data)
# write markdown to file
with open(os.path.join(site_config.resources_markdown_path, "training.md"), "w", encoding='utf8') as md_file:
md_file.write(training_md)
# CTI training
training_cti_md = resources_config.training_cti_md + json.dumps(data)
# write markdown to file
with open(os.path.join(site_config.resources_markdown_path, "training_cti.md"), "w", encoding='utf8') as md_file:
md_file.write(training_cti_md)
def generate_attackcon_page():
"""Responsible for compiling ATT&CKcon json into attackcon markdown file
for rendering on the HTML
"""
# load ATT&CKcon data
with open(os.path.join(site_config.data_directory, "attackcon.json"), "r", encoding='utf8') as f:
attackcon = json.load(f)
attackcon = sorted(attackcon, key=lambda a: datetime.strptime(a["date"], "%B %Y"), reverse=True)
attackcon_content = resources_config.attackcon_md + json.dumps(attackcon)
# write markdown to file
with open(os.path.join(site_config.resources_markdown_path, "attackcon.md"), "w", encoding='utf8') as md_file:
md_file.write(attackcon_content)
def check_menu_versions_module():
""" Verify if versions module is in the running pool, if not
remove from submenu
"""
if not [key['module_name'] for key in modules.run_ptr if key['module_name'] == 'versions']:
util.buildhelpers.remove_element_from_sub_menu(resources_config.module_name, "Versions of ATT&CK")
def generate_static_pages():
""" Reads markdown files from the static pages directory and copies them into
the markdown directory
"""
static_pages_dir = os.path.join('modules', 'resources', 'static_pages')
for static_page in os.listdir(static_pages_dir):
with open(os.path.join(static_pages_dir, static_page), "r", encoding='utf8') as md:
content = md.read()
if static_page.startswith("updates-"):
with open(os.path.join(resources_config.updates_markdown_path, static_page), "w", encoding='utf8') as md_file:
md_file.write(content)
else:
with open(os.path.join(site_config.resources_markdown_path, static_page), "w", encoding='utf8') as md_file:
md_file.write(content)
def generate_working_with_attack():
""" Responsible for generating working with ATT&CK and creating excel files
"""
excel_dirs = [
f"enterprise-attack-{site_config.full_attack_version}",
f"mobile-attack-{site_config.full_attack_version}",
f"ics-attack-{site_config.full_attack_version}"
]
files_types = ["matrices", "mitigations", "relationships", "software", "groups", "tactics", "techniques", "datasources"]
# Verify if directories exists
if not os.path.isdir(site_config.web_directory):
os.makedirs(site_config.web_directory)
docs_dir = os.path.join(site_config.web_directory, 'docs')
if not os.path.isdir(docs_dir):
os.makedirs(docs_dir)
attackToExcel.export("enterprise-attack", site_config.full_attack_version, docs_dir)
attackToExcel.export("mobile-attack", site_config.full_attack_version, docs_dir)
attackToExcel.export("ics-attack", site_config.full_attack_version, docs_dir)
files_json = {'excel_files': []}
for excel_dir in excel_dirs:
excel_json = {
'label' : f"{excel_dir}.xlsx",
'url': f"/docs/{excel_dir}/{excel_dir}.xlsx",
'children' : []
}
for file_type in files_types:
child_json = {
'label' : f"{excel_dir}-{file_type}.xlsx",
'url': f"/docs/{excel_dir}/{excel_dir}-{file_type}.xlsx"
}
if os.path.exists(site_config.web_directory + child_json['url']):
excel_json['children'].append(child_json)
files_json['excel_files'].append(excel_json)
working_with_attack_content = resources_config.working_with_attack_md + json.dumps(files_json)
# write markdown to file
with open(os.path.join(site_config.resources_markdown_path, "working_with_attack.md"), "w", encoding='utf8') as md_file:
md_file.write(working_with_attack_content)
```
#### File: modules/tests/__init__.py
```python
from . import tests
from . import tests_config
def get_priority():
return tests_config.priority
def run_module():
return (tests.run_tests(), tests_config.module_name)
```
#### File: modules/tour/__init__.py
```python
from . import tour
from . import tour_config
def get_priority():
return tour_config.priority
def run_module():
return (tour.generate_tour(), tour_config.module_name)
```
#### File: modules/website_build/__init__.py
```python
from . import website_build
from . import website_build_config
from modules import site_config
def get_priority():
return website_build_config.priority
def run_module():
return (website_build.generate_website(), website_build_config.module_name)
``` |
{
"source": "279632990/AutoSpeech2020",
"score": 2
} |
#### File: AutoSpeech2020/mysubmission/data_augmentation.py
```python
import librosa
import numpy as np
import random
import keras.backend as K
from tensorflow.python.keras import Input
from tensorflow.python.keras.engine import InputLayer
from tensorflow.python.keras.engine import InputSpec
from tensorflow.python.keras.engine import Layer
from tensorflow.python.keras.layers import Wrapper,Dense,MaxPool2D
from tensorflow import keras
import numpy.linalg as nl
from scipy import interpolate
from scipy.spatial.distance import pdist, cdist, squareform
class LayerNormalization(keras.layers.Layer):
def __init__(self,
center=True,
scale=True,
epsilon=None,
gamma_initializer='ones',
beta_initializer='zeros',
gamma_regularizer=None,
beta_regularizer=None,
gamma_constraint=None,
beta_constraint=None,
**kwargs):
"""Layer normalization layer
See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf)
:param center: Add an offset parameter if it is True.
:param scale: Add a scale parameter if it is True.
:param epsilon: Epsilon for calculating variance.
:param gamma_initializer: Initializer for the gamma weight.
:param beta_initializer: Initializer for the beta weight.
:param gamma_regularizer: Optional regularizer for the gamma weight.
:param beta_regularizer: Optional regularizer for the beta weight.
:param gamma_constraint: Optional constraint for the gamma weight.
:param beta_constraint: Optional constraint for the beta weight.
:param kwargs:
"""
super(LayerNormalization, self).__init__(**kwargs)
self.supports_masking = True
self.center = center
self.scale = scale
if epsilon is None:
epsilon = K.epsilon() * K.epsilon()
self.epsilon = epsilon
self.gamma_initializer = keras.initializers.get(gamma_initializer)
self.beta_initializer = keras.initializers.get(beta_initializer)
self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
self.beta_regularizer = keras.regularizers.get(beta_regularizer)
self.gamma_constraint = keras.constraints.get(gamma_constraint)
self.beta_constraint = keras.constraints.get(beta_constraint)
self.gamma, self.beta = None, None
def get_config(self):
config = {
'center': self.center,
'scale': self.scale,
'epsilon': self.epsilon,
'gamma_initializer': keras.initializers.serialize(self.gamma_initializer),
'beta_initializer': keras.initializers.serialize(self.beta_initializer),
'gamma_regularizer': keras.regularizers.serialize(self.gamma_regularizer),
'beta_regularizer': keras.regularizers.serialize(self.beta_regularizer),
'gamma_constraint': keras.constraints.serialize(self.gamma_constraint),
'beta_constraint': keras.constraints.serialize(self.beta_constraint),
}
base_config = super(LayerNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
def compute_mask(self, inputs, input_mask=None):
return input_mask
def build(self, input_shape):
shape = input_shape[-1:]
if self.scale:
self.gamma = self.add_weight(
shape=shape,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
name='gamma',
)
if self.center:
self.beta = self.add_weight(
shape=shape,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
name='beta',
)
super(LayerNormalization, self).build(input_shape)
def call(self, inputs, training=None):
mean = K.mean(inputs, axis=-1, keepdims=True)
variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True)
std = K.sqrt(variance + self.epsilon)
outputs = (inputs - mean) / std
if self.scale:
outputs *= self.gamma
if self.center:
outputs += self.beta
return outputs
#x = DropConnect(Dense(64, activation='relu'), prob=0.5)(x)
class DropConnectDense(Dense):
def __init__(self, *args, **kwargs):
self.prob = kwargs.pop('prob', 0.5)
if 0. < self.prob < 1.:
self.uses_learning_phase = True
super(DropConnectDense, self).__init__(*args, **kwargs)
def call(self, x, mask=None):
if 0. < self.prob < 1.:
self.kernel = K.in_train_phase(K.dropout(self.kernel, self.prob), self.kernel)
self.bias = K.in_train_phase(K.dropout(self.bias, self.prob), self.bias)
# Same as the original Dense forward pass, using the (possibly dropped) weights
output = K.dot(x, self.kernel)
if self.use_bias:
output += self.bias
return self.activation(output)
class DropConnect(Wrapper):
def __init__(self, layer, prob=1., **kwargs):
self.prob = prob
self.layer = layer
super(DropConnect, self).__init__(layer, **kwargs)
if 0. < self.prob < 1.:
self.uses_learning_phase = True
def build(self, input_shape):
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = True
super(DropConnect, self).build()
def compute_output_shape(self, input_shape):
return self.layer.compute_output_shape(input_shape)
def call(self, x):
if 0. < self.prob < 1.:
self.layer.kernel = K.in_train_phase(K.dropout(self.layer.kernel, self.prob) * (1-self.prob), self.layer.kernel)
self.layer.bias = K.in_train_phase(K.dropout(self.layer.bias, self.prob) * (1-self.prob), self.layer.bias)
return self.layer.call(x)
#DropBlock2D(block_size=5, keep_prob=0.8, name='Dropout-1')
class DropBlock2D(Layer):
"""See: https://arxiv.org/pdf/1810.12890.pdf"""
def __init__(self,
block_size,
keep_prob,
sync_channels=False,
data_format=None,
**kwargs):
"""Initialize the layer.
:param block_size: Size for each mask block.
:param keep_prob: Probability of keeping the original feature.
:param sync_channels: Whether to use the same dropout for all channels.
:param data_format: 'channels_first' or 'channels_last' (default).
:param kwargs: Arguments for parent class.
"""
super(DropBlock2D, self).__init__(**kwargs)
self.block_size = block_size
self.keep_prob = keep_prob
self.sync_channels = sync_channels
self.data_format = K.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=4)
self.supports_masking = True
def get_config(self):
config = {'block_size': self.block_size,
'keep_prob': self.keep_prob,
'sync_channels': self.sync_channels,
'data_format': self.data_format}
base_config = super(DropBlock2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_mask(self, inputs, mask=None):
return mask
def compute_output_shape(self, input_shape):
return input_shape
def _get_gamma(self, height, width):
"""Get the number of activation units to drop"""
height, width = K.cast(height, K.floatx()), K.cast(width, K.floatx())
block_size = K.constant(self.block_size, dtype=K.floatx())
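# Eq. (1) of the DropBlock paper: gamma = (1 - keep_prob) / block_size^2
# * H*W / ((H - block_size + 1) * (W - block_size + 1)), chosen so that once
# the Bernoulli seeds are max-pooled into block_size x block_size blocks, the
# expected fraction of dropped activations is roughly (1 - keep_prob).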
return ((1.0 - self.keep_prob) / (block_size ** 2)) *\
(height * width / ((height - block_size + 1.0) * (width - block_size + 1.0)))
def _compute_valid_seed_region(self, height, width):
positions = K.concatenate([
K.expand_dims(K.tile(K.expand_dims(K.arange(height), axis=1), [1, width]), axis=-1),
K.expand_dims(K.tile(K.expand_dims(K.arange(width), axis=0), [height, 1]), axis=-1),
], axis=-1)
half_block_size = self.block_size // 2
valid_seed_region = K.switch(
K.all(
K.stack(
[
positions[:, :, 0] >= half_block_size,
positions[:, :, 1] >= half_block_size,
positions[:, :, 0] < height - half_block_size,
positions[:, :, 1] < width - half_block_size,
],
axis=-1,
),
axis=-1,
),
K.ones((height, width)),
K.zeros((height, width)),
)
return K.expand_dims(K.expand_dims(valid_seed_region, axis=0), axis=-1)
def _compute_drop_mask(self, shape):
height, width = shape[1], shape[2]
mask = K.random_binomial(shape, p=self._get_gamma(height, width))
mask *= self._compute_valid_seed_region(height, width)
mask = MaxPool2D(
pool_size=(self.block_size, self.block_size),
padding='same',
strides=1,
data_format='channels_last',
)(mask)
return 1.0 - mask
def call(self, inputs, training=None):
def dropped_inputs():
outputs = inputs
if self.data_format == 'channels_first':
outputs = K.permute_dimensions(outputs, [0, 2, 3, 1])
shape = K.shape(outputs)
if self.sync_channels:
mask = self._compute_drop_mask([shape[0], shape[1], shape[2], 1])
else:
mask = self._compute_drop_mask(shape)
outputs = outputs * mask *\
(K.cast(K.prod(shape), dtype=K.floatx()) / K.sum(mask))
if self.data_format == 'channels_first':
outputs = K.permute_dimensions(outputs, [0, 3, 1, 2])
return outputs
return K.in_train_phase(dropped_inputs, inputs, training=training)
def mix_up(data, one_hot_labels, alpha=1):
np.random.seed(2333)
batch_size = len(data)
weights = np.random.beta(alpha, alpha, batch_size)
index = np.random.permutation(batch_size)
x1, x2 = data, data[index]
x = np.array([x1[i] * weights[i] + x2[i] * (1 - weights[i]) for i in range(len(weights))])
y1 = np.array(one_hot_labels).astype(np.float)
y2 = np.array(np.array(one_hot_labels)[index]).astype(np.float)
y = np.array([y1[i] * weights[i] + y2[i] * (1 - weights[i]) for i in range(len(weights))])
return x, y
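# With alpha = 1 the Beta(1, 1) weights are uniform on [0, 1]: each sample is
# paired with a random other sample via the permuted index, and both inputs and
# one-hot labels are interpolated with the same per-sample weight w, i.e.
# x = w*x1 + (1-w)*x2 and y = w*y1 + (1-w)*y2.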
def noise(data):
"""
Adding White Noise.
"""
# you can take any distribution from
# https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.random.html
# more noise reduce the value to 0.5
noise_amp = 0.05 * np.random.uniform() * np.amax(data)
data = data.astype('float64') + noise_amp * \
np.random.normal(size=data.shape[0])
return data
def shift(data):
"""
Random Shifting.
"""
s_range = int(np.random.uniform(low=-5, high=5) * 1000) # default at 500
return np.roll(data, s_range)
def stretch(data, rate=0.8):
"""
Streching the Sound. Note that this expands the dataset slightly
"""
# keep the same length, drop some
data = librosa.effects.time_stretch(data, rate)[:len(data)]
return data
def pitch(data, sr=16000):
"""
Pitch Tuning.
"""
bins_per_octave = 12
pitch_pm = 2
pitch_change = pitch_pm * 2 * (np.random.uniform())
data = librosa.effects.pitch_shift(data.astype('float64'),
sr,
n_steps=pitch_change,
bins_per_octave=bins_per_octave)
return data
def dyn_change(data):
"""
Random Value Change.
"""
dyn_change = np.random.uniform(
low=-0.5, high=7) # default low = 1.5, high = 3
return data * dyn_change
def speed_npitch(data):
"""
Speed and Pitch Tuning.
"""
# you can change low and high here
length_change = np.random.uniform(low=0.8, high=1)
speed_fac = 1.2 / length_change # try changing 1.0 to 2.0 ... =D
tmp = np.interp(
np.arange(
0, len(data), speed_fac), np.arange(
0, len(data)), data)
minlen = min(data.shape[0], tmp.shape[0])
data *= 0
data[0:minlen] = tmp[0:minlen]
return data
def makeT(cp):
# cp: [K x 2] control points
# T: [(K+3) x (K+3)]
K = cp.shape[0]
T = np.zeros((K+3, K+3))
T[:K, 0] = 1
T[:K, 1:3] = cp
T[K, 3:] = 1
T[K+1:, 3:] = cp.T
R = squareform(pdist(cp, metric='euclidean'))
R = R * R
R[R == 0] = 1 # a trick to make R ln(R) 0
R = R * np.log(R)
np.fill_diagonal(R, 0)
T[:K, 3:] = R
return T
def liftPts(p, cp):
# p: [N x 2], input points
# cp: [K x 2], control points
# pLift: [N x (3+K)], lifted input points
N, K = p.shape[0], cp.shape[0]
pLift = np.zeros((N, K+3))
pLift[:,0] = 1
pLift[:,1:3] = p
R = cdist(p, cp, 'euclidean')
R = R * R
R[R == 0] = 1
R = R * np.log(R)
pLift[:,3:] = R
return pLift
def spec_augment(spec):
W=40
T=30
F=13
mt=2
mf=2
# Nframe : number of spectrum frame
Nframe = spec.shape[1]
# Nbin : number of spectrum freq bin
Nbin = spec.shape[0]
# check input length
if Nframe < W*2+1:
W = int(Nframe/4)
if Nframe < T*2+1:
T = int(Nframe/mt)
if Nbin < F*2+1:
F = int(Nbin/mf)
# warping parameter initialize
w = random.randint(-W,W)
center = random.randint(W,Nframe-W)
src = np.asarray([[ float(center), 1], [ float(center), 0], [ float(center), 2], [0, 0], [0, 1], [0, 2], [Nframe-1, 0], [Nframe-1, 1], [Nframe-1, 2]])
dst = np.asarray([[ float(center+w), 1], [ float(center+w), 0], [ float(center+w), 2], [0, 0], [0, 1], [0, 2], [Nframe-1, 0], [Nframe-1, 1], [Nframe-1, 2]])
#print(src,dst)
# source control points
xs, ys = src[:,0],src[:,1]
cps = np.vstack([xs, ys]).T
# target control points
xt, yt = dst[:,0],dst[:,1]
# construct TT
TT = makeT(cps)
# solve cx, cy (coefficients for x and y)
xtAug = np.concatenate([xt, np.zeros(3)])
ytAug = np.concatenate([yt, np.zeros(3)])
cx = nl.solve(TT, xtAug) # [K+3]
cy = nl.solve(TT, ytAug)
# dense grid
x = np.linspace(0, Nframe-1,Nframe)
y = np.linspace(1,1,1)
x, y = np.meshgrid(x, y)
xgs, ygs = x.flatten(), y.flatten()
gps = np.vstack([xgs, ygs]).T
# transform
pgLift = liftPts(gps, cps) # [N x (K+3)]
xgt = np.dot(pgLift, cx.T)
spec_warped = np.zeros_like(spec)
for f_ind in range(Nbin):
spec_tmp = spec[f_ind,:]
func = interpolate.interp1d(xgt, spec_tmp,fill_value="extrapolate")
xnew = np.linspace(0, Nframe-1,Nframe)
spec_warped[f_ind,:] = func(xnew)
# sample mt of time mask ranges
t = np.random.randint(T-1, size=mt)+1
# sample mf of freq mask ranges
f = np.random.randint(F-1, size=mf)+1
# mask_t : time mask vector
mask_t = np.ones((Nframe,1))
ind = 0
t_tmp = t.sum() + mt
for _t in t:
k = random.randint(ind,Nframe-t_tmp)
mask_t[k:k+_t] = 0
ind = k+_t+1
t_tmp = t_tmp - (_t+1)
mask_t[ind:] = 1
# mask_f : freq mask vector
mask_f = np.ones((Nbin,1))
ind = 0
f_tmp = f.sum() + mf
for _f in f:
k = random.randint(ind,Nbin-f_tmp)
mask_f[k:k+_f] = 0
ind = k+_f+1
f_tmp = f_tmp - (_f+1)
mask_f[ind:] = 1
# calculate mean
mean = np.mean(spec_warped)
# make spectrum to zero mean
spec_zero = spec_warped-mean
spec_masked = ((spec_zero * mask_t.T) * mask_f) + mean
# spec_masked = ((spec_zero * mask_t).T * mask_f).T
return spec_masked
```
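A minimal usage sketch for the augmentations above, assuming this file is importable as `data_augmentation`; the shapes are illustrative.
```python
import numpy as np
from data_augmentation import noise, shift, spec_augment

wav = np.random.randn(16000)             # one second of fake 16 kHz audio
wav_aug = shift(noise(wav))              # waveform-level augmentation
spec = np.abs(np.random.randn(96, 250))  # fake (freq_bins, frames) spectrogram
spec_aug = spec_augment(spec)            # augmented spectrogram, same shape
```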
#### File: AutoSpeech2020/mysubmission/model_manager.py
```python
import gc
import numpy as np
from sklearn.metrics import roc_auc_score, accuracy_score
from tensorflow.python.keras import backend as K
from CONSTANT import CLASS_NUM, MODEL_FIRST_MAX_RUN_LOOP, FIRST_ROUND_DURATION, SECOND_ROUND_DURATION
from models import * # import all models and model_name constant
from models.crnn2d import Crnn2dModel
from models.crnn2d_larger import Crnn2dLargerModel
from models.crnn2d_vgg import Crnn2dVggModel
from models.my_classifier import Classifier
from models.cnn import CnnModel2D
from tools import log
def auc_metric(solution, prediction):
if solution.sum(axis=0).min() == 0:
return np.nan
auc = roc_auc_score(solution, prediction, average='macro')
return np.mean(auc * 2 - 1)
def acc_metric(solution, prediction):
if solution.sum(axis=0).min() == 0:
return np.nan
acc = accuracy_score(solution, prediction)
return acc
class ModelManager(Classifier):
def __init__(self,
meta,
data_manager,
keep_num=5,
each_model_keep_num=3,
each_model_top_k=2,
patience=3,
auc_threshold=0.5,
*args,
**kwargs):
self.metadata = meta
self._data_manager = data_manager
self._keep_num = keep_num
self._each_model_keep_num = each_model_keep_num
self._each_model_top_k = each_model_top_k
self._patience = patience
self._not_rise_num = 0
self._input_shape = None
self._model = None
self._model_name = None
self._last_model_name = None
self._cur_model_run_loop = 0
self._model_num = 0
self._model_idx = 0
self._round_num = 0
self._val_set = None
self._test_x = None
self._use_new_train = False
self._is_reset_model = False
self._use_mfcc = True
self._is_need_30s = False
self._use_mel_round = None
self._k_best_predicts = [-1] * self._keep_num
self._k_best_auc = [-1.1] * self._keep_num
self._each_model_best_predict = {}
self._each_model_best_auc = {}
self._cur_model_max_auc = -1
self._auc_threshold = auc_threshold
self._num_classes = self.metadata[CLASS_NUM]
self._model_lib = {
LR_MODEL: LogisticRegression,
LSTM_MODEL: LstmAttention,
CRNN_MODEL: CrnnModel,
CRNN2D_MODEL: Crnn2dModel,
CRNN2D_LARGER_MODEL: Crnn2dLargerModel,
CRNN2D_VGG_MODEL: Crnn2dVggModel,
BILSTM_MODEL: BilstmAttention,
CNN_MODEL_2D: CnnModel2D,
SVM_MODEL: SvmModel,
ATTGRU: AttentionGru
}
self._model_sequences = [
LR_MODEL,
CRNN_MODEL,
#CNN_MODEL_2D,
BILSTM_MODEL,
LSTM_MODEL
]
self._max_first_model_run_loop = MODEL_FIRST_MAX_RUN_LOOP
self._max_model_run_loop = 12
self._models = {}
def _get_or_create_model(self):
# use new model and not reset model, have to initialize the model
if not self._model.is_init:
log(f'get new model {self._model_name}')
# init model parameters
if self._model_name == CNN_MODEL_2D:
kwargs = {
'input_shape': self._input_shape[1:],
'num_classes': self.metadata[CLASS_NUM],
'max_layer_num': 10
}
elif self._model_name in [LSTM_MODEL, BILSTM_MODEL, CRNN_MODEL, CRNN2D_MODEL, CRNN2D_LARGER_MODEL,
CRNN2D_VGG_MODEL, ATTGRU]:
kwargs = {
'input_shape': self._input_shape[1:],
'num_classes': self.metadata[CLASS_NUM],
}
elif self._model_name == SVM_MODEL:
kwargs = {
'kernel': 'linear',
'max_iter': 1000
}
elif self._model_name == LR_MODEL:
kwargs = {
'kernel': 'liblinear',
'max_iter': 100
}
else:
raise Exception("No such model!")
if not self._model.is_init:
self._model.init_model(**kwargs)
log(f'This train loop use {self._model_name}, last train loop use {self._last_model_name}')
def _pre_select_model(self, train_loop_num):
self._last_model_name = self._model_name
if train_loop_num == 1 or self._model_name is None:
self._model_name = self._model_sequences[0]
self._each_model_best_auc[self._model_name] = [-1]
self._each_model_best_predict[self._model_name] = [-1]
self._use_new_train = True
if self._not_rise_num == self._patience \
or (self._model_num == 0 and self._cur_model_run_loop >= self._max_first_model_run_loop) \
or (self._round_num == 0 and self._cur_model_run_loop >= self._max_model_run_loop):
self._model_idx += 1
if self._model_idx == len(
self._model_sequences) and LR_MODEL in self._model_sequences:
# TODO be careful!
self._model_idx = 1
self._round_num += 1
if self._round_num > 1:
self._patience = 4
# sort model sequences by auc, desc
if not self._data_manager.crnn_first:
self._model_sequences = [self._model_sequences[0]] \
+ sorted(self._model_sequences[1:],
key=lambda x: self._each_model_best_auc[x][-1], reverse=True)
else:
self._model_sequences.remove(CRNN_MODEL)
self._model_sequences = [self._model_sequences[0]] + [CRNN_MODEL] \
+ sorted(self._model_sequences[1:],
key=lambda x: self._each_model_best_auc[x][-1], reverse=True)
log(
f'round {self._round_num} start, model sequences {self._model_sequences[self._model_idx:]}')
self._model_name = self._model_sequences[self._model_idx]
self._model_num += 1
self._not_rise_num = 0
log(
f'change model from {self._last_model_name} to {self._model_name}, loop_num: {self._cur_model_run_loop}')
self._use_new_train = self._model_num in [0,
1,
(2 * (len(self._model_sequences) - 1)) + 1,
(3 * (len(self._model_sequences) - 1)) + 1,
(4 * (len(self._model_sequences) - 1)) + 1]
self._is_reset_model = (self._round_num > 1
and self._model_num == self._round_num * (len(self._model_sequences) - 1) + 1)
if self._use_new_train:
self._test_x = None
self._cur_model_run_loop = 0
if self._round_num == 0 and self._cur_model_run_loop == 0:
self._each_model_best_auc[self._model_name] = [-1]
self._each_model_best_predict[self._model_name] = [-1]
self._cur_model_max_auc = -1
elif self._round_num == 1 and self._cur_model_run_loop == 0:
self._cur_model_max_auc = self._each_model_best_auc[self._model_name][-1]
elif self._round_num >= 2 and self._cur_model_run_loop == 0:
self._each_model_best_auc[self._model_name] += [-1]
self._each_model_best_predict[self._model_name] += [-1]
self._cur_model_max_auc = -1
if self._is_reset_model:
log(f'new round {self._round_num}')
# clear all models
self._models.clear()
del self._model
self._model = None
gc.collect()
K.clear_session()
# self._new_round = False
if self._model_name != self._last_model_name or self._model is None or self._is_reset_model:
if self._model_name in self._models:
self._model = self._models[self._model_name]
else:
self._model = self._model_lib[self._model_name]()
self._models[self._model_name] = self._model
def _get_each_model_top_k_predicts(self):
predicts = []
for k, v in self._each_model_best_auc.items():
if k == LR_MODEL:
continue
k_predicts = np.asarray(self._each_model_best_predict[k])
temp = [(auc, k_predicts[i]) for i, auc in enumerate(v)
if auc > max(self._auc_threshold, self._k_best_auc[0] - 0.1)]
temp.sort(key=lambda x: x[0], reverse=True)
predicts.extend(temp[:self._each_model_top_k])
if len(predicts) == 0:
return [], []
predicts = sorted(predicts, key=lambda x: x[0], reverse=True)[
:self._each_model_keep_num]
top_k_aucs = [predicts[i][0] for i in range(len(predicts))]
top_k_predicts = [predicts[i][1] for i in range(len(predicts))]
return top_k_aucs, top_k_predicts
def _blending_ensemble(self):
selected_k_best = [self._k_best_predicts[i]
for i, a in enumerate(self._k_best_auc) if a > 0.0]
each_model_k_aucs, selected_each_model_k_best = self._get_each_model_top_k_predicts()
if self._round_num >= 2:
selected = selected_k_best + selected_each_model_k_best
else:
selected = selected_k_best
log(f"model_num: {self._model_num} Select k best {self._keep_num} predicts which have auc {self._k_best_auc}, "
f"each model {self._each_model_keep_num} best which have auc {each_model_k_aucs}, "
f"and each previous model's best predict which have auc "
f"{[f'({k}:{v})' for k, v in self._each_model_best_auc.items()]} ")
return np.mean(selected, axis=0)
@property
def data_manager(self):
return self._data_manager
def fit(self, train_loop_num=1, **kwargs):
# select the model first, in order to use its preprocess-data method
self._pre_select_model(train_loop_num)
log(f'fit {self._model_name} for {self._cur_model_run_loop} times')
self._cur_model_run_loop += 1
# get data
if self._round_num == 0:
train_x, train_y, val_x, val_y = self._data_manager.get_train_data(train_loop_num=train_loop_num,
model_num=self._model_num,
round_num=self._round_num,
use_new_train=self._use_new_train,
use_mfcc=self._use_mfcc)
self._is_nedd_30s = self._data_manager.need_30s
if self._is_nedd_30s:
self._use_mel_round = 3
else:
self._use_mel_round = 2
else:
if self._round_num == self._use_mel_round:
self._use_mfcc = False
else:
self._use_mfcc = True
train_x, train_y, val_x, val_y = self._data_manager.get_train_data(train_loop_num=train_loop_num,
model_num=self._model_num,
round_num=self._round_num,
use_new_train=self._use_new_train,
use_mfcc=self._use_mfcc)
self._val_set = (val_x, val_y)
self._input_shape = train_x.shape
log(f'train_x: {train_x.shape}; train_y: {train_y.shape};'
f' val_x: {val_x.shape}; val_y: {val_y.shape};')
# now actually initialize the model
self._get_or_create_model()
self._model.fit(train_x, train_y, (val_x, val_y),
self._round_num, **kwargs)
def predict(self, test_x, is_final_test_x=False):
x_val, y_val = self._val_set
auc = auc_metric(y_val, self._model.predict(x_val))
need_predict = False
dif_score = 0.1
if auc > self._cur_model_max_auc:
log(
f'cur_max_auc {self._cur_model_max_auc}; cur_auc {auc}; {self._model_name} auc rise for {self._cur_model_run_loop} times')
self._cur_model_max_auc = auc
if self._round_num == 0:
self._not_rise_num = max(0, self._not_rise_num - 1)
else:
self._not_rise_num = 0
if auc > self._each_model_best_auc[LR_MODEL][-1] - dif_score:
need_predict = True
else:
self._not_rise_num += 1
log(
f'cur_max_auc {self._cur_model_max_auc}; cur_auc {auc}; {self._model_name} auc not rise for {self._not_rise_num} times')
if max(self._k_best_auc[-1], self._each_model_best_auc[LR_MODEL]
[-1] - dif_score) >= auc and not need_predict:
log('not predict')
else:
log(f'new predict')
if is_final_test_x:
if self._test_x is None:
if self._model_num == 0:
self._test_x = self._data_manager.lr_preprocess(test_x)
elif self._round_num == 0:
self._test_x = self._data_manager.nn_preprocess(test_x,
n_mfcc=96,
max_duration=FIRST_ROUND_DURATION,
is_mfcc=self._use_mfcc)
else:
self._test_x = self._data_manager.nn_preprocess(test_x,
n_mfcc=128,
max_duration=SECOND_ROUND_DURATION,
is_mfcc=self._use_mfcc)
if self._round_num > 1:
y_pred = self._model.predict(self._test_x, batch_size=32)
else:
y_pred = self._model.predict(self._test_x, batch_size=32 * 8)
if self._k_best_auc[-1] < auc and auc > self._each_model_best_auc[LR_MODEL][-1] - dif_score:
self._k_best_predicts[-1] = y_pred
self._k_best_auc[-1] = auc
if self._each_model_best_auc[self._model_name][-1] < auc:
self._each_model_best_predict[self._model_name][-1] = y_pred
self._each_model_best_auc[self._model_name][-1] = auc
i = 0
for auc, pred in sorted(
zip(self._k_best_auc, self._k_best_predicts), key=lambda x: x[0], reverse=True):
self._k_best_auc[i] = auc
self._k_best_predicts[i] = pred
i += 1
self._use_new_train = False
self._is_reset_model = False
return self._blending_ensemble()
```
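The blending rule in `_blending_ensemble` reduces to "average every prediction whose validation AUC cleared the threshold". A self-contained sketch with made-up numbers:
```python
import numpy as np

k_best_auc = [0.91, 0.88, -1.1]  # -1.1 marks an unused slot
k_best_predicts = [np.array([[0.9, 0.1]]),
                   np.array([[0.7, 0.3]]),
                   np.array([[0.5, 0.5]])]
selected = [p for auc, p in zip(k_best_auc, k_best_predicts) if auc > 0.0]
ensemble = np.mean(selected, axis=0)  # -> [[0.8, 0.2]]
```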
#### File: mysubmission/resnet_utils/resnet_data_utils.py
```python
import librosa
import numpy as np
import multiprocessing
from multiprocessing import Pool
from itertools import repeat
NCPU = multiprocessing.cpu_count()
def extend_wav(wav, train_wav_len=40000, test_wav_len=40000, mode='train'):
if mode == 'train':
div, mod = divmod(train_wav_len, wav.shape[0])
extended_wav = np.concatenate([wav]*div+[wav[:mod]])
if np.random.random() < 0.3:
extended_wav = extended_wav[::-1]
return extended_wav
else:
div, mod = divmod(test_wav_len, wav.shape[0])
extended_wav = np.concatenate([wav]*div+[wav[:mod]])
return extended_wav
def lin_spectrogram_from_wav(wav, hop_length, win_length, n_fft=1024):
linear = librosa.stft(wav, n_fft=n_fft, win_length=win_length, hop_length=hop_length) # linear spectrogram
return linear.T
def load_data(mag, train_spec_len=250, test_spec_len=250, mode='train'):
freq, time = mag.shape
if mode == 'train':
if time-train_spec_len > 0:
randtime = np.random.randint(0, time-train_spec_len)
spec_mag = mag[:, randtime:randtime+train_spec_len]
else:
spec_mag = mag[:, :train_spec_len]
else:
spec_mag = mag[:, :test_spec_len]
mu = np.mean(spec_mag, 0, keepdims=True)
std = np.std(spec_mag, 0, keepdims=True)
return (spec_mag - mu) / (std + 1e-5)
def wav_to_mag(wav, params, win_length=400, hop_length=160, n_fft=512):
mode = params["mode"]
wav = extend_wav(wav, params["train_wav_len"], params["test_wav_len"], mode=mode)
linear_spect = lin_spectrogram_from_wav(wav, hop_length, win_length, n_fft)
mag, _ = librosa.magphase(linear_spect)
mag_T = mag.T
if mode == 'test':
mag_T = load_data(mag_T, params["train_spec_len"], params["test_spec_len"], mode)
return mag_T
def pre_trans_wav_update(wav_list, params):
print(f"pre_trans_wav len={len(wav_list)}")
if len(wav_list) == 0:
return []
elif len(wav_list) > NCPU * 10:
with Pool(NCPU) as pool:
mag_arr = pool.starmap(wav_to_mag, zip(wav_list, repeat(params)))
pool.close()
pool.join()
return mag_arr
else:
mag_arr = [wav_to_mag(wav, params) for wav in wav_list]
return mag_arr
```
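A usage sketch for `wav_to_mag`, assuming the module is importable as below and `test.wav` is a hypothetical file; the params keys mirror the ones the function reads.
```python
import librosa
from resnet_utils.resnet_data_utils import wav_to_mag

wav, _ = librosa.load("test.wav", sr=16000)  # hypothetical input file
params = {"mode": "test",
          "train_wav_len": 40000, "test_wav_len": 40000,
          "train_spec_len": 250, "test_spec_len": 250}
mag = wav_to_mag(wav, params)  # normalized (freq_bins, test_spec_len) magnitudes
```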
#### File: 279632990/AutoSpeech2020/run_local_test.py
```python
VERSION = "v20190505"
DESCRIPTION = \
"""This script allows participants to run local test of their method
within the
downloaded starting kit folder (and avoid using submission quota on CodaLab). To
do this, run:
```
python run_local_test.py -dataset_dir=./sample_data/miniciao
-code_dir=./code_submission/
```
in the starting kit directory. If you want to test the performance of a
different algorithm on a different dataset, please specify them using respective
arguments.
If you want to use default folders (i.e. those in above command line), simply
run
```
python run_local_test.py
```
"""
# ALL INFORMATION, SOFTWARE, DOCUMENTATION, AND DATA ARE PROVIDED "AS-IS".
# ISABELLE GUYON, CHALEARN, AND/OR OTHER ORGANIZERS OR CODE AUTHORS DISCLAIM
# ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE, AND THE
# WARRANTY OF NON-INFRINGEMENT OF ANY THIRD PARTY'S INTELLECTUAL PROPERTY
# RIGHTS.
# IN NO EVENT SHALL ISABELLE GUYON AND/OR OTHER ORGANIZERS BE LIABLE FOR ANY
# SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER ARISING OUT OF
# OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF SOFTWARE, DOCUMENTS, MATERIALS,
# PUBLICATIONS, OR INFORMATION MADE AVAILABLE FOR THE CHALLENGE.
################################################################################
# Verbosity level of logging.
# Can be: NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL
verbosity_level = 'INFO'
import logging
import os
import tensorflow as tf
import time
import shutil # for deleting a whole directory
import webbrowser
from multiprocessing import Process
logging.basicConfig(
level=getattr(logging, verbosity_level),
format='%(asctime)s %(levelname)s %(filename)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
def _HERE(*args):
h = os.path.dirname(os.path.realpath(__file__))
return os.path.join(h, *args)
def get_path_to_ingestion_program(starting_kit_dir):
return os.path.join(starting_kit_dir,
'ingestion', 'ingestion.py')
def get_path_to_scoring_program(starting_kit_dir):
return os.path.join(starting_kit_dir,
'scoring', 'score.py')
def remove_dir(output_dir):
"""Remove the directory `output_dir`.
This aims to clean existing output of last run of local test.
"""
if os.path.isdir(output_dir):
logging.info("Cleaning existing output directory of last run: {}" \
.format(output_dir))
shutil.rmtree(output_dir)
def get_basename(path):
if len(path) == 0:
return ""
if path[-1] == os.sep:
path = path[:-1]
return path.split(os.sep)[-1]
def run_baseline(dataset_dir, code_dir, time_budget=7200):
# Current directory containing this script
starting_kit_dir = os.path.dirname(os.path.realpath(__file__))
path_ingestion = get_path_to_ingestion_program(starting_kit_dir)
path_scoring = get_path_to_scoring_program(starting_kit_dir)
# Run ingestion and scoring at the same time
command_ingestion = \
"python {} --dataset_dir={} --code_dir={} --time_budget={}"\
.format(path_ingestion, dataset_dir, code_dir, time_budget)
command_scoring = \
'python {} --solution_dir={}' \
.format(path_scoring, dataset_dir)
def run_ingestion():
os.system(command_ingestion)
def run_scoring():
os.system(command_scoring)
ingestion_process = Process(name='ingestion', target=run_ingestion)
scoring_process = Process(name='scoring', target=run_scoring)
ingestion_output_dir = os.path.join(starting_kit_dir,
'ingestion_output')
score_dir = os.path.join(starting_kit_dir,
'scoring_output')
remove_dir(ingestion_output_dir)
remove_dir(score_dir)
ingestion_process.start()
scoring_process.start()
detailed_results_page = os.path.join(starting_kit_dir,
'scoring_output',
'detailed_results.html')
detailed_results_page = os.path.abspath(detailed_results_page)
# Open detailed results page in a browser
time.sleep(2)
for i in range(30):
if os.path.isfile(detailed_results_page):
webbrowser.open('file://' + detailed_results_page, new=2)
break
time.sleep(1)
if __name__ == '__main__':
default_starting_kit_dir = _HERE()
# The default dataset is 'miniciao' under the folder sample_data/
default_dataset_dir = os.path.join(default_starting_kit_dir,
'sample_data', 'test_data1')
default_code_dir = os.path.join(default_starting_kit_dir,
'code_submission')
default_time_budget = 1200
tf.flags.DEFINE_string('dataset_dir', default_dataset_dir,
"Directory containing the content (e.g. "
"adult.data/ + "
"adult.solution) of an AutoDL dataset. Specify this "
"argument if you want to test on a different "
"dataset.")
tf.flags.DEFINE_string('code_dir', default_code_dir,
"Directory containing a `model.py` file. Specify "
"this "
"argument if you want to test on a different "
"algorithm."
)
tf.flags.DEFINE_float('time_budget', default_time_budget,
"Time budget for model train/predict if not "
"specified in meta.json")
FLAGS = tf.flags.FLAGS
dataset_dir = FLAGS.dataset_dir
code_dir = FLAGS.code_dir
time_budget = FLAGS.time_budget
logging.info("#" * 50)
logging.info("Begin running local test using")
logging.info("code_dir = {}".format(get_basename(code_dir)))
logging.info("dataset_dir = {}".format(get_basename(dataset_dir)))
logging.info("#" * 50)
run_baseline(dataset_dir, code_dir, time_budget)
``` |
{
"source": "27Anurag/minesweeper_model",
"score": 3
} |
#### File: minesweeper_model/minesweeper_model/str_input_to_mine_coords.py
```python
def str_input_to_mine_coords(input_string):
"""Parse an ASCII field (one row per line, 'x' marks a mine) into
(width, height, list of (x, y) mine coordinates)."""
mine_coords = []
rows = input_string.split("\n")
x_len, y_len = len(rows[0]), len(rows)
for y, row in enumerate(rows):
for x, char in enumerate(row):
if char == 'x':
mine_coords.append((x, y))
return (x_len, y_len, mine_coords)
```
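A quick example of the parser above:
```python
x_len, y_len, mines = str_input_to_mine_coords("..x\nx..")
# x_len == 3, y_len == 2, mines == [(2, 0), (0, 1)]
```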
#### File: minesweeper_model/minesweeper_model/utility.py
```python
def surrounding_tiles(tile_x, tile_y, remove_outside_tiles=False):
"""Return the 8 tiles surrounding the given tile.
Parameters:
tile_x: X coord of tile
tile_y: Y coord of tile
Returns:
List of two tuples (x, y) of surrounding tiles.
The list will exclude the given tile itself.
The list can potentially contain -ve coordinates.
"""
# Loops generate all surrounding tiles and the tile itself
tiles = [(x, y) for x in [tile_x - 1, tile_x, tile_x + 1]
for y in [tile_y - 1, tile_y, tile_y + 1]]
# Remove the tile itself to only leave surrounding tiles
tiles.remove((tile_x, tile_y))
if remove_outside_tiles:
tiles = [(x, y) for (x, y) in tiles if x >= 0 and y >= 0]
return tiles
```
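A quick example: a corner tile keeps only its three in-bounds neighbours.
```python
surrounding_tiles(0, 0, remove_outside_tiles=True)
# -> [(0, 1), (1, 0), (1, 1)]
```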
#### File: minesweeper_model/tests/test_field.py
```python
import unittest
from minesweeper_model import field
class TestField(unittest.TestCase):
def test_are_coords_valid(self):
f = field.Field(4, 4)
self.assertTrue(f.are_coords_valid(3, 2))
self.assertFalse(f.are_coords_valid(5, 2))
def test_render(self):
mines = {(0, 0), (3, 1), (1, 3)}
f = field.Field(4, 4, mines)
expected = "x...\n...x\n....\n.x..\n"
self.assertEqual(f.render(), expected)
expected = "@ooo\nooo@\noooo\no@oo\n"
self.assertEqual(f.render("o", "@"), expected)
```
#### File: minesweeper_model/tests/test_player_field.py
```python
import unittest
from minesweeper_model import field
class TestPlayerField(unittest.TestCase):
def setUp(self):
self.field1 = field.PlayerField(field.Field(4, 4))
self.field2 = field.PlayerField(field.Field(4, 4))
self.field2.field.mine_coords = {(0, 0), (1, 0)}
self.field2.open_coords = {(1, 1), (0, 1)}
self.field2.flag_coords = {(0, 0), (2, 1)}
self.field2.hints = {(0, 0): -1, (0, 1): 2, (0, 2): 0, (0, 3): 0,
(1, 0): -1, (1, 1): 2, (1, 2): 0, (1, 3): 0,
(2, 0): 1, (2, 1): 1, (2, 2): 0, (2, 3): 0,
(3, 0): 0, (3, 1): 0, (3, 2): 0, (3, 3): 0}
field3_mines = {(2, 0), (2, 2), (6, 3), (1, 5), (5, 6)}
self.field3 = field.PlayerField(field.Field(8, 7, field3_mines))
def test_tile(self):
actual = self.field2.tile(1, 1)
expected = {"hint": 2, "flag": False}
self.assertEqual(actual, expected)
actual = self.field2.tile(0, 0)
# Hint is None as the mine is not open
expected = {"hint": None, "flag": True}
self.assertEqual(actual, expected)
actual = self.field2.tile(1, 0)
expected = {"hint": None, "flag": False}
self.assertEqual(actual, expected)
def test_nine_tiles(self):
# .: closed tile
# [0-9]: open tile, hint
# x: mine
# !: flag
# ----------
# ! . . .
# 2 2 . .
# x x . .
# . . . .
actual = self.field2.nine_tiles(0, 1)
# Expect to exclude tiles outside the field.
expected = {(0, 0): {"hint": None, "flag": True},
(0, 2): {"hint": None, "flag": False},
(1, 0): {"hint": None, "flag": False},
(1, 1): {"hint": 2, "flag": False},
(1, 2): {"hint": None, "flag": False},
(0, 1): {"hint": 2, "flag": False}}
self.assertEqual(actual, expected)
def test_open_tile(self):
self.field1.field.mine_coords = {(0, 0)}
# Tile without mine
self.assertTrue(self.field1.open_tile(1, 0))
self.assertEqual(self.field1.open_coords, {(1, 0)})
# Tile with mine
self.assertFalse(self.field1.open_tile(0, 0))
self.assertEqual(self.field1.open_coords, {(1, 0)})
def test_open_tile_open_adjacent_tiles(self):
# field3
# mines: hints: expected open:
#
# ..x..... 01x10000 ..x+++++
# ........ 02220000 ...+++++
# ..x..... 01x10111 ..x+++++
# ......x. 011101x1 ...+++x.
# ........ 11100111 ..++++..
# .x...... 1x101110 .x+++...
# .....x.. 11101x10 ..+++x..
self.field3.open_tile(5, 1, True)
# expect adjacent 0 hint tiles to open recursively until a non-zero hint tile
# is found, when the adjacent tiles are opened the last time and algorithm stops.
expected_open_coords = {(3, 0), (4, 0), (5, 0), (6, 0), (7, 0),
(3, 1), (4, 1), (5, 1), (6, 1), (7, 1),
(3, 2), (4, 2), (5, 2), (6, 2), (7, 2),
(2, 3), (3, 3), (4, 3), (5, 3),
(2, 4), (3, 4), (4, 4), (5, 4),
(2, 5), (3, 5), (4, 5), (5, 5),
(2, 6), (3, 6), (4, 6)}
self.assertEqual(self.field3.open_coords, expected_open_coords)
def test_toggle_flag(self):
self.field1.toggle_flag(0, 1)
self.assertEqual(self.field1.flag_coords, {(0, 1)})
self.field1.flag_coords = {(2, 0)}
self.field1.toggle_flag(2, 0)
self.assertEqual(self.field1.flag_coords, set())
self.assertRaises(ValueError, self.field1.toggle_flag, 5, 5)
def test_traverse_tiles(self):
# ..x..... 01x10000
# .....~.. 02220~00
# ..x..... 01x10111
# ......x. 011101x1
# ........ 11100111
# .x...... 1x101110
# .....x.. 11101x10
# Adjacent 0: [(4, 0), (4, 1), (4, 2), (5, 0), (6, 0), (6, 1)]
def should_visit(x, y, player_field):
return player_field.hints[(x, y)] == 0
actual = self.field3.traverse_tiles(0, 0, should_visit)
expected = {(0, 0), (0, 1), (0, 2), (0, 3)}
self.assertEqual(actual, expected)
def test_render(self):
mines = {(0, 0), (3, 1), (0, 3)}
f = field.Field(4, 4, mines)
pf = field.PlayerField(f)
pf.flag_coords = {(1, 1)}
pf.open_coords = {(0, 1), (3, 3)}
pf.hints = {(0, 1): 1, (3, 3): 0}
expected = "....\n1!..\n....\n...0\n"
self.assertEqual(pf.render(), expected)
expected = "oooo\n1?oo\noooo\nooo0\n"
self.assertEqual(pf.render("?", "o"), expected)
``` |
{
"source": "27Cobalter/vrc_log_viewer",
"score": 3
} |
#### File: 27Cobalter/vrc_log_viewer/vrc_log_viewer.py
```python
import glob
import os
import re
import sys
import time
import yaml
def tail(thefile, past):
if not past:
thefile.seek(0, 2)
while True:
line = thefile.readline()
if not line:
time.sleep(0.5)
continue
line = line.rstrip("\n").rstrip("\r")
if line != "":
yield repr(line)[1:-1]
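# A minimal config.yml sketch matching the keys read below; the values are
# illustrative assumptions, not taken from the original repo:
#
#   past: false                    # true: also print lines already in the log
#   logfile: ""                    # empty -> newest output_log_*.txt is picked
#   reg:
#     - ".*OnPlayerJoined (.*)"    # captured groups are what gets printed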
if __name__ == "__main__":
with open("config.yml", "r") as config:
conf = yaml.load(config, Loader=yaml.SafeLoader)
print("load config")
reg = []
for pattern in conf["reg"]:
print(" " + pattern)
reg.append(re.compile(pattern))
vrcdir = os.environ["USERPROFILE"] + "\\AppData\\LocalLow\\VRChat\\VRChat\\"
logfile = vrcdir + conf["logfile"]
if len(sys.argv) > 1:
logfile = sys.argv[1]
if logfile == vrcdir:
logfiles = glob.glob(vrcdir + "output_log_*.txt")
logfiles.sort(key=os.path.getctime, reverse=True)
logfile = logfiles[0]
with open(logfile, "r", encoding="utf-8") as f:
print("open logfile : ", logfile)
loglines = tail(f, conf["past"])
for line in loglines:
for pattern in reg:
match = pattern.match(line)
if not match:
continue
message = ""
for group in match.groups():
message = message + group + " "
print(message)
``` |
{
"source": "27Cobalter/vrc_meta_tool",
"score": 3
} |
#### File: 27Cobalter/vrc_meta_tool/vrc_meta_editor.py
```python
import datetime
import sys
from struct import pack
from zlib import crc32
class MetaData:
FORMAT_DATE_USER_INPUT = "%Y-%m-%d %H:%M:%S"
FORMAT_DATE_RAW_DATA = "%Y%m%d%H%M%S"
def __init__(self):
self.date = ""
self.photographer = ""
self.world = ""
self.users = []
        # PNG file signature
self.other_data = b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"
def validate_date_format(self, str_date):
try:
datetime.datetime.strptime(str_date, self.FORMAT_DATE_USER_INPUT)
return True
except ValueError:
return False
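    # Illustrative conversion performed by update_date (assumed example):
    #   "2018-05-10 18:52:00" -> "20180510185200" + "000"
    # The trailing "000" is padding that print() strips again via date[:-3].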
def update_date(self, strdate):
date = datetime.datetime.strptime(strdate, self.FORMAT_DATE_USER_INPUT)
strdate = datetime.datetime.strftime(date, self.FORMAT_DATE_RAW_DATA) + "000"
self.date = strdate
def update_photographer(self, photographer):
self.photographer = photographer
def update_world(self, world):
self.world = world
def add_user(self, user):
self.users.append(user)
def update_user(self, index, vrcname, twitterid):
user = self.users[index].rsplit(" : ", 1)
        # Do not change when vrcname or twitterid is an empty string
if vrcname != "":
user[0] = vrcname
if twitterid != "":
if len(user) == 1:
user.append(twitterid)
else:
user[1] = twitterid
self.users[index] = " : ".join(user)
def delete_users(self, delete_indexes):
deleted_user = []
for i in reversed(delete_indexes):
deleted_user.append(self.users.pop(i))
        # Return the deleted users in list order
return reversed(deleted_user)
def sort_users(self):
self.users.sort()
def print_users(self):
for i, user in enumerate(self.users):
print("{0:2d} {1:s}".format(i, user))
def print(self):
print("-" * 80)
print(
"Date:",
datetime.datetime.strptime(self.date[:-3], self.FORMAT_DATE_RAW_DATA),
)
print("Photographer:", self.photographer)
print("World:", self.world)
for user in self.users:
print("User:", user)
print("-" * 80)
class ChunkUtils:
    # PNG chunk helper functions
def chunk_iter(self, data):
total_length = len(data)
end = 4
while end + 8 < total_length:
length = int.from_bytes(data[end + 4 : end + 8], "big")
chunk_type = end + 8
chunk_data = chunk_type + 4
end = chunk_data + length
yield (data[chunk_type:chunk_data], data[chunk_data:end])
def chunk(self, name, data):
return pack("!I4s%dsI" % len(data), len(data), name, data, crc32(name + data))
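    # chunk() emits the standard PNG chunk framing:
    #   4-byte big-endian length | 4-byte type | data | 4-byte CRC32(type+data)
    # e.g. chunk(b"IEND", b"") == b"\x00\x00\x00\x00IEND\xaeB`\x82".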
def write(self, file_name, metadata):
metadata.users.sort()
with open(file_name, "w+b") as f:
f.write(metadata.other_data)
f.write(self.chunk(b"vrCd", metadata.date.encode("utf-8")))
f.write(self.chunk(b"vrCp", metadata.photographer.encode("utf-8")))
f.write(self.chunk(b"vrCw", metadata.world.encode("utf-8")))
for user in metadata.users:
f.write(self.chunk(b"vrCu", user.encode("utf-8")))
f.write(self.chunk(b"IEND", b""))
def parse_number(user_input, length):
words = user_input.split()
exclude_indexes = set()
for word in words:
invert = False
        # A leading ^ negates the selection
if word[0] == "^":
invert = True
word = word[1:]
ranges = word.split("-", 1)
num1 = int(ranges[0])
num2 = num1
if len(ranges) == 2:
num2 = int(ranges[1])
if num1 > num2:
num1, num2 = num2, num1
indexes = set(range(num1, num2 + 1))
if invert:
indexes = set(range(length)) - indexes
exclude_indexes = exclude_indexes | indexes
    # Clamp to the valid index range
exclude_indexes = exclude_indexes & set(range(length))
exclude_indexes = sorted(exclude_indexes)
return exclude_indexes
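# Worked examples for parse_number (illustrative, with a 6-item list):
#   parse_number("1 2 3", 6) -> [1, 2, 3]
#   parse_number("1-3", 6)   -> [1, 2, 3]
#   parse_number("^4", 6)    -> [0, 1, 2, 3, 5]   # everything except index 4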
def main(args):
if len(args) == 1:
print("Usage: vrc_meta_editor.py file\r\n")
return
image_path = args[1]
data = None
metadata = MetaData()
chunkutils = ChunkUtils()
with open(image_path, "rb") as f:
data = f.read()
assert data[:8] == b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"
    # Convert the image data into metadata
for chunk_type, chunk_data in chunkutils.chunk_iter(data):
if chunk_type == b"vrCd":
metadata.date = chunk_data.decode("utf-8")
elif chunk_type == b"vrCp":
metadata.update_photographer(chunk_data.decode("utf-8"))
elif chunk_type == b"vrCw":
metadata.update_world(chunk_data.decode("utf-8"))
elif chunk_type == b"vrCu":
metadata.add_user(chunk_data.decode("utf-8"))
elif chunk_type != b"IEND":
            # Chunks not used by vrc_meta_tool are stored as-is, except IEND
metadata.other_data = metadata.other_data + chunkutils.chunk(
chunk_type, chunk_data
)
metadata.sort_users()
metadata.print()
while True:
mode = input("Add/Update/Delete/Print/Quit [a/u/d/p/q] ")
if len(mode) == 0:
continue
mode_initial = mode[0].lower()
if mode_initial == "a":
            # Add a user
vrcname = input("VRCName: ")
if vrcname == "":
continue
twitterid = input('TwitterId: (eg:"@Twitter")(optional): ')
user_name = vrcname
if twitterid != "":
user_name = vrcname + " : " + twitterid
metadata.add_user(user_name)
metadata.sort_users()
chunkutils.write(image_path, metadata)
elif mode_initial == "u":
item = input("Date/Photographer/World/VRCName/TwitterID [d/p/w/v/t] ")
if len(item) == 0:
continue
item_initial = item[0].lower()
if item_initial == "d":
                # Change the capture date
valid = False
while not valid:
date = input('New Date(eg: "2018-05-10 18:52:00": ')
valid = metadata.validate_date_format(date)
if not valid:
print("invalid date format. expected YYYY-MM-DD HH:mm:ss")
metadata.update_date(date)
chunkutils.write(image_path, metadata)
elif item_initial == "p":
                # Change the photographer
photographer = input("New Photographer: ")
metadata.update_photographer(photographer)
chunkutils.write(image_path, metadata)
elif item_initial == "w":
                # Change the world
world = input("New World: ")
metadata.update_world(world)
chunkutils.write(image_path, metadata)
elif item_initial == "v":
                # Change the VRCName
                metadata.print_users()
                index = input("Select user: ")
                user_name = input("New VRCName: ")
                metadata.update_user(int(index), user_name, "")
                metadata.sort_users()
                chunkutils.write(image_path, metadata)
elif item_initial == "t":
                # Change the TwitterID
metadata.print_users()
index = input("Select user: ")
twitterid = input('New TwitterID: (eg:"@Twitter"):')
metadata.update_user(int(index), "", twitterid)
chunkutils.write(image_path, metadata)
elif mode_initial == "d":
            # Delete users
metadata.print_users()
user_input = input('Select User (eg:"1 2 3", "1-3", "^4"): ')
            # Indexes of the users to delete
delete_indexes = parse_number(user_input, len(metadata.users))
deleted_user = metadata.delete_users(delete_indexes)
for user in deleted_user:
print("delete :", user)
chunkutils.write(image_path, metadata)
elif mode in {"p", "P", "print", "Print"}:
metadata.print()
elif mode in {"q", "Q", "quit", "Quit"}:
break
if __name__ == "__main__":
main(sys.argv)
``` |
{
"source": "27pirateking/empoweredautoparts-scraper",
"score": 3
} |
#### File: empoweredautoparts-scraper/empoweredautoparts/scraping_utility.py
```python
def preprocess_descrip_array(des_array):
remove_index=-1
for index in range(0,len(des_array)):
if "Rotors are a direct bolt on item".lower() in des_array[index].lower():
remove_index = index
continue
if "Disc wear is reduced by up to 50% with".lower() in des_array[index].lower():
remove_index = index
continue
if "Empowered Auto Parts is an authorised".lower() in des_array[index].lower():
remove_index = index
if remove_index > 0:
des_array = des_array[0:remove_index]
return des_array
def process_description(des_item, descrip_html):
descrip_html = preprocess_descrip_array(descrip_html)
index = 0
compatible_models=-1
features=-1
product_highlights=-1
you_are_buying=-1
product_specifications=-1
for aa in descrip_html:
index+= 1
if "COMPATIBLE MODELS".lower() in aa.lower():
compatible_models = index-1
continue
if "FEAURES".lower() in aa.lower():
features = index-1
continue
if "PRODUCT HIGHLIGHTS".lower() in aa.lower():
product_highlights = index-1
continue
if "YOU ARE BUYING".lower() in aa.lower():
you_are_buying = index-1
continue
if "PRODUCT SPECIFICATIONS".lower() in aa.lower():
product_specifications = index-1
continue
descrips = [compatible_models,features,product_highlights,you_are_buying,product_specifications]
descrips.append(len(descrip_html))
descrips.sort()
keys = [i for i in descrips if i >= 0]
iter_keys = iter(keys)
des_item['description'] = ''.join(descrip_html[0:next(iter_keys)])
last_index = compatible_models + 1
if compatible_models >= 0:
next_last_index = next(iter_keys)
des_item['compatible_models'] = ''.join(descrip_html[last_index:next_last_index])
last_index=next_last_index + 1
else:
des_item['compatible_models'] = '.'
if features >= 0:
next_last_index = next(iter_keys)
des_item['features'] = ''.join(descrip_html[last_index:next_last_index])
last_index=next_last_index + 1
else:
des_item['features'] = '.'
if product_highlights >= 0:
next_last_index = next(iter_keys)
des_item['product_highlights'] = ''.join(descrip_html[last_index:next_last_index])
last_index=next_last_index + 1
else:
des_item['product_highlights'] = '.'
if you_are_buying >= 0:
next_last_index = next(iter_keys)
des_item['you_are_buying'] = ''.join(descrip_html[last_index:next_last_index])
last_index=next_last_index + 1
else:
des_item['you_are_buying'] = '.'
if product_specifications >= 0:
next_last_index = next(iter_keys)
des_item['product_specifications'] = ''.join(descrip_html[last_index:next_last_index])
else:
des_item['product_specifications'] = '.'
return des_item
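# Illustrative walk-through of process_description (assumed input):
#   descrip_html = ["intro...", "COMPATIBLE MODELS", "Camry 2005-2010",
#                   "FEATURES", "Slotted rotors"]
# gives marker indexes compatible_models=1, features=3, so
#   des_item['description']       == "intro..."
#   des_item['compatible_models'] == "Camry 2005-2010"
#   des_item['features']          == "Slotted rotors"
# and the remaining sections fall back to '.'.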
``` |
{
"source": "27pirateking/maximum-entropy-method",
"score": 3
} |
#### File: maximum-entropy-method/Utilities/extract.py
```python
import linecache
import numpy as np
import os
class extract(object):
"""
This object is used to extract the data from DCA monte carlo code.
"""
def __init__(self, filename = '7_G(K,w,b,b)', numSamples = 1000):
"""
"""
self.keys = ['[0,0]', '[0,-pi]','[pi,0-]','[pi,-pi]']
self.numSamples = numSamples
self.readAllFiles(filename)
self.writeFiles()
def readFile(self, filename = "./i0/7_G(K,w,b,b)"):
"""
        read the 7_G(K,w,b,b) data file.
"""
flag = True
i = 38
wn, G = [], {}
for k in self.keys:
G[k] = []
while flag:
line = linecache.getline(filename, i)
flag = (line != "\n")
i += 1
if flag:
a = line.split()
wn.append(float(a[0]))
for v,k in list(enumerate(self.keys)):
G[k].append(float(a[2*v+1]) + 1j * float(a[2*v+2]))
else:
break
self.n = len(wn)
return wn, G
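    # Each data line is assumed to be laid out as
    #   wn  Re[G(K1)] Im[G(K1)]  Re[G(K2)] Im[G(K2)]  ...
    # for the four K-points, which is why a[2*v+1] / a[2*v+2] above select
    # the v-th (Re, Im) pair.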
def readAllFiles(self,filename='7_G(K,w,b,b)'):
"""
        Read numSamples sample files from directories i10, i11, ... upward.
"""
path = os.getcwd()
filenames = []
        for i in range(self.numSamples):
dirname = '/i' + str(i+10) + '/'
filenames.append(path+dirname+filename)
self.G = {}
for k in self.keys:
self.G[k] = []
for file in filenames:
wn, iG = self.readFile(file)
for k in self.keys:
self.G[k].append(iG[k])
        self.wn = []
        # Fermionic Matsubara frequencies w_n = (2n + 1) * pi / beta, with
        # beta = 5.0 assumed from the hard-coded denominator.
        for n in range(self.n):
            self.wn.append((2 * (n - self.n // 2) + 1) * np.pi / 5.0)
self.G['tot'] = []
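        # The loop below builds what is assumed to be the K-averaged (local)
        # Green's function per sample: G_tot(iw_n) = (1/4) * sum_K G(K, iw_n).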
for i in range(self.numSamples):
iG = []
for j in range(self.n):
num = 0
for k in self.keys:
num += self.G[k][i][j]
iG.append(num/len(self.keys))
self.G['tot'].append(iG)
def writeFiles(self):
"""
Write 1000 samples.
"""
        for k, v in self.G.items():
            f = open(k, 'w')
            for i in range(self.n):
                f.write(str(self.wn[i]) + '\t')
                for j in range(self.numSamples):
                    f.write(str(np.real(v[j][i])) + '\t' + str(np.imag(v[j][i])) + '\t')
                f.write('\n')
            f.close()
if __name__ == "__main__":
data = extract('7_G(K,w,b,b)', 100)
``` |
{
"source": "27pirateking/panacea-scraper",
"score": 3
} |
#### File: MedIndia/spiders/MedSpider.py
```python
__author__ = 'Nikhil'
import scrapy
from MedIndia.items import MedindiaItem
html_headers = {
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"accept-encoding" : "gzip, deflate, sdch, br",
"accept-language" : "en-US,en;q=0.8,ms;q=0.6",
"user-agent" : "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"
}
#to csv - scrapy runspider MedIndia.py -o file.csv -t csv
class MedSpider(scrapy.Spider):
name = 'med'
#MedindiaSpider.download_delay = 1
allowed_domains = ["medindia.net"]
start_urls = [#"https://www.medindia.net/drug-price/brand-index.asp?alpha=a",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=b",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=c",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=d",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=e",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=f",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=g",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=h",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=i",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=j",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=k",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=l",
#"https://www.medindia.net/drug-price/brand-index.asp?alpha=m",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=n",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=o",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=p",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=q",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=r",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=s",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=t",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=u",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=v",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=w",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=x",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=y",
"https://www.medindia.net/drug-price/brand-index.asp?alpha=z",
]
def parse(self, response):
#drug_urls = response.css("table.table-bordered.table > tr > td > a::attr(href)").extract()
#for letter_href in reponse.css("div.btn-group.btn-group-sm > a::attr(href)"):
# next_letter_page =
for href in response.css("table.table-bordered.table > tr > td > a::attr(href)"):
#print(href)
yield response.follow(href, callback=self.parse_details)
#for drug_url in drug_urls:
# drug_url = response.urljoin(drug_url)
# print(drug_url)
# yield scrapy.Request(url=drug_url,
# #headers=html_headers,
# callback=self.parse_details)
next_page_url = response.css("a[title='Next Page']::attr(href)").extract_first()
if next_page_url:
next_page_url = response.urljoin(next_page_url)
#print(next_page_url)
yield scrapy.Request(url=next_page_url, callback=self.parse)
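    # parse() walks each brand-index page two ways: every drug link goes to
    # parse_details, and the "Next Page" anchor re-enters parse until the
    # index for that letter is exhausted.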
def parse_details(self,response):
#print("we are here")
item = MedindiaItem()
item['drugName'] = response.css("td > h1::text").extract()
item['drugForm'] = response.css("td > span::text")[0].extract()
item['drugGenericName'] = response.css("td > span::text")[1].extract()
item['price'] = response.css("div.ybox > b::text").extract()
item['dosage'] = response.css("div.ybox > span > b::text")[0].extract()
item['basicInfo'] = response.css("div.report-content::text").extract()
item['conditions'] = response.css("div.caption > b > a::text").extract()
item['sideEffects'] = response.xpath('.//p[@class="drug-content"][1]/text()').extract()
item['dosageInfo'] = response.xpath('.//p[@class="drug-content"][2]/text()').extract()
item['howToTake'] = response.xpath('.//p[@class="drug-content"][3]/text()').extract()
item['contraindications'] = response.xpath('.//p[@class="drug-content"][4]/text()').extract()
item['warningsAndPrecautions'] = response.xpath('.//p[@class="drug-content"][5]/text()').extract()
item['otherPrecautions'] = response.xpath('.//p[@class="drug-content"][6]/text()').extract()
item['StorageConditions'] = response.xpath('.//p[@class="drug-content"][7]/text()').extract()
#get data of each drug
yield item
# {
# 'Host': 'www.medindia.net',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0',
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en-US,en;q=0.5',
# 'Referer': 'https://www.medindia.net/drug-price/brand-index.asp?alpha=a',
# 'Accept-Encoding': 'gzip, deflate, br',
# #'Cookie': 'ASPSESSIONIDCASCCQQB=LAIEMMHCGFGJCIOHMDFGBBLP; ASPSESSIONIDACTARTCD=AEAGKDKCJADJAGILECIGFDDN; ASPSESSIONIDCATDSQBC=FGNMNKICLBCAIFIDJGDJGALL; ASPSESSIONIDCASBCSQD=LMEDKCOCNNJHPFKFDFLMAGME; ASPSESSIONIDCATBTTBC=AMLHCGIDDLAKKOFDLNDJHFCJ',
# 'Connection': 'keep-alive'
# }
```
#### File: webmd/spiders/webmd_spider.py
```python
from scrapy import Spider, Request
from scrapy.selector import Selector
from webmd.items import WebmdItem
import urllib
import re
headers = {'User-Agent': 'Chrome/56.0.2924.87', 'enc_data': 'OXYIMo2UzzqFUzYszFv4lWP6aDP0r+h4AOC2fYVQIl8=', 'timestamp': 'Thu, 09 Feb 2017 02:11:34 GMT', 'client_id': '3454df96-c7a5-47bb-a74e-890fb3c30a0d'}
class WebmdSpider(Spider):
name = "webmd_spider"
    allowed_domains = ['webmd.com']  # was `allowed_urls`, which Scrapy ignores
start_urls = ['http://www.webmd.com/drugs/index-drugs.aspx?show=conditions']
def parse(self, response):
# follow links to next alphabet page
atoz = response.xpath('//*[@id="drugs_view"]/li/a/@href').extract()
print("parsing...")
for i in range(2, len(atoz)):
yield Request(response.urljoin(atoz[i]), callback = self.parse_az, dont_filter= True)
def parse_az(self, response):
# follow links to condition
Aa = response.xpath('//*[@id="showAsubNav"]/ul/li').extract()
print("selecting alphabet...")
for i in range(len(Aa)):
yield Request(response.urljoin(response.xpath('//*[@id="showAsubNav"]/ul/li//a/@href').extract()[i]), \
callback = self.parse_condition,\
dont_filter= True)
def parse_condition(self, response):
# follow links to drugs
table = response.xpath('//*[@id="az-box"]/div//a').extract()
print("scraping condition and following link to drugs...")
for i in range(len(table)):
Condition = response.xpath('//*[@id="az-box"]/div//a/text()').extract()[i]
yield Request(response.urljoin(response.xpath('//*[@id="az-box"]/div//a/@href').extract()[i]), \
callback = self.parse_drug, meta = {'Condition' : Condition},\
dont_filter= True)
def parse_drug(self, response):
# following links to drug details
Condition = response.meta['Condition']
print("scraping drug info and following link to details...")
if re.search('Please select a condition below to view a list', response.body):
yield Request(response.urljoin(response.xpath('//*[@id="fdbSearchResults"]/ul/li[1]/a//@href').extract()[0]),\
callback = self.parse_drug, meta = {'Condition': Condition},\
dont_filter= True)
else:
rows = response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr').extract()
for i in range(len(rows)):
Drug = response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr/td[1]/a/text()').extract()[i]
Indication = response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr/td[2]/@class').extract()[i].replace('drug_ind_fmt', '')
Type = response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr/td[3]/@class').extract()[i].replace('drug_type_fmt', '')
Review = response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr/td[4]/a/text()').extract()[i].replace('\r\n', '')
aspx_index = response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr/td[1]/a/@href').extract()[i].find('aspx') + 4
yield Request(response.urljoin(response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr/td[1]/a//@href').extract()[i][:aspx_index]),\
callback = self.parse_details, meta = {'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type, 'Review': Review},\
dont_filter= True)
def parse_details(self, response):
Condition = response.meta['Condition']
Drug = response.meta['Drug']
Indication = response.meta['Indication']
Type = response.meta['Type']
Review = response.meta['Review']
print("scraping details and following link to contraindications...")
if re.search('The medication you searched for has more', response.body):
yield Request(response.urljoin(response.xpath('//*[@id="ContentPane28"]/div/section/p[1]/a//@href').extract()[0]), \
callback = self.parse_details, meta = {'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type, 'Review': Review},\
dont_filter= True)
else:
Use = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/div/div/div[3]/div[1]/div[1]/h3/preceding-sibling::p//text()').extract())
HowtoUse = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/div/div/div[3]/div[1]/div[1]/h3/following-sibling::p//text()').extract())
Sides = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/div/div/div[3]/div[2]/div/p[1]//text()').extract()).replace('\r\n', '')
Precautions = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/div/div/div[3]/div[3]/div/p//text()').extract())
Interactions = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/div/div/div[3]/div[4]/div[1]/p[2]//text()').extract())
revurl = response.xpath('//*[@id="ContentPane28"]/div/div/div/div[2]/nav/ul/li[7]/a//@href').extract()[0]
if re.search('(rx/)(\d+)',response.xpath('//*[@id="ContentPane28"]/div/div/div/div[4]/div[1]/div/a/@href').extract()[0]):
priceid = re.search('(rx/)(\d+)',response.xpath('//*[@id="ContentPane28"]/div/div/div/div[4]/div[1]/div/a/@href').extract()[0]).group(2)
else:
priceid = ''
if not Use:
Use = ' '
if not Sides:
Sides = ' '
if not Interactions:
Interactions = ' '
if not Precautions:
Precautions = ' '
if not HowtoUse:
HowtoUse = ' '
if re.search('COMMON BRAND NAME', response.body):
BrandName = ', '.join(response.xpath('//*[@id="ContentPane28"]/div/header/section/section[1]/p/a/text()').extract())
GenName = response.xpath('//*[@id="ContentPane28"]/div/header/section/section[2]/p/text()').extract()[0]
if not BrandName:
BrandName = ' '
if not GenName:
GenName = ' '
elif re.search('GENERIC NAME', response.body):
BrandName = ' '
GenName = response.xpath('//*[@id="ContentPane28"]/div/header/section/section[1]/p/text()').extract()[0]
if not GenName:
GenName = ' '
else:
GenName = ' '
BrandName = ' '
yield Request(response.urljoin(response.url + '/list-contraindications'),\
callback = self.parse_avoid, meta = {'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type, 'Review': Review,\
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides,\
'Precautions': Precautions,\
'Interactions': Interactions,\
'BrandName': BrandName,\
'GenName': GenName,\
'revurl': revurl,\
'priceid': priceid},\
dont_filter= True)
def parse_avoid(self, response):
Condition = response.meta['Condition']
Drug = response.meta['Drug']
Indication = response.meta['Indication']
Type = response.meta['Type']
Review = response.meta['Review']
Use = response.meta['Use']
HowtoUse = response.meta['HowtoUse']
Sides = response.meta['Sides']
Precautions = response.meta['Precautions']
Interactions = response.meta['Interactions']
BrandName = response.meta['BrandName']
GenName = response.meta['GenName']
revurl = response.meta['revurl']
priceid = response.meta['priceid']
print("scraping avoid use cases...")
if re.search("We\'re sorry, but we couldn\'t find the page you tried", response.body):
AvoidUse = ' '
Allergies = ' '
elif re.search('Conditions:', response.body):
AvoidUse = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/article/section/p[2]/text()').extract())
Allergies = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/article/section/p[3]/text()').extract())
elif re.search('Allergies:', response.body):
AvoidUse = ' '
Allergies = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/article/section/p[2]/text()').extract())
else:
AvoidUse = ' '
Allergies = ' '
if not AvoidUse:
AvoidUse = ' '
if not Allergies:
Allergies = ' '
yield Request(response.urljoin(revurl), \
callback=self.parse_reviews,
meta={'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type,
'Review': Review, \
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides, \
'Precautions': Precautions, \
'Interactions': Interactions, \
'BrandName': BrandName, \
'GenName': GenName, \
'AvoidUse': AvoidUse,\
'Allergies': Allergies,\
'priceid': priceid}, \
dont_filter=True)
def parse_reviews(self, response):
Condition = response.meta['Condition']
Drug = response.meta['Drug']
Indication = response.meta['Indication']
Type = response.meta['Type']
Review = response.meta['Review']
Use = response.meta['Use']
HowtoUse = response.meta['HowtoUse']
Sides = response.meta['Sides']
Precautions = response.meta['Precautions']
Interactions = response.meta['Interactions']
BrandName = response.meta['BrandName']
GenName = response.meta['GenName']
AvoidUse = response.meta['AvoidUse']
Allergies = response.meta['Allergies']
priceid = response.meta['priceid']
if re.search('Rate this treatment and share your opinion', response.body):
Effectiveness = ' '
EaseofUse = ' '
Satisfaction = ' '
yield Request('http://www.webmd.com/search/2/api/rx/forms/v2/' + priceid, \
method='GET', headers=headers, \
callback=self.parse_prices, \
meta={'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type,
'Review': Review, \
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides, \
'Precautions': Precautions, \
'Interactions': Interactions, \
'BrandName': BrandName, \
'GenName': GenName, \
'AvoidUse': AvoidUse, \
'Allergies': Allergies,
'Effectiveness': Effectiveness, \
'EaseofUse': EaseofUse, \
'Satisfaction': Satisfaction}, \
dont_filter=True)
elif re.search('Be the first to share your experience with this treatment', response.body):
Effectiveness = ' '
EaseofUse = ' '
Satisfaction = ' '
yield Request('http://www.webmd.com/search/2/api/rx/forms/v2/' + priceid, \
method='GET', headers=headers, \
callback=self.parse_prices, \
meta={'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type,
'Review': Review, \
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides, \
'Precautions': Precautions, \
'Interactions': Interactions, \
'BrandName': BrandName, \
'GenName': GenName, \
'AvoidUse': AvoidUse, \
'Allergies': Allergies,
'Effectiveness': Effectiveness, \
'EaseofUse': EaseofUse, \
'Satisfaction': Satisfaction}, \
dont_filter=True)
else:
url = 'http://www.webmd.com/drugs/service/UserRatingService.asmx/GetUserReviewSummary?repositoryId=1&primaryId=' # 6007&secondaryId=-1&secondaryIdValue='
url2 = '&secondaryId=-1&secondaryIdValue='
id = re.search('(drugid=)(\d+)', response.url).group(2)
id2 = urllib.quote(re.sub("\s+", " ", response.xpath('//option[@value = -1]//text()').extract()[0]).strip())
yield Request(url + id + url2 + id2,\
callback= self.parse_ratings, \
meta={'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type,
'Review': Review, \
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides, \
'Precautions': Precautions, \
'Interactions': Interactions, \
'BrandName': BrandName, \
'GenName': GenName, \
'AvoidUse': AvoidUse, \
'Allergies': Allergies, \
'priceid': priceid}, \
dont_filter=True)
def parse_ratings(self, response):
Condition = response.meta['Condition']
Drug = response.meta['Drug']
Indication = response.meta['Indication']
Type = response.meta['Type']
Review = response.meta['Review']
Use = response.meta['Use']
HowtoUse = response.meta['HowtoUse']
Sides = response.meta['Sides']
Precautions = response.meta['Precautions']
Interactions = response.meta['Interactions']
BrandName = response.meta['BrandName']
GenName = response.meta['GenName']
AvoidUse = response.meta['AvoidUse']
Allergies = response.meta['Allergies']
priceid = response.meta['priceid']
if re.search('("xsd:string">)(\d+.\d+)',response.xpath('//*/*').extract()[3]):
Effectiveness = re.search('("xsd:string">)(\d+.\d+)',response.xpath('//*/*').extract()[3]).group(2)
else:
Effectiveness = re.search('("xsd:string">)(\d+)',response.xpath('//*/*').extract()[3]).group(2)
if re.search('("xsd:string">)(\d+.\d+)',response.xpath('//*/*').extract()[4]):
EaseofUse = re.search('("xsd:string">)(\d+.\d+)',response.xpath('//*/*').extract()[4]).group(2)
else:
EaseofUse = re.search('("xsd:string">)(\d+)',response.xpath('//*/*').extract()[4]).group(2)
if re.search('("xsd:string">)(\d+.\d+)',response.xpath('//*/*').extract()[5]):
Satisfaction = re.search('("xsd:string">)(\d+.\d+)',response.xpath('//*/*').extract()[5]).group(2)
else:
Satisfaction = re.search('("xsd:string">)(\d+)',response.xpath('//*/*').extract()[5]).group(2)
if priceid != '':
yield Request('http://www.webmd.com/search/2/api/rx/forms/v2/'+priceid,\
method='GET', headers=headers, \
callback=self.parse_prices, \
meta={'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type,
'Review': Review, \
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides, \
'Precautions': Precautions, \
'Interactions': Interactions, \
'BrandName': BrandName, \
'GenName': GenName, \
'AvoidUse': AvoidUse, \
'Allergies': Allergies,
'Effectiveness': Effectiveness,\
'EaseofUse': EaseofUse,\
'Satisfaction': Satisfaction}, \
dont_filter=True)
else:
strength = ' '
form = ' '
val = ' '
EstimatedPrice = ' '
item = WebmdItem()
item['AvoidUse'] = AvoidUse
item['Allergies'] = Allergies
item['Use'] = Use
item['HowtoUse'] = HowtoUse
item['Precautions'] = Precautions
item['Interactions'] = Interactions
item['Sides'] = Sides
item['Condition'] = Condition
item['Drug'] = Drug
item['Indication'] = Indication
item['Type'] = Type
item['Review'] = Review
item['BrandName'] = BrandName
item['GenName'] = GenName
item['Effectiveness'] = Effectiveness
item['EaseofUse'] = EaseofUse
item['Satisfaction'] = Satisfaction
item['EstimatedPrice'] = EstimatedPrice
item['Dosage'] = strength
item['PkgCount'] = val
item['Form'] = form
yield item
def parse_prices(self, response):
Condition = response.meta['Condition']
Drug = response.meta['Drug']
Indication = response.meta['Indication']
Type = response.meta['Type']
Review = response.meta['Review']
Use = response.meta['Use']
HowtoUse = response.meta['HowtoUse']
Sides = response.meta['Sides']
Precautions = response.meta['Precautions']
Interactions = response.meta['Interactions']
BrandName = response.meta['BrandName']
GenName = response.meta['GenName']
AvoidUse = response.meta['AvoidUse']
Allergies = response.meta['Allergies']
Effectiveness = response.meta['Effectiveness']
EaseofUse = response.meta['EaseofUse']
Satisfaction = response.meta['Satisfaction']
if re.search('("NDC":\[")(\d+)', response.body):
if re.search('("value":)(\d+)', response.body).group(2):
ndc = re.search('("NDC":\[")(\d+)', response.body).group(2)
val = re.search('("value":)(\d+)', response.body).group(2)
if re.search('("form":")(\w+)', response.body):
form = re.search('("form":")(\w+)', response.body).group(2)
else:
form = ' '
if re.search('("strength":")(\d+\s+\w+)', response.body):
strength = re.search('("strength":")(\d+\s+\w+)', response.body).group(2)
else:
strength = ' '
urlp = 'http://www.webmd.com/search/2/api/rx/pricing/ndc/'
urlp2 = '00000?lat=40.7466&lng=-73.9098&rad=5&rollup=true&pgroup='
yield Request(urlp + ndc + '/' + val + '/' + urlp2, \
method='GET',
headers=headers,
callback=self.parse_estprice,
meta={'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type,
'Review': Review, \
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides, \
'Precautions': Precautions, \
'Interactions': Interactions, \
'BrandName': BrandName, \
'GenName': GenName, \
'AvoidUse': AvoidUse, \
'Allergies': Allergies,
'Effectiveness': Effectiveness, \
'EaseofUse': EaseofUse, \
'Satisfaction': Satisfaction,\
'strength': strength,\
'val': val,\
'form': form}, \
dont_filter=True)
else:
strength = ' '
form = ' '
val= ' '
EstimatedPrice = ' '
item = WebmdItem()
item['AvoidUse'] = AvoidUse
item['Allergies'] = Allergies
item['Use'] = Use
item['HowtoUse'] = HowtoUse
item['Precautions'] = Precautions
item['Interactions'] = Interactions
item['Sides'] = Sides
item['Condition'] = Condition
item['Drug'] = Drug
item['Indication'] = Indication
item['Type'] = Type
item['Review'] = Review
item['BrandName'] = BrandName
item['GenName'] = GenName
item['Effectiveness'] = Effectiveness
item['EaseofUse'] = EaseofUse
item['Satisfaction'] = Satisfaction
item['EstimatedPrice'] = EstimatedPrice
item['Dosage'] = strength
item['PkgCount'] = val
item['Form'] = form
yield item
def parse_estprice(self,response):
Condition = response.meta['Condition']
Drug = response.meta['Drug']
Indication = response.meta['Indication']
Type = response.meta['Type']
Review = response.meta['Review']
Use = response.meta['Use']
HowtoUse = response.meta['HowtoUse']
Sides = response.meta['Sides']
Precautions = response.meta['Precautions']
Interactions = response.meta['Interactions']
BrandName = response.meta['BrandName']
GenName = response.meta['GenName']
AvoidUse = response.meta['AvoidUse']
Allergies = response.meta['Allergies']
Effectiveness = response.meta['Effectiveness']
EaseofUse = response.meta['EaseofUse']
Satisfaction = response.meta['Satisfaction']
strength = response.meta['strength']
val = response.meta['val']
form = response.meta['form']
if re.search('("PharmacyGroupMinPrice":)(\d+.\d+)', response.body):
EstimatedPrice = re.search('("PharmacyGroupMinPrice":)(\d+.\d+)', response.body).group(2)
item = WebmdItem()
item['AvoidUse'] = AvoidUse
item['Allergies'] = Allergies
item['Use'] = Use
item['HowtoUse'] = HowtoUse
item['Precautions'] = Precautions
item['Interactions'] = Interactions
item['Sides'] = Sides
item['Condition'] = Condition
item['Drug'] = Drug
item['Indication'] = Indication
item['Type'] = Type
item['Review'] = Review
item['BrandName'] = BrandName
item['GenName'] = GenName
item['Effectiveness'] = Effectiveness
item['EaseofUse'] = EaseofUse
item['Satisfaction'] = Satisfaction
item['EstimatedPrice'] = EstimatedPrice
item['Dosage'] = strength
item['PkgCount'] = val
item['Form'] = form
yield item
elif re.search('("PharmacyGroupMinPrice":)(\d+)', response.body):
EstimatedPrice = re.search('("PharmacyGroupMinPrice":)(\d+)', response.body).group(2)
item = WebmdItem()
item['AvoidUse'] = AvoidUse
item['Allergies'] = Allergies
item['Use'] = Use
item['HowtoUse'] = HowtoUse
item['Precautions'] = Precautions
item['Interactions'] = Interactions
item['Sides'] = Sides
item['Condition'] = Condition
item['Drug'] = Drug
item['Indication'] = Indication
item['Type'] = Type
item['Review'] = Review
item['BrandName'] = BrandName
item['GenName'] = GenName
item['Effectiveness'] = Effectiveness
item['EaseofUse'] = EaseofUse
item['Satisfaction'] = Satisfaction
item['EstimatedPrice'] = EstimatedPrice
item['Dosage'] = strength
item['PkgCount'] = val
item['Form'] = form
yield item
else:
EstimatedPrice = ' '
item = WebmdItem()
item['AvoidUse'] = AvoidUse
item['Allergies'] = Allergies
item['Use'] = Use
item['HowtoUse'] = HowtoUse
item['Precautions'] = Precautions
item['Interactions'] = Interactions
item['Sides'] = Sides
item['Condition'] = Condition
item['Drug'] = Drug
item['Indication'] = Indication
item['Type'] = Type
item['Review'] = Review
item['BrandName'] = BrandName
item['GenName'] = GenName
item['Effectiveness'] = Effectiveness
item['EaseofUse'] = EaseofUse
item['Satisfaction'] = Satisfaction
item['EstimatedPrice'] = EstimatedPrice
item['Dosage'] = strength
item['PkgCount'] = val
item['Form'] = form
yield item
``` |
{
"source": "27Saumya/dhravyapy",
"score": 3
} |
#### File: dhravyapy/examples/qrcode.py
```python
import dhravyapy
import asyncio
async def main():
    # the text of the qrcode (can be links or just normal text)
text = "https://api.dhravya.me"
qrcode = await dhravyapy.Image.qrcode(text)
# saving the image of the qrcode
await qrcode.save("qrcode.png")
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```
#### File: dhravyapy/examples/trivia.py
```python
import dhravyapy
import asyncio
async def main():
trivia = await dhravyapy.Info.trivia()
question = trivia.question
answer = trivia.answer
x = input(f"{question} \nType the answer...")
    if x.lower() == answer.lower():
        print("Good job, you got the right answer!")
    else:
        print(":/ Let's do a different question")
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
``` |
{
"source": "27Saumya/next",
"score": 2
} |
#### File: boot/be/declarations.py
```python
from typing import Any
from llvmlite import ir, binding as _binding
class Delarator:
def __init__(self, module: ir.Module, binding: _binding):
self.module = module
self.binding = binding
self.print_()
def print_(self):
voidptr_ty = ir.IntType(8).as_pointer()
print_ty = ir.FunctionType(ir.IntType(32), [voidptr_ty], True)
self.print = ir.Function(self.module, print_ty, 'printf')
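    # The trailing `True` in the FunctionType above marks printf as variadic,
    # mirroring the C declaration `int printf(const char *fmt, ...)`.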
```
#### File: boot/be/ir_core.py
```python
import os
from llvmlite import ir, binding
from .declarations import Delarator
class Configurator:
def __init__(self):
self.binding = binding
self.binding.initialize()
self.binding.initialize_native_target()
self.binding.initialize_native_asmprinter()
self._initiate_execution_engine()
self.module = ir.Module(name=__file__)
self.module.triple = self.binding.get_default_triple()
_fn_t = ir.FunctionType(ir.VoidType(), [], False)
main_fn = ir.Function(self.module, _fn_t, 'main')
block = main_fn.append_basic_block(name='entry')
self.builder = ir.IRBuilder(block)
self._funcs = Delarator(self.module, self.binding)
def _initiate_execution_engine(self):
t = self.binding.Target.from_default_triple()
t_machine = t.create_target_machine()
backing_mod = binding.parse_assembly('')
engine = binding.create_mcjit_compiler(backing_mod, target_machine=t_machine)
self._engine = engine
def compile_(self) -> binding.ModuleRef:
self.builder.ret_void()
_ir = str(self.module)
mod = self.binding.parse_assembly(_ir)
try:
mod.verify()
except RuntimeError:
raise
self._engine.add_module(mod)
self._engine.finalize_object()
self._engine.run_static_constructors()
return mod
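    # compile_() follows the usual llvmlite MCJIT flow: textual IR -> parsed
    # module -> verify -> add_module -> finalize_object; after that the
    # engine could hand out entry points via get_function_address("main").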
def create_output(self, fp: str):
try:
with open(file=fp, mode='x') as _fp:
self.compile_()
_fp.write(str(self.module))
except FileExistsError:
os.unlink(fp)
self.create_output(fp)
except FileNotFoundError:
dir = fp.split('/')[0]
os.makedirs(dir)
self.create_output(fp)
```
#### File: boot/fe/ast.py
```python
from llvmlite import ir
from ..be import ir_core
class Print:
def __init__(self, ir_core: ir_core.Configurator, value):
self.ir = ir_core
self.value = value
def eval(self):
val = self.value.eval()
voidptr_type = ir.IntType(8).as_pointer()
fmt = '%i \n\0'
c_fmt = ir.Constant(ir.ArrayType(ir.IntType(8), len(fmt)), bytearray(fmt.encode('utf8')))
global_fmt = ir.GlobalVariable(self.ir.module, c_fmt.type, 'fstr')
global_fmt.linkage = 'internal'
global_fmt.global_constant = True
global_fmt.initializer = c_fmt
fmt_arg = self.ir.builder.bitcast(global_fmt, voidptr_type)
self.ir.builder.call(self.ir._funcs.print, [fmt_arg, val])
class Integer:
def __init__(self, ir_core: ir_core.Configurator, value: str):
self.ir = ir_core
self.value = value
    def eval(self):
        # Use a fixed 32-bit int: printf's "%i" expects i32. The original
        # `ir.IntType(len(self.value))` sized the type by the digit count,
        # which overflows for any value that needs more bits than digits.
        return ir.Constant(ir.IntType(32), int(self.value))
``` |
{
"source": "27Saumya/yarsaw",
"score": 3
} |
#### File: yarsaw/examples/dpy.py
```python
from discord.ext import commands
import yarsaw
bot = commands.Bot(command_prefix="!")
# create yarsaw client
client = yarsaw.Client("Random Stuff API Key", "RapidAPI Application Key")
@bot.command()
async def joke(ctx):
joke = await client.get_safe_joke()
await ctx.send(yarsaw.format_joke(joke))
bot.load_extension("dpy-cogs") # load the cog from dpy-cogs.py
bot.run("TOKEN")
```
#### File: yarsaw/tests/endpoints_test.py
```python
import unittest
import yarsaw
import os
import dotenv
import asyncio
dotenv.load_dotenv()
client = yarsaw.Client(os.getenv("RSA_KEY"), os.getenv("APP_KEY"))
class TestEndpoints(unittest.TestCase):
async def ai(self):
res = await client.get_ai_response("Hello")
self.assertIsInstance(res, yarsaw.AIResponse)
async def animal(self):
res = await client.get_animal_image("Fox")
self.assertIsInstance(res, yarsaw.Image)
async def anime(self):
res = await client.get_anime_gif("happy")
self.assertIsInstance(res, yarsaw.Image)
async def canvas(self):
res = await client.canvas("changemymind", txt="YARSAW is Awesome!")
self.assertIsInstance(res, yarsaw.CanvasResponse)
async def joke(self):
res = await client.get_joke()
self.assertIsInstance(res, yarsaw.Joke)
async def fetch_subreddit_post(self):
res = await client.fetch_subreddit_post("aww")
self.assertIsInstance(res, yarsaw.RedditPost)
async def fetch_post(self):
res = await client.fetch_post("aww")
self.assertIsInstance(res, yarsaw.RedditPost)
async def fetch_post_by_id(self):
res = await client.fetch_post_by_id("awyf90")
self.assertIsInstance(res, yarsaw.RedditPost)
async def fetch_random_post(self):
res = await client.fetch_random_post()
self.assertIsInstance(res, yarsaw.RedditPost)
async def random_meme(self):
res = await client.random_meme()
self.assertIsInstance(res, yarsaw.RedditPost)
async def get_weather(self):
res = await client.get_weather("New York")
self.assertIsInstance(res, dict)
async def get_fact(self):
res = await client.get_fact()
self.assertIsInstance(res, yarsaw.Fact)
def test_ai(self):
asyncio.get_event_loop().run_until_complete(self.ai())
def test_animal(self):
asyncio.get_event_loop().run_until_complete(self.animal())
def test_anime(self):
asyncio.get_event_loop().run_until_complete(self.anime())
def test_canvas(self):
asyncio.get_event_loop().run_until_complete(self.canvas())
def test_joke(self):
asyncio.get_event_loop().run_until_complete(self.joke())
def test_weather(self):
asyncio.get_event_loop().run_until_complete(self.get_weather())
def test_reddit(self):
asyncio.get_event_loop().run_until_complete(self.fetch_subreddit_post())
asyncio.get_event_loop().run_until_complete(self.fetch_post())
asyncio.get_event_loop().run_until_complete(self.fetch_post_by_id())
asyncio.get_event_loop().run_until_complete(self.fetch_random_post())
asyncio.get_event_loop().run_until_complete(self.random_meme())
def test_fact(self):
asyncio.get_event_loop().run_until_complete(self.get_fact())
if __name__ == "__main__":
unittest.main()
```
#### File: yarsaw/yarsaw/client.py
```python
from .httpclient import *
from .utils import *
import base64
import aiohttp
from .data_classes import *
from typing import Union
class Client(HTTPClient):
"""
Represents a client object used to interact with the Random Stuff API.
Parameters
----------
authorization : :class:`str`
Your API Key for the Random Stuff API used to authenticate your requests.
key : :class:`str`
Your RapidAPI-Key for the Random Stuff API used to authenticate your requests.
"""
async def get_ai_response(self, message: str, **kwargs) -> AIResponse:
"""
Gets AI responses from the API.
Parameters
-------------
message: :class:`str`
The message you want to get the AI response for.
id: Optional[Union[:class:`str`, :class:`int`]]
Assign an unique ID for customized response for each user.
bot_name: Optional[:class:`str`]
Set a name for the AI replying to your message.
bot_gender: Optional[:class:`str`]
Set a gender for the AI replying to your message.
bot_master: Optional[:class:`str`]
The creator/master of the AI replying to your message.
bot_age: Optional[:class:`str`]
The age of the AI replying to your message.
bot_company: Optional[:class:`str`]
The company that owns the AI replying to your message.
bot_location: Optional[:class:`str`]
The location of the AI replying to your message.
bot_email: Optional[:class:`str`]
The email of the AI replying to your message.
bot_build: Optional[:class:`str`]
The build of the AI replying to your message.
bot_birth_year: Optional[:class:`str`]
The birth year of the AI replying to your message.
bot_birth_date: Optional[:class:`str`]
The birth date of the AI replying to your message.
bot_birth_place: Optional[:class:`str`]
The birth place of the AI replying to your message.
bot_favorite_color: Optional[:class:`str`]
The favorite color of the AI replying to your message.
bot_favorite_book: Optional[:class:`str`]
The favorite book of the AI replying to your message.
bot_favorite_band: Optional[:class:`str`]
The favorite band of the AI replying to your message.
bot_favorite_artist: Optional[:class:`str`]
The favorite artist of the AI replying to your message.
bot_favorite_actress: Optional[:class:`str`]
The favorite actress of the AI replying to your message.
bot_favorite_actor: Optional[:class:`str`]
The favorite actor of the AI replying to your message.
Returns
-------------
:class:`AIResponse`
An object containing the AI response and its details.
"""
response = await self.request("ai", params={"msg": message, **kwargs})
return AIResponse(
response.body["AIResponse"],
BotDetails(
response.body["BotDetails"]["BotName"],
response.body["BotDetails"]["BotMaster"],
response.body["BotDetails"]["BotAge"],
response.body["BotDetails"]["BotLocation"],
response.body["BotDetails"]["BotCompany"],
response.body["BotDetails"]["BotBirthYear"],
response.body["BotDetails"]["BotBirthDate"],
response.body["BotDetails"]["BotBirthPlace"],
),
APIInfo(
int(response.headers["X-RateLimit-Requests-Limit"]),
int(response.headers["X-RateLimit-Requests-Remaining"]),
int(response.headers["X-RateLimit-Requests-Reset"]),
),
)
async def get_animal_image(self, animal: str, amount: int = 1) -> Image:
"""
Gets animal images from the API.
Parameters
-------------
animal: :class:`str`
The animal you want to get images for. Supported Animals: Dog, Cat, Wolf, Fox
amount: Optional[:class:`int`]
The amount of images you want to get.
Returns
-------------
:class:`Image`
An object containing the image.
"""
try:
if animal.upper() not in ANIMAL_TYPES:
raise ValueError(
"Animal not supported. Supported animals are: "
+ ", ".join(ANIMAL_TYPES)
)
except AttributeError as e:
raise ValueError(
"Invalid Parameter Type. Make sure you are passing a string."
) from e
response = await self.request(
f"animals/{animal.upper()}", params={"limit": amount}
)
images = response.body
image_list = [image["url"] for image in images]
return Image(
image_list,
APIInfo(
int(response.headers["X-RateLimit-Requests-Limit"]),
int(response.headers["X-RateLimit-Requests-Remaining"]),
int(response.headers["X-RateLimit-Requests-Reset"]),
),
)
async def get_anime_gif(self, gif_type: str, amount: int = 1) -> Image:
"""
Gets an anime gif from the API.
Parameters
-------------
gif_type: :class:`str`
The type of gif you want to get. Allowed Types: happy, hi, kiss, hug, punch, pat, slap, nervous, run, cry
amount: Optional[:class:`int`]
The number of gifs you want to get.
Returns
-------------
:class:`Image`
An object containing the gif.
"""
try:
if gif_type.lower() not in ANIME_TYPES:
raise ValueError(
"Invalid Anime GIF Type. Supported types are: "
+ ", ".join(ANIME_TYPES)
)
except AttributeError as e:
raise ValueError(
"Invalid Parameter Type. Make sure you are passing a string."
) from e
response = await self.request(
f"anime/{gif_type.lower()}", params={"limit": amount}
)
gifs = response.body
gif_list = [gif["url"] for gif in gifs]
return Image(
gif_list,
APIInfo(
int(response.headers["X-RateLimit-Requests-Limit"]),
int(response.headers["X-RateLimit-Requests-Remaining"]),
int(response.headers["X-RateLimit-Requests-Reset"]),
),
)
async def canvas(
self, method, save_to=None, txt=None, text=None, img1=None, img2=None, img3=None
) -> Union[CanvasResponse, int]:
"""
Edit Images with the API.
Parameters
-------------
method: :class:`str`
The method to be used to edit the image.
**Allowed Methods**:
- Method(s) in which only 1 image is required: ``affect``, ``beautiful``, ``wanted``, ``delete``, ``trigger``, ``facepalm``, ``blur``, ``hitler``, ``kiss``, ``jail``, ``invert``, ``jokeOverHead``
- Method(s) in which 2 images are required: ``bed``, ``fuse`` , ``kiss``, ``slap``, ``spank``
- Method(s) in which 3 images are required: ``distracted``
- Method(s) in which only Text is required: ``changemymind``
save_to: Optional[:class:`str`]
The path to save the edited image to. If not specified, the edited image will be returned as bytes.
txt: Optional[:class:`str`]
The text required for your method.
text: Optional[:class:`str`]
The text required for your method. Alias of txt.
img1: Optional[:class:`str`]
The path/link to the first image.
img2: Optional[:class:`str`]
The path/link to the second image.
img3: Optional[:class:`str`]
The path/link to the third image.
Returns
-------------
Union[:class:`CanvasResponse`, :class:`int`]
If save_to is not specified, the edited image will be returned as a Response object containing the base64 encoded image.
            If save_to is specified, the edited image will be saved to the specified path, and the method will return 200.
"""
params = {
"txt": txt or text or "",
"img1": img1 or "",
"img2": img2 or "",
"img3": img3 or "",
}
response = await self.request(f"canvas/{method}", params=params)
base = response.body["base64"]
if save_to:
with open(save_to, "wb") as file:
file.write(base64.b64decode((base)))
return 200
return CanvasResponse(
base64.b64decode((base)),
base,
APIInfo(
int(response.headers["X-RateLimit-Requests-Limit"]),
int(response.headers["X-RateLimit-Requests-Remaining"]),
int(response.headers["X-RateLimit-Requests-Reset"]),
),
)
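    # Usage sketch (illustrative values):
    #   await client.canvas("changemymind", txt="YARSAW is awesome", save_to="out.png")
    # writes the decoded PNG to out.png and returns 200.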
async def get_joke(self, joke_type="any", blacklist: list = None) -> Joke:
"""
Fetches jokes from the API.
Parameters
-------------
joke_type: Optional[:class:`str`]
The type of joke you want to fetch.
Allowed Values: "any", "dark", "pun", "spooky", "christmas", "Programming", "misc"
blacklist: Optional[:class:`list`]
A list of types jokes you want to blacklist.
Allowed Values: "all", "nsfw", "religious", "political", "racist", "sexist", "explicit"
Returns
-------------
:class:`Joke`
An object containing the joke and its details.
"""
if blacklist is None:
blacklist = []
joke_type = joke_type.lower()
if joke_type.lower() not in JOKE_TYPES:
supported_types = ", ".join(JOKE_TYPES)
raise ValueError(f"Invalid Type. Supported types are: {supported_types}")
# API Bug: The Joke Type Query must be titlecased if the type of joke is "programming"
if joke_type.lower() == "programming":
joke_type = "Programming"
else:
joke_type = joke_type.lower()
blist = ""
if blacklist:
if "all" in blacklist:
blist = "nsfw&religious&political&racist&sexist&explicit"
else:
blist = "&".join(blacklist)
response = await self.request(
f"joke?blacklist={blist}", params={"type": joke_type}
)
if response.body["type"] == "twopart":
return Joke(
response.body["error"],
response.body["category"],
response.body["type"],
response.body["flags"],
response.body["id"],
response.body["safe"],
response.body["lang"],
APIInfo(
int(response.headers["X-RateLimit-Requests-Limit"]),
int(response.headers["X-RateLimit-Requests-Remaining"]),
int(response.headers["X-RateLimit-Requests-Reset"]),
),
setup=response.body["setup"],
delivery=response.body["delivery"],
)
return Joke(
response.body["error"],
response.body["category"],
response.body["type"],
response.body["flags"],
response.body["id"],
response.body["safe"],
response.body["lang"],
APIInfo(
int(response.headers["X-RateLimit-Requests-Limit"]),
int(response.headers["X-RateLimit-Requests-Remaining"]),
int(response.headers["X-RateLimit-Requests-Reset"]),
),
joke=response.body["joke"],
)
async def get_safe_joke(self, joke_type="any") -> Joke:
"""
Fetches safe jokes from the API. These jokes are family-friendly.
Parameters
-------------
joke_type: Optional[:class:`str`]
The type of joke you want to fetch.
Allowed Values: "any", "dark", "pun", "spooky", "christmas", "Programming", "misc"
Returns
-------------
:class:`Joke`
An object containing the joke and its details.
"""
joke = await self.get_joke(joke_type=joke_type, blacklist=["all"])
while joke.safe is not True:
joke = await self.get_joke(joke_type=joke_type, blacklist=["all"])
return joke
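    # Usage sketch (mirrors examples/dpy.py; format_joke is assumed from there):
    #   joke = await client.get_safe_joke("programming")
    #   await ctx.send(yarsaw.format_joke(joke))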
async def fetch_subreddit_post(
self, subreddit: str, search_type: str = "hot"
) -> RedditPost:
"""
Fetches a random post from a subreddit.
Parameters
-------------
subreddit: :class:`str`
The subreddit to fetch a post from.
search_type: Optional[:class:`str`]
This is how it sorts the posts. Allows: "hot", "new", "rising", "top"
Returns
-------------
:class:`RedditPost`
An object containing the post and its details.
"""
if search_type.lower() not in SEARCH_TYPES:
raise ValueError(
"Invalid Search Type. Supported types are: " + ", ".join(SEARCH_TYPES)
)
res = await self.request(
"reddit/FetchSubredditPost",
params={"subreddit": subreddit, "searchType": search_type},
)
return RedditPost(
res.body["id"],
res.body["type"],
res.body["title"],
res.body["author"],
res.body["postLink"],
res.body["image"],
res.body["gallery"],
res.body["text"],
res.body["thumbnail"],
res.body["subreddit"],
res.body["NSFW"],
res.body["spoiler"],
res.body["createdUtc"],
res.body["upvotes"],
res.body["downvotes"],
res.body["upvoteRatio"],
APIInfo(
int(res.headers["X-RateLimit-Requests-Limit"]),
int(res.headers["X-RateLimit-Requests-Remaining"]),
int(res.headers["X-RateLimit-Requests-Reset"]),
),
)
async def fetch_post(self, subreddit: str, search_type: str = "hot") -> RedditPost:
"""
Fetches a random post from a subreddit. This is an alias of :meth:`fetch_subreddit_post`.
Parameters
-------------
subreddit: :class:`str`
The subreddit to fetch a post from.
search_type: Optional[:class:`str`]
This is how it sorts the posts. Allows: "hot", "new", "rising", "top"
Returns
-------------
:class:`RedditPost`
An object containing the post and its details.
"""
if search_type.lower() not in SEARCH_TYPES:
raise ValueError(
"Invalid Search Type. Supported types are: " + ", ".join(SEARCH_TYPES)
)
res = await self.request(
"reddit/FetchPost",
params={"subreddit": subreddit, "searchType": search_type},
)
return RedditPost(
res.body["id"],
res.body["type"],
res.body["title"],
res.body["author"],
res.body["postLink"],
res.body["image"],
res.body["gallery"],
res.body["text"],
res.body["thumbnail"],
res.body["subreddit"],
res.body["NSFW"],
res.body["spoiler"],
res.body["createdUtc"],
res.body["upvotes"],
res.body["downvotes"],
res.body["upvoteRatio"],
APIInfo(
int(res.headers["X-RateLimit-Requests-Limit"]),
int(res.headers["X-RateLimit-Requests-Remaining"]),
int(res.headers["X-RateLimit-Requests-Reset"]),
),
)
async def random_meme(self, search_type: str = "hot") -> RedditPost:
"""
Gets a random meme from reddit.
Parameters
-------------
search_type: Optional[:class:`str`]
This is how it sorts the posts. Allows: "hot", "new", "rising", "top"
Returns
-------------
:class:`RedditPost`
An object containing the post and its details.
"""
if search_type.lower() not in SEARCH_TYPES:
raise ValueError(
"Invalid Search Type. Supported types are: " + ", ".join(SEARCH_TYPES)
)
res = await self.request(
"reddit/RandomMeme", params={"searchType": search_type}
)
return RedditPost(
res.body["id"],
res.body["type"],
res.body["title"],
res.body["author"],
res.body["postLink"],
res.body["image"],
res.body["gallery"],
res.body["text"],
res.body["thumbnail"],
res.body["subreddit"],
res.body["NSFW"],
res.body["spoiler"],
res.body["createdUtc"],
res.body["upvotes"],
res.body["downvotes"],
res.body["upvoteRatio"],
APIInfo(
int(res.headers["X-RateLimit-Requests-Limit"]),
int(res.headers["X-RateLimit-Requests-Remaining"]),
int(res.headers["X-RateLimit-Requests-Reset"]),
),
)
async def fetch_random_post(self, search_type: str = "hot") -> RedditPost:
"""
Fetches a random post from reddit.
Parameters
-------------
search_type: Optional[:class:`str`]
This is how it sorts the posts. Allows: "hot", "new", "rising", "top"
Returns
-------------
:class:`RedditPost`
An object containing the post and its details.
"""
if search_type.lower() not in SEARCH_TYPES:
raise ValueError(
"Invalid Search Type. Supported types are: " + ", ".join(SEARCH_TYPES)
)
res = await self.request(
"reddit/FetchRandomPost", params={"searchType": search_type}
)
return RedditPost(
res.body["id"],
res.body["type"],
res.body["title"],
res.body["author"],
res.body["postLink"],
res.body["image"],
res.body["gallery"],
res.body["text"],
res.body["thumbnail"],
res.body["subreddit"],
res.body["NSFW"],
res.body["spoiler"],
res.body["createdUtc"],
res.body["upvotes"],
res.body["downvotes"],
res.body["upvoteRatio"],
APIInfo(
int(res.headers["X-RateLimit-Requests-Limit"]),
int(res.headers["X-RateLimit-Requests-Remaining"]),
int(res.headers["X-RateLimit-Requests-Reset"]),
),
)
async def fetch_post_by_id(
self, post_id: str, search_type: str = "hot"
) -> RedditPost:
"""
Fetch a reddit post by its ID.
Parameters
-------------
post_id: :class:`str`
The ID of the post to fetch.
search_type: Optional[:class:`str`]
This is how it sorts the posts. Allows: "hot", "new", "rising", "top"
Returns
-------------
:class:`RedditPost`
An object containing the post and its details.
"""
if search_type.lower() not in SEARCH_TYPES:
raise ValueError(
"Invalid Search Type. Supported types are: " + ", ".join(SEARCH_TYPES)
)
res = await self.request(
"reddit/FetchPostById", params={"id": post_id, "searchType": search_type}
)
return RedditPost(
res.body["id"],
res.body["type"],
res.body["title"],
res.body["author"],
res.body["postLink"],
res.body["image"],
res.body["gallery"],
res.body["text"],
res.body["thumbnail"],
res.body["subreddit"],
res.body["NSFW"],
res.body["spoiler"],
res.body["createdUtc"],
res.body["upvotes"],
res.body["downvotes"],
res.body["upvoteRatio"],
APIInfo(
int(res.headers["X-RateLimit-Requests-Limit"]),
int(res.headers["X-RateLimit-Requests-Remaining"]),
int(res.headers["X-RateLimit-Requests-Reset"]),
),
)
# NOT TESTED - 401 Unauthorized
async def get_weather(self, city: str) -> list:
"""
Gets the weather for a city.
Parameters
-------------
city: :class:`str`
The city to get the weather for.
Returns
-------------
:class:`list`
A list containing the weather details.
"""
res = await self.request("weather", params={"city": city})
        try:
            res.body.append(
                APIInfo(
                    int(res.headers["X-RateLimit-Requests-Limit"]),
                    int(res.headers["X-RateLimit-Requests-Remaining"]),
                    int(res.headers["X-RateLimit-Requests-Reset"]),
                )
            )
        except (KeyError, AttributeError, ValueError):
            pass  # rate-limit headers missing or body is not a list; return it unchanged
        return res.body
## PREMIUM ENDPOINTS
async def get_fact(self, fact_type="all") -> Fact:
"""
Fetches a random fact from the API. PREMIUM ENDPOINT.
Parameters
-------------
fact_type: Optional[:class:`str`]
The type of fact you want to fetch.
Returns
-------------
:class:`Fact`
An object containing the fact.
"""
if fact_type.lower() not in FACT_TYPES:
raise ValueError(
"Invalid Fact Type. Supported types are: " + ", ".join(FACT_TYPES)
)
res = await self.request(f"facts/{fact_type.lower()}")
return Fact(
res.body["fact"],
APIInfo(
int(res.headers["X-RateLimit-Requests-Limit"]),
int(res.headers["X-RateLimit-Requests-Remaining"]),
int(res.headers["X-RateLimit-Requests-Reset"]),
),
)
async def get_waifu(self, image_type, waifu_type=None) -> Waifu:
"""
Fetches SFW or NSFW waifu images from the API. PREMIUM ENDPOINT.
Parameters
-------------
image_type: :class:`str`
Whether you want SFW or NSFW images.
waifu_type: Optional[:class:`str`]
The type of waifu you want to fetch. Visit https://api-docs.pgamerx.com/Documentation/premium/waifu/#available-waifu_types for all available waifu types.
Returns
-------------
:class:`Waifu`
An object containing the waifu image url.
"""
if waifu_type is None:
waifu_type = ""
res = await self.request(
f"waifu/{image_type}", params={"waifu_type": waifu_type}
)
return Waifu(
res.body["url"],
APIInfo(
int(res.headers["X-RateLimit-Requests-Limit"]),
int(res.headers["X-RateLimit-Requests-Remaining"]),
int(res.headers["X-RateLimit-Requests-Reset"]),
),
)
async def disconnect(self):
"""Closes the Client Session"""
await self._session.close()
async def reconnect(self):
"""Restarts the Client Connection"""
self._session = aiohttp.ClientSession()
``` |
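A minimal usage sketch for the client above. The enclosing class and its constructor are not visible in this excerpt, so `RandomStuffClient("api-key")` is an assumed name and signature; only the method names (`random_meme`, `fetch_random_post`, `get_fact`, `disconnect`) and return types are taken from the code.

```python
import asyncio

# Hypothetical wiring: class name and constructor are assumptions;
# method names and the RedditPost/Fact return wrappers are from the code above.
async def demo():
    client = RandomStuffClient("api-key")
    meme = await client.random_meme(search_type="top")  # returns a RedditPost
    post = await client.fetch_random_post()             # search_type defaults to "hot"
    fact = await client.get_fact("all")                 # premium endpoint, returns a Fact
    await client.disconnect()                           # closes the aiohttp session

asyncio.run(demo())
```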
{
"source": "27Saumya/Yuki",
"score": 2
} |
#### File: 27Saumya/Yuki/bot.py
```python
import discord
from discord.ext import tasks, commands
import os
from discord.commands import Option, SlashCommandGroup
from pytube import YouTube
import asyncio
import config
from utils.buttons import TicketPanelView, TicketControlsView, TicketCloseTop
from cogs.help import HelpOptions, members
import sqlite3
from utils.helpers.help import Help_Embed
from utils.helpers.configuration import get_prefix
import giphy_client
import topgg
import aiohttp
from typing import *
class Bot(commands.Bot):
"""Subclass of `commands.Bot` (This will be our Yuki Bot)"""
def __init__(self):
super().__init__(
command_prefix=get_prefix,
description="Yuki ✨ has many features! Try it Out INVITE ME now!",
intents=discord.Intents().all(),
            case_insensitive=True,
strip_after_prefix=True)
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
self.load_extension(f"cogs.{filename[:-3]}")
self.load_extension("utils.buttons")
self.load_extension("jishaku")
self.db = sqlite3.connect("utils/databases/main.db")
self.dbcursor = self.db.cursor()
self.persistent_views_added = False
self.giphy = giphy_client.DefaultApi()
self.DEFAULT_PREFIX = '+'
self.topggpy = topgg.DBLClient(self, config.TOPGG_TOKEN, autopost=True, post_shard_count=True)
self.session = aiohttp.ClientSession()
self.updateactivity.start()
self.update_topgg_stats.start()
async def on_ready(self):
print(f"{self.user.name} is online!")
if not self.persistent_views_added:
self.add_view(TicketPanelView(self))
self.add_view(TicketControlsView(self))
self.add_view(TicketCloseTop(self))
self.persistent_views_added = True
self.dbcursor.execute('CREATE TABLE IF NOT EXISTS ticket (guild_id INTEGER , count INTEGER, category INTEGER)')
self.dbcursor.execute('CREATE TABLE IF NOT EXISTS settings (guild_id INTEGER, "bump")')
self.dbcursor.execute('CREATE TABLE IF NOT EXISTS tickets (guild_id INTEGER, channel_id INTEGER, opener INTEGER, switch TEXT)')
self.dbcursor.execute('CREATE TABLE IF NOT EXISTS guilds (guild_id INTEGER, prefix TEXT)')
self.db.commit()
async def on_guild_join(self, guild: discord.Guild):
self.dbcursor.execute('INSERT INTO guilds (guild_id, prefix) VALUES (?,?)', (guild.id, "+"))
self.db.commit()
print(f"Joined guild- {guild.name}\nAdded the server to database!")
async def on_guild_remove(self, guild: discord.Guild):
try:
self.dbcursor.execute('DELETE FROM guilds WHERE guild_id=?', (guild.id,))
print(f"Removed from guild- {guild.name}\nRemoved the server from the database")
except Exception as e:
botOwner = await self.fetch_user(self.owner_id)
await botOwner.send(str(e).capitalize())
    async def on_message(self, message: discord.Message):
        if message.content.lower() == f"<@!{self.user.id}>" or message.content.lower() == f"<@{self.user.id}>":
            self.dbcursor.execute('SELECT * FROM guilds WHERE guild_id=?', (message.guild.id,))
            prefix = self.dbcursor.fetchone()[1]
            await message.channel.send(embed=discord.Embed(description=f"**My prefix for this server is {prefix}**", color=discord.Color.embed_background()))
        try:
            # Messages sent by the bot itself that announce a closed ticket get the support-team controls attached.
            if message.author.id == self.user.id and len(message.embeds) > 0 and message.embeds[0].description.startswith('**Ticket closed by'):
                self.dbcursor.execute('SELECT * FROM tickets WHERE guild_id=? AND channel_id=?', (message.guild.id, message.channel.id))
                data = self.dbcursor.fetchone()
                member = await self.fetch_user(data[2])
                embed = discord.Embed(description="```py\n[Support team ticket controls]```", color=discord.Color.embed_background(theme="dark"))
                await message.channel.send(embed=embed, view=TicketControlsView(self))
        except AttributeError:
            pass
        if message.author == self.user:
            return
        try:
            # Disboard's bump confirmation (bot ID 302050872383242240) starts the two-hour bump reminder, if enabled.
            if message.author.id == 302050872383242240 and len(message.embeds) > 0 and "Bump done! :thumbsup:" in message.embeds[0].description:
                self.dbcursor.execute('SELECT bump FROM settings WHERE guild_id=?', (message.guild.id,))
                data = self.dbcursor.fetchone()
                if not data:
                    return
                self.dbcursor.execute('SELECT * FROM settings WHERE guild_id=?', (message.guild.id,))
                switch = self.dbcursor.fetchone()
                if switch[1] == "off":
                    return
                embed = discord.Embed(description="**<:zerolove:920425612613660753> Thanks for bumping the server <3**", color=discord.Color.green())
                await message.channel.send(embed=embed)
                await asyncio.sleep(3600 * 2)  # Bump delay == 2 hours | 1 hour == 3600 seconds so, 2 hours == 3600*2
                embed = discord.Embed(title="It's time to bump!", description="Use `!d bump` to bump the server!", color=discord.Color.green())
                await message.channel.send(embed=embed)
        except AttributeError:
            pass
        except Exception as e:
            print(e)
        self.dbcursor.execute('SELECT prefix FROM guilds WHERE guild_id=?', (message.guild.id,))
        prefixes = self.dbcursor.fetchone()
        if not prefixes:
            self.dbcursor.execute('INSERT INTO guilds(guild_id, prefix) VALUES (?,?)', (message.guild.id, "+"))
            self.db.commit()
        await self.process_commands(message)
@tasks.loop(seconds=10)
async def updateactivity(self):
"""Updates the bot's activity"""
await self.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=f"+help in {len(self.guilds)} servers for {members(self)} users."))
@updateactivity.before_loop
async def wait_for_ready(self):
"""Waits until the bot is ready"""
await self.wait_until_ready()
@tasks.loop(minutes=30)
async def update_topgg_stats(self):
"""Updates the bot's stats on [top.gg](https://top.gg)"""
await self.wait_until_ready()
try:
await self.topggpy.post_guild_count()
print("Successfully updated bot stats on top.gg")
except Exception as e:
print(str(e).capitalize())
bot = Bot()
@bot.slash_command(description="Stuck? Use ME!")
async def help(ctx: discord.ApplicationContext):
"""Get help about the most feature packed bot!!"""
await ctx.respond(embed=Help_Embed(), view=HelpOptions())
message = await ctx.interaction.original_message()
await asyncio.sleep(120)
try:
await message.edit("This help session expired", embed=None, view=None)
except:
pass
youtube = SlashCommandGroup("youtube", "Commands related to youtube")
@youtube.command(description="Download a youtube video!")
async def download(ctx: commands.Context, link: Option(str, "The video you want to download!", required=True, default=None)):
interaction: discord.Interaction = ctx.interaction
return await interaction.response.send_message("This command is currently closed ):")
embed = discord.Embed(description="**Downloading the video <a:loading:911568431315292211>\n-------------------------\nThis may take some time.**", color=discord.Color.green())
await interaction.response.send_message(embed=embed)
message = await interaction.original_message()
url = YouTube(link)
video = url.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
video.download(output_path='./yt_vids')
def find_vid():
for vid in os.listdir('./yt_vids'):
if vid == video.default_filename:
print(vid)
return vid
await message.edit(content="**Here is your video!**", embed=None, file=discord.File(f'yt_vids/{find_vid()}'))
for mp4file in os.listdir('./yt_vids'):
os.remove(f"yt_vids/{mp4file}")
covid = SlashCommandGroup("covid", "commands related to covid info")
@covid.command(description="Covid Information!")
async def country(ctx, *, country: Option(str, "Name of the Country you want the Covid info of!", required=True, default=None)):
interaction: discord.Interaction = ctx.interaction
em = discord.Embed(description="**Fetching information <a:loading:911568431315292211>**", color=discord.Color.green())
await interaction.response.send_message(embed=em)
message = await interaction.original_message()
url = f"https://coronavirus-19-api.herokuapp.com/countries/{country}"
stats = await bot.session.get(url)
json_stats = await stats.json()
country = json_stats["country"]
totalCases = json_stats["cases"]
todayCases = json_stats["todayCases"]
totalDeaths = json_stats["deaths"]
todayDeaths = json_stats["todayDeaths"]
recovered = json_stats["recovered"]
active = json_stats["active"]
critical = json_stats["critical"]
casesPerOneMillion = json_stats["casesPerOneMillion"]
deathsPerOneMillion = json_stats["deathsPerOneMillion"]
totalTests = json_stats["totalTests"]
testsPerOneMillion = json_stats["testsPerOneMillion"]
embed = discord.Embed(title=f"**COVID-19 Status Of {country}**!", description="This Information Isn't Live Always, Hence It May Not Be Accurate!", colour=discord.Color.random())
embed.add_field(name="**Total Cases**", value=totalCases, inline=True)
embed.add_field(name="**Today Cases**", value=todayCases, inline=True)
embed.add_field(name="**Total Deaths**", value=totalDeaths, inline=True)
embed.add_field(name="**Today Deaths**", value=todayDeaths, inline=True)
embed.add_field(name="**Recovered**", value=recovered, inline=True)
embed.add_field(name="**Active**", value=active, inline=True)
embed.add_field(name="**Critical**", value=critical, inline=True)
embed.add_field(name="**Cases Per One Million**", value=casesPerOneMillion, inline=True)
embed.add_field(name="**Deaths Per One Million**", value=deathsPerOneMillion, inline=True)
embed.add_field(name="**Total Tests**", value=totalTests, inline=True)
embed.add_field(name="**Tests Per One Million**", value=testsPerOneMillion, inline=True)
embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/564520348821749766/701422183217365052/2Q.png")
embed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url)
await message.edit(embed=embed)
@covid.command(name="global", description="View Global Covid Info!")
async def global_(ctx):
interaction: discord.Interaction = ctx.interaction
em = discord.Embed(description="**Fetching information <a:loading:911568431315292211>**", color=discord.Color.green())
await interaction.response.send_message(embed=em)
message = await interaction.original_message()
url = f"https://coronavirus-19-api.herokuapp.com/countries/world"
stats = await bot.session.get(url)
json_stats = await stats.json()
totalCases = json_stats["cases"]
todayCases = json_stats["todayCases"]
totalDeaths = json_stats["deaths"]
todayDeaths = json_stats["todayDeaths"]
recovered = json_stats["recovered"]
active = json_stats["active"]
critical = json_stats["critical"]
casesPerOneMillion = json_stats["casesPerOneMillion"]
deathsPerOneMillion = json_stats["deathsPerOneMillion"]
totalTests = json_stats["totalTests"]
testsPerOneMillion = json_stats["testsPerOneMillion"]
embed = discord.Embed(title=f"**Global Covid 19 Info**!", description="This Information Isn't Live Always, Hence It May Not Be Accurate!",colour=discord.Color.random())
embed.add_field(name="**Total Cases**", value=totalCases, inline=True)
embed.add_field(name="**Today Cases**", value=todayCases, inline=True)
embed.add_field(name="**Total Deaths**", value=totalDeaths, inline=True)
embed.add_field(name="**Today Deaths**", value=todayDeaths, inline=True)
embed.add_field(name="**Recovered**", value=recovered, inline=True)
embed.add_field(name="**Active**", value=active, inline=True)
embed.add_field(name="**Critical**", value=critical, inline=True)
embed.add_field(name="**Cases Per One Million**", value=casesPerOneMillion, inline=True)
embed.add_field(name="**Deaths Per One Million**", value=deathsPerOneMillion, inline=True)
embed.add_field(name="**Total Tests**", value=totalTests, inline=True)
embed.add_field(name="**Tests Per One Million**", value=testsPerOneMillion, inline=True)
embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/564520348821749766/701422183217365052/2Q.png")
embed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url)
await message.edit(embed=embed)
@bot.command(hidden=True)
@commands.is_owner()
async def load(ctx: commands.Context, ext: str):
bot.load_extension(f"cogs.{ext}")
await ctx.send(f"Loaded extension `{ext}`", delete_after=7)
@bot.command(hidden=True)
@commands.is_owner()
async def unload(ctx: commands.Context, ext: str):
bot.unload_extension(f"cogs.{ext}")
await ctx.send(f"Unloaded extension `{ext}`", delete_after=7)
@bot.command(aliases=['al','autoload'], hidden=True)
@commands.is_owner()
async def reload(ctx: commands.Context, ext: str):
if ext == "all":
for filename in os.listdir('./cogs'):
bot.unload_extension(f"cogs.{filename[:-3]}")
await asyncio.sleep(0.3)
await ctx.send("Unloaded all extensions. Now loading them!")
await asyncio.sleep(0.5)
for filename in os.listdir('./cogs'):
bot.load_extension(f"cogs.{filename[:-3]}")
await ctx.send("Succesfully reloaded all extensions")
else:
bot.unload_extension(f"cogs.{ext}")
await asyncio.sleep(0.5)
bot.load_extension(f"cogs.{ext}")
await ctx.send(f"Succesfully reloaded `{ext}`")
bot.add_application_command(youtube)
bot.add_application_command(covid)
# For the mobile status
# ARCHIVED FOR SOME TIME
# async def mobile(self):
# import sys
# payload = {'op': self.IDENTIFY,'d': {'token': self.token,'properties': {'$os': sys.platform,'$browser': 'Discord iOS','$device': 'pycord','$referrer': '','$referring_domain': ''},'compress': True,'large_threshold': 250,'v': 3}}
# if self.shard_id is not None and self.shard_count is not None:
# payload['d']['shard'] = [self.shard_id, self.shard_count]
# state = self._connection
# if state._activity is not None or state._status is not None:
# payload["d"]["presence"] = {"status": state._status, "game": state._activity, "since": 0, "afk": False}
# if state._intents is not None:
# payload["d"]["intents"] = state._intents.value
# await self.call_hooks("before_identify", self.shard_id, initial=self._initial_identify)
# await self.send_as_json(payload)
# discord.gateway.DiscordWebSocket.identify = mobile
bot.run(config.TOKEN)
```
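bot.py imports a local `config` module and reads `config.TOKEN` and `config.TOPGG_TOKEN`; the file itself is not included in this dump. A minimal stub consistent with those two references might look like this (attribute names are from the code, values are placeholders):

```python
# config.py -- minimal stub; only TOKEN and TOPGG_TOKEN are referenced in bot.py.
# Real values should come from the environment or a secret store, never source control.
import os

TOKEN = os.environ.get("DISCORD_TOKEN", "")      # Discord bot token passed to bot.run()
TOPGG_TOKEN = os.environ.get("TOPGG_TOKEN", "")  # top.gg token used by topgg.DBLClient
```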
#### File: Yuki/cogs/help.py
```python
import discord
from discord.ext import commands
from discord.commands import slash_command, Option
from utils.buttons import InviteView2
from utils.helpers.help import Help_Embed, cog_help
import asyncio
import time
import datetime
import psutil
import platform
import sys
def members(bot: commands.Bot):
memc = 0
for guild in bot.guilds:
        memc += guild.member_count or 0  # public attribute; may be None for unchunked guilds
return memc
class HelpEmbed(discord.Embed):
def __init__(self, **kwargs):
super().__init__(**kwargs)
text = "Use help [command] | <>: required | []: optional"
self.set_footer(text=text)
self.color = discord.Color.embed_background(theme="dark")
class HelpOptions(discord.ui.View):
def __init__(self):
super().__init__()
self.add_item(discord.ui.Button(label="Invite Me!", url="https://discord.com/api/oauth2/authorize?client_id=919314151535419463&permissions=8&scope=bot%20applications.commands", row=1))
self.add_item(discord.ui.Button(label="Support Server", url="https://discord.gg/RqKvY5MQgb", row=1))
@discord.ui.button(label="Delete", style=discord.ButtonStyle.red, emoji="⛔", row=2)
async def delete_button(self, button: discord.ui.Button, interaction: discord.Interaction):
await interaction.message.delete()
@discord.ui.select(
placeholder="Select a Category!",
min_values=1,
max_values=1,
options=[
discord.SelectOption(
label="Config",
description="Configure the bot",
emoji="🔧"
),
discord.SelectOption(
label="Fun",
description="View all Fun commands!",
emoji="🪄"
),
discord.SelectOption(
label="Misc",
description="View all normal and mod commands!",
emoji="🤖"
),
discord.SelectOption(
label="Info",
description="View all Info commands!",
emoji="ℹ️"
),
discord.SelectOption(
label="Moderation",
description="View all MOD commands",
emoji="<:modlogo:923117346984435722>"
),
discord.SelectOption(
label="Tickets",
description="View all ticket system commands!",
emoji="📩"
)
])
async def select_callback(self, select, interaction: discord.Interaction):
if select.values[0]:
await interaction.response.edit_message(
embed=discord.Embed(
title=f"{select.values[0]} Help!",
description=cog_help[select.values[0]],
color=discord.Color.embed_background(theme="dark"),
).set_footer(
text="Use help <command> to get additional help on a specific command."
)
)
class MyHelpCommand(commands.MinimalHelpCommand):
def __init__(self):
super().__init__()
async def send_pages(self):
ctx = self.context
try:
m = await ctx.send(embed=Help_Embed(), view=HelpOptions())
await asyncio.sleep(120)
try:
await m.edit("This help session expired!", embed=Help_Embed(), view=None)
except:
pass
except discord.Forbidden:
await ctx.send(
"""Hey! it looks like i am missing some permissions."""
)
except Exception as e:
print(e)
async def send_command_help(self, command):
"""triggers when a `<prefix>help <command>` is called"""
try:
ctx = self.context
signature = self.get_command_signature(
command
)
embed = HelpEmbed(
title=signature, description=command.help or "No help found..."
)
if cog := command.cog:
embed.add_field(name="Category", value=cog.qualified_name)
if command._buckets and (cooldown := command._buckets._cooldown):
embed.add_field(
name="Cooldown",
value=f"{cooldown.rate} per {cooldown.per:.0f} seconds",
)
await ctx.send(embed=embed)
except Exception as e:
print(e)
class HelpCog(commands.Cog, name="Help"):
"""Help command and bot related commands"""
def __init__(self, bot: commands.Bot):
self.bot = bot
self.bot.help_command = MyHelpCommand()
@commands.Cog.listener()
async def on_ready(self):
global startTime
startTime = time.time()
@slash_command(description="Invite me to your server")
async def invite(self, ctx):
await ctx.respond("Invite Here!", view=InviteView2())
@commands.command(name="invite", aliases=['inv', 'botinv', 'botbotinvite'])
async def invite_(self, ctx):
"""Invite the bot to your server!"""
await ctx.send("Invite Here!", view=InviteView2())
@slash_command(description="View the bot's info")
async def botinfo(self, ctx: commands.Context):
memory = "{:.4} MB".format(psutil.Process().memory_info().rss / 1024 ** 2)
py_ver = ".".join([str(v) for v in sys.version_info[:3]])
uptime = str(datetime.timedelta(seconds=int(round(time.time()-startTime))))
embed = discord.Embed(title="Bot Info!", description=f"**Servers**\n{len(list(self.bot.guilds))}\n\n**Users**\n{members(self.bot)}\n\n**System**\n{platform.release()}\n\n**Memory**\n{memory}\n\n**Python Version**\n{py_ver}\n\n**Uptime**\n{uptime}\n\n**Owner/Creator**\n27Saumya", color=discord.Color.green())
embed.set_thumbnail(url=self.bot.user.avatar.url)
await ctx.respond(embed=embed)
@commands.command(name="botinfo", aliases=['bot', 'stats', 'info'])
async def botinfo_(self, ctx: commands.Context):
"""View the bot's info"""
memory = "{:.4} MB".format(psutil.Process().memory_info().rss / 1024 ** 2)
py_ver = ".".join([str(v) for v in sys.version_info[:3]])
uptime = str(datetime.timedelta(seconds=int(round(time.time()-startTime))))
embed = discord.Embed(title="Bot Info!", description=f"**Servers**\n{len(list(self.bot.guilds))}\n\n**Users**\n{members(self.bot)}\n\n**System**\n{platform.release()}\n\n**Memory**\n{memory}\n\n**Python Version**\n{py_ver}\n\n**Uptime**\n{uptime}\n\n**Owner/Creator**\n27Saumya", color=discord.Color.green())
embed.set_thumbnail(url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
@slash_command(description="🏓 Check the bot's latency")
async def ping(self, ctx: commands.Context):
interaction: discord.Interaction = ctx.interaction
before = time.monotonic()
embed = discord.Embed(title=":ping_pong:", color=discord.Color.green())
await interaction.response.send_message(embed=embed)
message = await interaction.original_message()
ping = (time.monotonic() - before) * 1000
embed2 = discord.Embed(title=":ping_pong: Pong!", description=f"**Bot latency: `{round(self.bot.latency * 1000)}` ms\n------------------------------\nDiscord Latency: `{int(ping)}` ms**", color=discord.Color.green())
await message.edit(embed=embed2)
@commands.command(name="ping")
async def ping_(self, ctx: commands.Context):
"""View the bot's latency (Edit Latency)"""
before = time.monotonic()
embed = discord.Embed(title=":ping_pong:", color=discord.Color.green())
message = await ctx.send(embed=embed)
ping = (time.monotonic() - before) * 1000
embed2 = discord.Embed(title=":ping_pong: Pong!", description=f"**Bot latency: `{round(self.bot.latency * 1000)}` ms\n------------------------------\nDiscord Latency: `{int(ping)}` ms**", color=discord.Color.green())
await message.edit(embed=embed2)
def setup(bot):
bot.add_cog(HelpCog(bot))
```
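`HelpOptions.select_callback` indexes `cog_help[select.values[0]]`, so `utils/helpers/help.py` (not included in this dump) presumably exposes a dict keyed by the same labels used in the select menu. A sketch of that assumed shape, with placeholder values:

```python
# utils/helpers/help.py -- assumed shape; keys must match the SelectOption labels
# used in HelpOptions, values are the per-category help text shown in the embed.
cog_help = {
    "Config": "...",
    "Fun": "...",
    "Misc": "...",
    "Info": "...",
    "Moderation": "...",
    "Tickets": "...",
}
```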
#### File: Yuki/cogs/info.py
```python
import discord
from discord.ext import commands
from discord.commands import slash_command, Option
from utils.buttons import Google
import wikipedia
class Info(commands.Cog, name="Info", description="Information related commands"):
"""
Commands related to information come in this category
These include google, covid info and much more
"""
def __init__(self, bot):
self.bot = bot
@slash_command(description="Google Search")
async def google(self, ctx: commands.Context, *,
query: Option(str, "Type what you want to search!", required=True, default=None)):
await ctx.respond(f"Google Result for `{query}`", view=Google(query))
@commands.command(name="google")
async def google_(self, ctx: commands.Context, *, query: str):
await ctx.send(f"Google result for `{query}`", view=Google(query))
@commands.group(name="covid", description="Info about COVID-19")
async def covid_(self, ctx: commands.Context):
"""Command group to get covid stats use `covid` for more info"""
if ctx.invoked_subcommand is None:
embed = discord.Embed(title="Covid Info", description="**__Commands__:** \n-->`global`:\nGets Global covid info\naliases: `world` `all`\n\n-->`country` \nDirectly type the country you want.\nExample: \n`+covid country India`\n`+covid country USA`", color=discord.Color.green())
await ctx.send(embed=embed)
@covid_.command(name="country", aliases=['c', 'cou', 'coun'])
async def country_(self, ctx, *, country: str):
"""Get covid stats of a country\nExample: `covid country India` and `covid country USA`"""
em = discord.Embed(description="**Fetching information <a:loading:911568431315292211>**", color=discord.Color.green())
message = await ctx.send(embed=em)
url = f"https://coronavirus-19-api.herokuapp.com/countries/{country}"
stats = await self.bot.session.get(url)
json_stats = await stats.json()
country = json_stats["country"]
totalCases = json_stats["cases"]
todayCases = json_stats["todayCases"]
totalDeaths = json_stats["deaths"]
todayDeaths = json_stats["todayDeaths"]
recovered = json_stats["recovered"]
active = json_stats["active"]
critical = json_stats["critical"]
casesPerOneMillion = json_stats["casesPerOneMillion"]
deathsPerOneMillion = json_stats["deathsPerOneMillion"]
totalTests = json_stats["totalTests"]
testsPerOneMillion = json_stats["testsPerOneMillion"]
embed = discord.Embed(title=f"**COVID-19 Status Of {country}**!", description="This Information Isn't Live Always, Hence It May Not Be Accurate!", colour=discord.Color.random())
embed.add_field(name="**Total Cases**", value=totalCases, inline=True)
embed.add_field(name="**Today Cases**", value=todayCases, inline=True)
embed.add_field(name="**Total Deaths**", value=totalDeaths, inline=True)
embed.add_field(name="**Today Deaths**", value=todayDeaths, inline=True)
embed.add_field(name="**Recovered**", value=recovered, inline=True)
embed.add_field(name="**Active**", value=active, inline=True)
embed.add_field(name="**Critical**", value=critical, inline=True)
embed.add_field(name="**Cases Per One Million**", value=casesPerOneMillion, inline=True)
embed.add_field(name="**Deaths Per One Million**", value=deathsPerOneMillion, inline=True)
embed.add_field(name="**Total Tests**", value=totalTests, inline=True)
embed.add_field(name="**Tests Per One Million**", value=testsPerOneMillion, inline=True)
embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/564520348821749766/701422183217365052/2Q.png")
embed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url)
await message.edit(embed=embed)
@covid_.command(name="global", aliases=['world', 'all'])
async def global_(self, ctx):
"""Gets the global Covid-19 INFO"""
em = discord.Embed(description="**Fetching information <a:loading:911568431315292211>**", color=discord.Color.green())
message = await ctx.send(embed=em)
url = f"https://coronavirus-19-api.herokuapp.com/countries/world"
stats = await self.bot.session.get(url)
json_stats = await stats.json()
totalCases = json_stats["cases"]
todayCases = json_stats["todayCases"]
totalDeaths = json_stats["deaths"]
todayDeaths = json_stats["todayDeaths"]
recovered = json_stats["recovered"]
active = json_stats["active"]
critical = json_stats["critical"]
casesPerOneMillion = json_stats["casesPerOneMillion"]
deathsPerOneMillion = json_stats["deathsPerOneMillion"]
totalTests = json_stats["totalTests"]
testsPerOneMillion = json_stats["testsPerOneMillion"]
embed = discord.Embed(title=f"**Global Covid 19 Info**!", description="This Information Isn't Live Always, Hence It May Not Be Accurate!", colour=discord.Color.random())
embed.add_field(name="**Total Cases**", value=totalCases, inline=True)
embed.add_field(name="**Today Cases**", value=todayCases, inline=True)
embed.add_field(name="**Total Deaths**", value=totalDeaths, inline=True)
embed.add_field(name="**Today Deaths**", value=todayDeaths, inline=True)
embed.add_field(name="**Recovered**", value=recovered, inline=True)
embed.add_field(name="**Active**", value=active, inline=True)
embed.add_field(name="**Critical**", value=critical, inline=True)
embed.add_field(name="**Cases Per One Million**", value=casesPerOneMillion, inline=True)
embed.add_field(name="**Deaths Per One Million**", value=deathsPerOneMillion, inline=True)
embed.add_field(name="**Total Tests**", value=totalTests, inline=True)
embed.add_field(name="**Tests Per One Million**", value=testsPerOneMillion, inline=True)
embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/564520348821749766/701422183217365052/2Q.png")
embed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url)
await message.edit(embed=embed)
@slash_command(description="Search Wikipedia!")
async def wikipedia(self, ctx: commands.Context, *, query: Option(str, "Type what you want to search!", required=True, default=None),
lines:Option(int, "The number of lines you want the result in. By default it is 5", required=False, default=5)):
result = wikipedia.summary(query, sentences=lines)
embed = discord.Embed(title=query, description=f"**{result}**", color=discord.Color.random())
await ctx.respond(embed=embed)
@commands.command(name="wikipedia", aliases=['wiki'])
async def wikipedia_(self, ctx, *, query: str):
try:
result = wikipedia.summary(query, sentences=5)
embed = discord.Embed(title=query, description=f"**{result}**", color=discord.Color.random())
await ctx.send(embed=embed)
except:
embed = discord.Embed(description="**<:error:897382665781669908> An error occured while fetching the results**", color=discord.Color.red())
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Info(bot))
```
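The wikipedia commands lean on the third-party `wikipedia` package's `summary()` helper, which fetches a page summary truncated to a sentence count; it raises `DisambiguationError` or `PageError` on ambiguous or missing titles, which is why the prefix command wraps the call in a try/except. A standalone call looks like:

```python
import wikipedia

# summary() performs a live HTTP request; sentences limits the returned text.
text = wikipedia.summary("Discord", sentences=2)
print(text)
```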
#### File: Yuki/cogs/misc.py
```python
import discord
from discord.ext import commands
from discord.commands import slash_command, user_command
from discord.commands import Option
import qrcode
import os
from pytube import YouTube
from speedtest import Speedtest
from typing import Union
import aiohttp
from io import BytesIO
from utils.buttons import *
from discord.ext.commands import BucketType
class Misc(commands.Cog, name="Misc", description="Miscellaneous commands!"):
"""Miscellaneous commands fall in this category"""
def __init__(self, bot: commands.Bot):
self.bot = bot
#Avatar
@slash_command(description="Check your or someone else's PFP!")
async def avatar(self, ctx,
member: Option(discord.Member, "Check someone else's PFP!", required=False, default=None)):
member = member if member else ctx.author
em = discord.Embed(color=member.color)
em.set_image(url=member.avatar.url)
em.set_author(name=f"{member.name}'s avatar!", icon_url=member.avatar.url)
em.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url)
await ctx.respond(embed=em)
@commands.command(name="avatar", aliases=['av', 'pfp'])
async def avatar_(self, ctx: commands.context, member: discord.Member=None):
"""View your's or someone's avatar"""
member = member if member else ctx.author
em = discord.Embed(color=member.color)
em.set_image(url=member.avatar.url)
em.set_author(name=f"{member.name}'s avatar!", icon_url=member.avatar.url)
em.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=em)
#Qrcode
@slash_command(description="Generate a Qrcode!")
async def qrcode(self, ctx, url: Option(str, "The link you want the qrcode of", required=True, default=None), hidden: Option(str, "Do you want the qrcode to be visible only to you?", choices=["Yes", "No"], required=False, default=None)):
img = qrcode.make(url)
img.save("qrcode.png")
if hidden == "Yes":
await ctx.respond(content="**Here is your QRCode**", file=discord.File("qrcode.png"), ephemeral=True)
else:
await ctx.respond(content="**Here is your QRCode**", file=discord.File("qrcode.png"))
os.remove("qrcode.png")
@commands.command(name="qrcode", aliases=['qr'])
async def qrcode_(self, ctx, *, url: str):
"""Create a qrcode.\nExample: `qrcode youtube.com`"""
img = qrcode.make(url)
img.save("qrcode.png")
await ctx.send(content="**Here is your QRCode**", file=discord.File("qrcode.png"))
os.remove("qrcode.png")
#Youtube
@commands.group(name="youtube")
async def youtube_(self, ctx: commands.Context):
"""Youtube related commnands"""
if ctx.invoked_subcommand is None:
embed = discord.Embed(title="Youtube", description="**Youtube Commands:**\n -->**download**\n**Usage** --> `youtube download <url>`", color=discord.Color.green())
embed.set_footer(text="More Commands Coming Soon!")
await ctx.send(embed=embed)
@youtube_.command(name="download")
async def download_(self, ctx: commands.Context, *, link: str):
"""Download a youtube video (currently closed)\nExample: `youtube download youtube.com/watch?v=dQw4w9WgXcQ`"""
embed = discord.Embed(description="**Downloading the video <a:loading:911568431315292211>\n-------------------------\nThis may take some time.**", color=discord.Color.green())
message = await ctx.send("Sorry this command is currently disabled :(")
url = YouTube(link)
video = url.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
video.download(output_path='./yt_vids')
def find_vid():
for vid in os.listdir('./yt_vids'):
if vid == video.default_filename:
print(vid)
return vid
await message.edit(content="**Here is your video!**", embed=None, file=discord.File(f'yt_vids/{find_vid()}'))
for mp4file in os.listdir('./yt_vids'):
os.remove(f"yt_vids/{mp4file}")
@commands.command(aliases=['st', 'speed'])
@commands.cooldown(1, 60, BucketType.user)
async def speedtest(self, ctx: commands.Context):
"""Test the bot's speed... Currently disabled (fixing some bugs)
Return of the command: N/A"""
return await ctx.send("Sorry... This command is currently disabled :(")
# Archived code for some time
# message = await ctx.send(
# embed=discord.Embed(description="**<a:loading:911568431315292211> Starting Speed Test!**", color=discord.Color.embed_background(theme="dark"))
# )
# s = Speedtest()
# s.get_best_server()
# await message.edit(embed=discord.Embed(description="**<a:loading:911568431315292211> Found Best Server**", color=discord.Color.embed_background(theme="dark")))
# s.download()
# await message.edit(embed=discord.Embed(description="**<a:loading:911568431315292211> Download Complete**", color=discord.Color.embed_background(theme="dark")))
# s.upload()
# await message.edit(embed=discord.Embed(description="**<a:loading:911568431315292211> Uploading Complete\n-----------------------------\nSending results**", color=discord.Color.embed_background(theme="dark")))
# s = s.results.dict()
# await message.edit(
# embed=discord.Embed(title="Speed Test Results", description=f"Ping: `{s['ping']}` ms\nDownload: `{round(s['download']/10**6, 3)}` Mbit/s\nUpload: `{round(s['upload']/10**6, 3)}` Mbit/s\nServer: `{s['server']['sponsor']}`", color=discord.Color.embed_background(theme="dark"))
# )
@commands.command(aliases=['eadd', 'ea'])
@commands.has_permissions(manage_emojis_and_stickers=True)
async def emojiadd(self, ctx: commands.Context, emoji: str, *, name: str):
"""Creates an emoji in the server using a url"""
async with aiohttp.ClientSession() as session:
async with session.get(emoji) as r:
try:
imgOrGIF = BytesIO(await r.read())
bValue = imgOrGIF.getvalue()
if r.status in range(200, 299):
emojiCreate = await ctx.guild.create_custom_emoji(image=bValue, name=name)
await ctx.send(embed=discord.Embed(description=f"**<:tick:897382645321850920> Successfully created emoji - {emojiCreate} with name: `{name}`**", color=discord.Color.green()))
else:
await ctx.send(embed=discord.Embed(description=f"<:error:897382665781669908> An error occured while creating the emoji | {r.status}", color=discord.Color.red()))
except discord.HTTPException:
await ctx.send(embed=discord.Embed(description=f"<:error:897382665781669908> The file size is too big!", color=discord.Color.red()))
except Exception as e:
print(e)
@commands.command(aliases=['emojisteal', 'copyemoji', 'steal'])
@commands.has_permissions(manage_emojis_and_stickers=True)
async def stealemoji(self, ctx: commands.Context, emoji: Union[discord.Emoji, discord.PartialEmoji], *, name: str):
"""Steal an emoji for another server.... The bot adds the emoji to this server"""
try:
emoji_bytes = await emoji.read()
emoji_create = await ctx.guild.create_custom_emoji(image=emoji_bytes, name=name)
await ctx.send(embed=discord.Embed(description=f"**<:tick:897382645321850920> Successfully created emoji - {emoji_create} with name: `{name}`**", color=discord.Color.green()))
except Exception as e:
error = str(e).capitalize()
return await ctx.send(embed=discord.Embed(description=f"**<:error:897382665781669908> An error occurred while creating the emoji\n`{error}`**", color=discord.Color.red()))
@commands.command(aliases=['userinfo'])
async def whois(self, ctx: commands.Context, user: Union[discord.Member, discord.User]=None):
"""Get information about a user or yourself"""
user = user or ctx.author
accType = "Bot" if user.bot else "Human"
badge_emojis = {
"bug_hunter": str(self.bot.get_emoji(928298721916112916)),
"bug_hunter_level_2": str(self.bot.get_emoji(928298721303736361)),
"discord_certified_moderator": str(self.bot.get_emoji(928298721475698708)),
"early_supporter": str(self.bot.get_emoji(928298721496686692)),
"verified_bot_developer": str(self.bot.get_emoji(928299192428953660)),
"hypesquad": str(self.bot.get_emoji(930418236678340668)),
"hypesquad_balance": str(self.bot.get_emoji(928299452446412821)),
"hypesquad_bravery": str(self.bot.get_emoji(928299808974843984)),
"hypesquad_brilliance": str(self.bot.get_emoji(928299672840327208)),
"partner": str(self.bot.get_emoji(928502472891330622)),
"staff": str(self.bot.get_emoji(928502668224262195))
}
def get_badges(user: Union[discord.User, discord.Member]):
badges = []
for badge, value in iter(user.public_flags):
if value and badge in badge_emojis.keys():
badges.append(badge_emojis[badge])
return badges
        if user not in ctx.guild.members:
em = discord.Embed(
description=f"""**• Username: `{user}`
• UserID: `{user.id}`
• Account Type: `{accType}`
• Created at: {discord.utils.format_dt(user.created_at)}
• Badges: {" ".join(get_badges(user)) if len(get_badges(user)) > 0 else "`-`"}**""",
color=discord.Color.green()
).set_author(name=user.name, icon_url=user.avatar.url).set_thumbnail(url=user.avatar.url).set_footer(text="Note: This user is not from this server", icon_url=user.avatar.url)
user_for_banner = await self.bot.fetch_user(user.id)
if user_for_banner.banner:
em.set_image(url=user_for_banner.banner.url)
return await ctx.send(embed=em)
member: discord.Member = ctx.guild.get_member(user.id)
def timedOut(member: discord.Member):
"""Gets a string type of `member.timed_out` rather than a boolean type"""
if member.timed_out:
return "Yes"
else:
return "No"
def getRoles(member: discord.Member):
"""Gets the user roles"""
if len(list(member.roles)) == 0:
return "-"
else:
sorted_roles = sorted(
[role for role in member.roles[1:]], key=lambda x: x.position, reverse=True
)
roles = " ".join(role.mention for role in sorted_roles)
return roles
nick = user.nick if user.nick else "-"
embed = discord.Embed(
description=f"""**• Username: `{user}`
• UserID: `{user.id}`
• Nickname: `{nick}`
• Account Type: `{accType}`
• Created at: {discord.utils.format_dt(user.created_at)}
• Joined at: {discord.utils.format_dt(member.joined_at)}
• Timed Out: `{timedOut(member)}`
• Roles: {getRoles(member)}
• Badges: {" ".join(get_badges(user)) if len(get_badges(user)) > 0 else "`-`"}**""",
color=user.color
).set_author(name=user.name, icon_url=user.avatar.url).set_thumbnail(url=user.avatar.url)
userForBanner = await self.bot.fetch_user(user.id)
if userForBanner.banner:
embed.set_image(url=userForBanner.banner.url)
return await ctx.send(embed=embed)
@commands.command(aliases=['sourcecode'])
async def source(self, ctx: commands.Context):
await ctx.send("Here is my source code", view=SourceView())
@commands.command(aliases=['support', 'botserver', 'supportguild', 'supportserverinvite', 'server'])
async def supportserver(self, ctx: commands.Context):
await ctx.send("Here is my support server invite", view=SupportView())
@commands.command()
async def vote(self, ctx: commands.Context):
"""Vote the bot on [top.gg](https://top.gg/bot/919314151535419463/vote)"""
await ctx.send("Vote me now!", view=VoteView())
@commands.command(aliases=['guildinfo'])
@commands.guild_only()
async def serverinfo(self, ctx: commands.Context):
"""Get information about the server"""
guild = ctx.guild
icon = guild.icon.url if guild.icon else "https://pnggrid.com/wp-content/uploads/2021/05/Discord-Logo-Circle-1024x1024.png"
embed = discord.Embed(
description=f"""**• Owner: {guild.owner.mention}
• ServerID: `{guild.id}`
• Members: `{len(guild.members)}`
• Created at: {discord.utils.format_dt(guild.created_at)}
• Roles: `{len(guild.roles)}`
• Text Channels: `{len(guild.text_channels)}`
• Voice Channels: `{len(guild.voice_channels)}`**""",
color=discord.Color.green()
).set_author(name=guild.name, icon_url=icon).set_thumbnail(url=icon)
if guild.banner:
embed.set_image(url=guild.banner.url)
return await ctx.send(embed=embed)
@commands.command(aliases=['dogpic'])
async def dog(self, ctx: commands.Context):
"""Gives a random dog image"""
url = "https://some-random-api.ml/img/dog"
r = await self.bot.session.get(url)
if 300 > r.status >= 200:
data = await r.json()
else:
return await ctx.send(embed=discord.Embed(description="Something went wrong", color=discord.Color.red()))
embed = discord.Embed(description="**Here's a random dog image!**", color=discord.Color.embed_background(theme="dark")).set_image(url=data['link'])
await ctx.send(embed=embed)
@commands.command(aliases=['catpic'])
async def cat(self, ctx: commands.Context):
"""Gives a random cat image"""
url = "https://some-random-api.ml/img/cat"
r = await self.bot.session.get(url)
if 300 > r.status >= 200:
data = await r.json()
else:
return await ctx.send(embed=discord.Embed(description="Something went wrong", color=discord.Color.red()))
embed = discord.Embed(description="**Here's a random cat image!**", color=discord.Color.embed_background(theme="dark")).set_image(url=data['link'])
await ctx.send(embed=embed)
@commands.command(aliases=['pandapic'])
async def panda(self, ctx: commands.Context):
"""Gives a random panda image"""
url = "https://some-random-api.ml/img/panda"
r = await self.bot.session.get(url)
if 300 > r.status >= 200:
data = await r.json()
else:
return await ctx.send(embed=discord.Embed(description="Something went wrong", color=discord.Color.red()))
embed = discord.Embed(description="**Here's a random panda image!**", color=discord.Color.embed_background(theme="dark")).set_image(url=data['link'])
await ctx.send(embed=embed)
@commands.command(aliases=['memcount'])
@commands.guild_only()
async def membercount(self, ctx: commands.Context):
"""Gives the member count of the server"""
await ctx.send(embed=discord.Embed(title="Members", description=f"{ctx.guild.member_count}", color=discord.Color.green()).set_footer(text=f"{ctx.guild.name}", icon_url=ctx.guild.icon.url))
def setup(bot):
bot.add_cog(Misc(bot))
```
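`get_badges` relies on the fact that iterating `user.public_flags` yields `(flag_name, bool)` pairs, keeping only flags that are set and have an entry in `badge_emojis`. The same filtering pattern in isolation:

```python
# Self-contained illustration of the filtering in get_badges; the flag names
# mirror badge_emojis keys from the cog, the emoji strings are placeholders.
badge_emojis = {"early_supporter": "<emoji_a>", "partner": "<emoji_b>"}
public_flags = [("early_supporter", True), ("hypesquad", False), ("partner", True)]

badges = [badge_emojis[name] for name, value in public_flags
          if value and name in badge_emojis]
print(badges)  # ['<emoji_a>', '<emoji_b>']
```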
#### File: Yuki/cogs/mod.py
```python
import discord
from discord.ext import commands
from discord.commands import slash_command, Option
import json
from utils.buttons import NukeView
import datetime
import humanfriendly
class ModCog(commands.Cog, name="Moderation", description="Moderation commands"):
"""
Moderation related commands. Yuki ✨ mainly focuses on fun and multipurpose...
So, it doesn't include basic moderation commands like: `kick` and `ban`.
It instead has unique commands!
"""
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="changeprefix", aliases=['setprefix', 'chpre', 'setpre', 'changepre', 'prefix', 'pre'])
    async def changeprefix_(self, ctx: commands.Context, *, prefix: str = None):
        """Changes Prefix for this server"""
        if prefix is None:
            return await ctx.send(embed=discord.Embed(description=f"My prefix for this server is `{ctx.clean_prefix}`", color=discord.Color.embed_background(theme="dark")))
        # Manage Messages OR the bot owner may change the prefix (the original `or` condition locked out everyone but the owner).
        if not (ctx.author.guild_permissions.manage_messages or ctx.author.id == self.bot.owner_id):
            embed = discord.Embed(description="**<:error:897382665781669908> You can't do that**", color=discord.Color.red())
            return await ctx.reply(embed=embed)
        if "_" in prefix:
            prefix = prefix.replace("_", " ")  # underscores stand in for spaces in the stored prefix
        self.bot.dbcursor.execute('SELECT * FROM guilds WHERE guild_id=?', (ctx.guild.id,))
        data = self.bot.dbcursor.fetchone()
        if not data:
            # the original INSERT was missing its VALUES clause
            self.bot.dbcursor.execute('INSERT INTO guilds (guild_id, prefix) VALUES (?,?)', (ctx.guild.id, prefix))
        else:
            self.bot.dbcursor.execute('UPDATE guilds SET prefix=? WHERE guild_id=?', (prefix, ctx.guild.id))
        self.bot.db.commit()
        embed = discord.Embed(description=f"**<:tick:897382645321850920> Prefix Updated to: `{prefix}`**", color=discord.Color.green())
        await ctx.send(embed=embed)
@slash_command(description="Change prefix for the server")
    async def changeprefix(self, ctx: discord.ApplicationContext, *, prefix: str = None):
        """Changes Prefix for this server"""
        if prefix is None:
            return await ctx.respond(embed=discord.Embed(description=f"My prefix for this server is `{ctx.clean_prefix}`", color=discord.Color.embed_background(theme="dark")))
        # Manage Messages OR the bot owner may change the prefix (the original `or` condition locked out everyone but the owner).
        if not (ctx.author.guild_permissions.manage_messages or ctx.author.id == self.bot.owner_id):
            embed = discord.Embed(description="**<:error:897382665781669908> You can't do that**", color=discord.Color.red())
            return await ctx.respond(embed=embed)
        if "_" in prefix:
            prefix = prefix.replace("_", " ")  # underscores stand in for spaces in the stored prefix
        self.bot.dbcursor.execute('SELECT * FROM guilds WHERE guild_id=?', (ctx.guild.id,))
        data = self.bot.dbcursor.fetchone()
        if not data:
            # the original INSERT was missing its VALUES clause
            self.bot.dbcursor.execute('INSERT INTO guilds (guild_id, prefix) VALUES (?,?)', (ctx.guild.id, prefix))
        else:
            self.bot.dbcursor.execute('UPDATE guilds SET prefix=? WHERE guild_id=?', (prefix, ctx.guild.id))
        self.bot.db.commit()
        embed = discord.Embed(description=f"**<:tick:897382645321850920> Prefix Updated to: `{prefix}`**", color=discord.Color.green())
        await ctx.respond(embed=embed)
@slash_command(description="Nuke a channel")
@commands.bot_has_permissions(manage_channels=True)
async def nuke(self, ctx, channel: Option(discord.TextChannel, "The channel you want to nuke", required=False, default=None)):
channel = channel if channel else ctx.channel
interaction: discord.Interaction = ctx.interaction
if not ctx.author.guild_permissions.manage_channels:
em = discord.Embed(description="<:error:897382665781669908> You can't do that!", color=discord.Color.red())
return await interaction.response.send_message(embed=em, ephemeral=True)
embed1 = discord.Embed(description=f"Are you sure you want to **NUKE** {channel.mention}?\n------------------------------------------------\nRespond Within **15** seconds!", color=discord.Color.orange())
await interaction.response.send_message(embed=embed1)
message = await interaction.original_message()
await message.edit(embed=embed1, view=NukeView(ctx, channel, message))
@commands.command(name="nuke")
@commands.bot_has_permissions(manage_channels=True)
async def nuke_(self, ctx, *, channel: discord.TextChannel=None):
"""Delete all messages in a channel\nExample: `nuke [channel]\nIf channel is None then it will nuke the channel the command is used in`"""
channel = channel if channel else ctx.channel
if not ctx.author.guild_permissions.manage_channels:
em = discord.Embed(description="<:error:897382665781669908> You can't do that!", color=discord.Color.red())
return await ctx.send(embed=em)
embed1 = discord.Embed(description=f"Are you sure you want to **NUKE** {channel.mention}?\n------------------------------------------------\nRespond Within **15** seconds!", color=discord.Color.orange())
message = await ctx.send(embed=embed1)
await message.edit(embed=embed1, view=NukeView(ctx, channel, message))
@commands.group(name="purge")
async def purge_(self, ctx: commands.Context):
"""Sub commands for purge"""
if ctx.invoked_subcommand is None:
await ctx.send(embed=discord.Embed(title="Invalid Usage", description="**Usage: `{0}purge user <member> <amount>`**", color=discord.Color.red()))
@purge_.command(aliases=['member', 'mem'])
@commands.has_permissions(manage_messages=True)
@commands.bot_has_permissions(manage_messages=True)
    async def user(self, ctx: commands.Context, user: discord.Member, amount: int):
"""Delete message of a user in the channel"""
def is_user(m):
"""Checks the user's messages in the channel"""
return m.author == user
channel: discord.TextChannel = ctx.channel
deleted = await channel.purge(limit=amount, check=is_user)
embed = discord.Embed(description=f"**<:tick:897382645321850920> Deleted {len(deleted)} messages of {user.mention}**", color=discord.Color.green())
await ctx.send(embed=embed)
@commands.command(aliases=['mute'])
@commands.has_permissions(moderate_members=True)
@commands.bot_has_permissions(moderate_members=True)
async def timeout(self, ctx: commands.Context, user: discord.Member, time, *, reason: str = "No reason provided"):
"""Timeout/Mute a user in the server"""
if user == ctx.author:
return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> You can't timeout yourself!**", color=discord.Color.red()))
try:
timeConvert = humanfriendly.parse_timespan(time)
await user.timeout(discord.utils.utcnow()+datetime.timedelta(seconds=timeConvert), reason=reason)
embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully muted {user.mention} for {time} | Reason: {reason}**", color=discord.Color.green())
await ctx.send(embed=embed)
await user.send(embed=discord.Embed(description=f"**<:error:897382665781669908> You were muted in {ctx.guild.name} | Reason: {reason}**", color=discord.Color.red()))
except discord.Forbidden:
return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> This user has a higher or equal role to me. **", color=discord.Color.red()))
@commands.command(aliases=['um'])
@commands.has_permissions(moderate_members=True)
@commands.bot_has_permissions(moderate_members=True)
async def unmute(self, ctx: commands.Context, user: discord.Member, *, reason: str = "No reason provided"):
"""Unmutes a user from the server"""
if user == ctx.author:
return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> You can't unmute yourself!**", color=discord.Color.red()))
if not user.timed_out:
return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> That user isn't muted!**", color=discord.Color.red()))
await user.timeout(None, reason=reason)
embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully unmuted {user.mention} | Reason: {reason}**", color=discord.Color.green())
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(ModCog(bot))
```
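The `timeout` command converts the human-readable duration with `humanfriendly.parse_timespan`, which returns a number of seconds; the timeout end is then `utcnow() + timedelta(seconds=...)`. For example:

```python
import datetime
from humanfriendly import parse_timespan

seconds = parse_timespan("2h")  # 7200.0 -- also accepts forms like "30m", "1d", "45s"
until = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(seconds=seconds)
print(seconds, until)
```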
#### File: utils/helpers/configuration.py
```python
import discord
from discord.ext import commands
POWERED_BY_GIPHY = "https://images-ext-1.discordapp.net/external/FW8Emlcxhqqi1YsZoXVHfC6c58tPptohhqNz0GNmdYQ/https/image.ibb.co/b0Gkwo/Poweredby_640px_Black_Vert_Text.png"
def get_prefix(bot, message):
"""Gets the prefix for the server"""
try:
bot.dbcursor.execute('SELECT prefix FROM guilds WHERE guild_id=?', (message.guild.id,))
prefixes = bot.dbcursor.fetchone()
if not prefixes:
return "+"
else:
bot.dbcursor.execute('SELECT * FROM guilds WHERE guild_id=?', (message.guild.id,))
prefix = bot.dbcursor.fetchone()
return commands.when_mentioned_or(prefix[1])(bot, message)
except:
return "+"
def giphyUrl(id: str):
return f"https://media.giphy.com/media/{id}/giphy.gif"
``` |
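Note that `get_prefix` returns either a bare string (`"+"`) or the result of `commands.when_mentioned_or(prefix)`; a prefix callable passed to `commands.Bot` may return either shape. A quick illustration of the second one:

```python
from discord.ext import commands

# when_mentioned_or builds a prefix callable; invoking it with (bot, message)
# prepends the bot's mention strings to the custom prefix. Shown as a comment
# because the actual IDs depend on the logged-in bot user.
prefix_callable = commands.when_mentioned_or("!")
# prefix_callable(bot, message) -> ['<@123> ', '<@!123> ', '!']
```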
{
"source": "27thLiz/IssueBot",
"score": 2
} |
#### File: 27thLiz/IssueBot/bot.py
```python
from __future__ import print_function
# twisted imports
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol
from twisted.python import log
from github import Github
import re
import requests
import sys
class MessageHandler:
def __init__(self, bot, org, repos, gh):
self.bot = bot
self.org = org
self.repos = repos
self.github = gh
self.help_commands = ["!IssueBot-help", "!help", "!usage"]
self.current_pr = 0
self.repo = gh.get_repo("godotengine/godot")
self.pulls = []
def get_pulls(self):
all_pulls = self.repo.get_pulls()
self.pulls = []
for pr in all_pulls:
if pr.milestone != None and pr.milestone.title == "3.0":
self.pulls.append(pr)
print(self.pulls)
def parse_msg(self, name, msg, channel):
if (msg in self.help_commands):
self.print_usage(channel)
return
if (msg == "!startmeeting"):
self.get_pulls()
self.current_pr = 0
self.print_pr_link(channel)
self.print_pr(channel)
self.current_pr += 1
return
if (msg == "!next"):
self.print_pr(channel)
self.current_pr += 1
return
words = msg.split(" ")
for word in words:
if word.find("#") != -1:
repo = ""
split = word.split('#', 1)
try_repo = split[0]
res = re.match(r"[0-9]+", split[1])
                if not res:
                    continue  # not an issue reference; keep scanning the remaining words
issue = res.group(0)
if word.find("/") != -1:
repo = word.split("/", 1)[0]
elif try_repo != "":
repo = try_repo
else:
repo = "godot"
if repo in self.repos:
self.generate_answer(repo, issue, channel)
else:
self.print_wrong_usage(channel, repo)
def print_pr(self, channel):
self.generate_answer("godot", str(
self.pulls[self.current_pr].number), channel)
def print_pr_link(self, channel):
message = "List of 3.0 PRs: https://github.com/godotengine/godot/pulls?q=is%3Aopen+is%3Apr+milestone%3A3.0"
self.bot.msg(channel, message)
def print_usage(self, channel):
message = "Usage: [repo]/#[issue_number]\n" + \
self.get_available_repos()
self.bot.msg(channel, message)
def print_wrong_usage(self, channel, repo):
message = "Unknown repository \"" + repo + "\"\n" + self.get_available_repos()
self.bot.msg(channel, message)
def get_available_repos(self):
return "Available repositories: godot, demos, docs, assetlib, escoria, collada"
def generate_answer(self, repo, issue, channel):
repo_name = self.repos[repo]
r = requests.get(
"https://api.github.com/repos/godotengine/" + repo_name + "/issues/" + issue)
if r.status_code == 200:
response = r.json()
title = response["title"]
long_url = response["html_url"]
header = {'user-agent': 'IssueBot/0.0.1',
"content-type": "application/x-www-form-urlencoded"}
body = {"url": long_url}
r = requests.post("https://git.io/", headers=header, data=body)
if r.status_code == 201:
#send_answer(repo, issue, title, r.headers["Location"], channel)
if repo == "godot":
repo = "#"
else:
repo = repo + "/#"
message = repo + issue + ": " + \
title + " | " + r.headers["Location"]
#sendmsg(message, channel)
self.bot.msg(channel, message)
class IssueBot(irc.IRCClient):
"""Simple irc bot that resolves Github issues to links"""
nickname = "IssueBot"
ignore = ["goBot", "[-einbot2-]", "http", "https"]
gh = Github()
def connectionMade(self):
irc.IRCClient.connectionMade(self)
self.msgHandler = MessageHandler(self, "godotengine",
{"godot": "godot", "demos": "godot-demo-projects", "docs": "godot-docs",
"assetlib": "asset-library", "escoria": "escoria", "collada": "collada-exporter",
"design": "godot-design"},
self.gh)
self.repo = self.gh.get_repo("godotengine/godot")
def connectionLost(self, reason):
irc.IRCClient.connectionLost(self, reason)
def ignore_message(self, user, message):
if user in self.ignore:
return True
for ignore_str in self.ignore:
if ignore_str in message:
return True
return False
def signedOn(self):
for channel in self.factory.channels:
self.join(channel)
def privmsg(self, user, channel, msg):
user = user.split('!', 1)[0]
if not self.ignore_message(user, msg):
self.msgHandler.parse_msg(user, msg, channel)
class IssueBotFactory(protocol.ClientFactory):
"""A factory for IssueBots.
A new protocol instance will be created each time we connect to the server.
"""
protocol = IssueBot
def __init__(self, channels):
self.channels = channels
def clientConnectionLost(self, connector, reason):
"""If we get disconnected, reconnect to server."""
connector.connect()
def clientConnectionFailed(self, connector, reason):
print("connection failed:", reason)
reactor.stop()
if __name__ == '__main__':
# initialize logging
log.startLogging(sys.stdout)
# create factory protocol and application
f = IssueBotFactory(["#godotengine", "#godotengine-devel",
"#godotengine-docs", "#godotengine-atelier"])
# connect factory to this host and port
reactor.connectTCP("irc.freenode.net", 6667, f)
# run bot
reactor.run()
``` |
{
"source": "27x2/BlueScan",
"score": 3
} |
#### File: BlueScan/src/BlueScan.py
```python
from virustotal import *
from abuseipdb import *
from badip import *
from xforceibm import *
import argparse
def main():
print(" ____ _ ___ ______ ___")
print("| _ \ | | |__ \ |____ | |__ \\")
print("| |_) || | _ _ ___ ) | / / __ __ ) |")
print("| _ < | || | | | / _ \ / / / / \ \/ / / /")
print("| |_) || || |_| || __/ / /_ / / > < / /_")
print("|____/ |_| \__,_| \___||____| /_/ /_/\_\|____|")
print('\n')
print('Use -h or --help')
    print('usage: BlueScan.py [-h] [-H HASH] [-i IP] [-d DOMAIN]')
    print('Scan hashes, IPs and domains online')
    parser = argparse.ArgumentParser(description='Scan hashes, IPs and domains online',
                                     epilog='Hope you like the program; for any feedback, please contact me: <EMAIL>')
parser.add_argument('-H', '--hash', action='store',default=None, help='check your hash')
parser.add_argument('-i', '--ip', action='store',default=None, help='check your IP')
parser.add_argument('-d', '--domain', action='store',default=None, help='check your domain')
args = parser.parse_args()
if args.hash is not None:
check_hash(args.hash)
elif args.ip is not None:
print("[+] Your IP: "+args.ip)
print("|____Result:")
print("-------[-] Virus total: ")
check_ip(args.ip)
print("|\n|\n-------[-] AbuseIPDB: ")
check_ip_abuseipdb(args.ip)
print("|\n|\n-------[-] BadIP: ")
check_ip_badip(args.ip)
print("|\n|\n-------[-] X-force IBM: ")
check_ip_xforceibm(args.ip)
elif args.domain is not None:
check_domains(args.domain)
else:
return 0
if __name__ == "__main__":
main()
``` |
{
"source": "280185386/AlgoPlus",
"score": 2
} |
#### File: AdvancedCookbook/spread_trading/spread_trading_base.py
```python
from time import sleep, perf_counter as timer
from AlgoPlus.CTP.TraderApi import TraderApi
from AlgoPlus.CTP.ApiStruct import *
class LocalOrderInfo(BaseField):
    _fields_ = [
        ('ExchangeID', c_char * 9),  # exchange ID
        ('InstrumentID', c_char * 31),  # instrument ID
        ('OrderRef', c_char * 13),  # order reference
        ('Direction', c_char * 1),  # buy/sell direction
        ('OffsetFlag', c_char * 5),  # combined open/close flag
        ('LimitPrice', c_double),  # limit price
        ('VolumeTotalOriginal', c_int),  # original order volume
        ('VolumeTotal', c_int),  # remaining volume
        ('OrderStatus', c_char * 1),  # order status
        ('OrderSysID', c_char * 21),  # exchange order system ID, referenced by req_order_insert and process_rtn_order
        ('InputTime', c_float),  # submission time
    ]
class SpreadTradingBase(TraderApi):
def __init__(self, td_server, broker_id, investor_id, password, app_id, auth_code, md_queue=None
, page_dir='', private_resume_type=2, public_resume_type=2):
        self.server_time = b'00:00:00'  # server time
        self.md_a = None  # latest tick for contract A
        self.md_b = None  # latest tick for contract B
        self.position_status = 0  # strategy direction
        self.sig_stage = 0  # execution stage after a signal fires
        self.position_a = 0  # position in contract A
        self.position_b = 0  # position in contract B
        self.with_draw_num = 0  # number of cancellations
        self.local_order_dict = {}  # local info for every order placed
        self.local_rtn_order_list = []
        self.last_rtn_order_id = 0
        self.work_status = -1  # working status
        # parameters that must be initialized
        self.parameter_field = None
        self.order_ref = None
        self.order_ref_range = []
        # initialize parameters
        self.init_parameter()
        # # latency timing starts here
        # # uncomment the block below if you need latency data
        # self.start_time = 0  # start time
        # self.anchor_time = 0  # anchor time
        # # timer info
        # self.timer_dict = {"FrontID": 0,
        #                    "SessionID": 0,
        #                    "OrderRef": b"",
        #                    "FunctionName": "",
        #                    "OrderStatus": b"",
        #                    "StartTime": 0.0,
        #                    "AnchorTime": 0.0,
        #                    "DeltaTime": 0.0,
        #                    }
        # self.csv_file = None  # csv file object
        # self.csv_writer = None  # csv writer object
        # # latency timing ends here
self.Join()
# ############################################################################# #
    def init_parameter(self):
        """
        Initialize strategy parameters.
        :return:
        """
        pass
# ############################################################################# #
    def buy_open(self, exchange_id, instrument_id, order_price, order_vol, order_ref):
        """
        Buy to open. Pairs with sell_close to form a complete round trip.
        """
        self.req_order_insert(exchange_id, instrument_id, order_price, order_vol, order_ref, b'0', b'0')
    def sell_close(self, exchange_id, instrument_id, order_price, order_vol, order_ref):
        """
        Sell to close. Pairs with buy_open to form a complete round trip.
        SHFE and INE distinguish closing today's position from closing yesterday's.
        Only close-today is implemented here.
        """
        if exchange_id == b'SHFE' or exchange_id == b'INE':
            self.req_order_insert(exchange_id, instrument_id, order_price, order_vol, order_ref, b'1', b'3')
        else:
            self.req_order_insert(exchange_id, instrument_id, order_price, order_vol, order_ref, b'1', b'1')
    def sell_open(self, exchange_id, instrument_id, order_price, order_vol, order_ref):
        """
        Sell to open. Pairs with buy_close to form a complete round trip.
        """
        self.req_order_insert(exchange_id, instrument_id, order_price, order_vol, order_ref, b'1', b'0')
    def buy_close(self, exchange_id, instrument_id, order_price, order_vol, order_ref):
        """
        Buy to close. Pairs with sell_open to form a complete round trip.
        SHFE and INE distinguish closing today's position from closing yesterday's.
        Only close-today is implemented here.
        """
        if exchange_id == b'SHFE' or exchange_id == b'INE':
            self.req_order_insert(exchange_id, instrument_id, order_price, order_vol, order_ref, b'0', b'3')
        else:
            self.req_order_insert(exchange_id, instrument_id, order_price, order_vol, order_ref, b'0', b'1')
    def req_order_insert(self, exchange_id, instrument_id, order_price, order_vol, order_ref, direction, offset_flag):
        """
        Order insertion request. Packs the order struct and passes it to the parent method ReqOrderInsert.
        :param exchange_id: exchange ID.
        :param instrument_id: instrument ID.
        :param order_price: order price.
        :param order_vol: order volume in lots.
        :param order_ref: order reference, used to identify the order's origin.
        :param direction: buy/sell direction.
        ('buy : 0',)
        ('sell : 1',)
        :param offset_flag: open/close flag; only SHFE and INE distinguish close-today from close-yesterday.
        ('open : 0',)
        ('close : 1',)
        ('forced close : 2',)
        ('close today : 3',)
        ('close yesterday : 4',)
        ('forced reduction : 5',)
        ('local forced close : 6',)
        :return:
        """
        input_order_field = InputOrderField(
            BrokerID=self.broker_id,
            InvestorID=self.investor_id,
            ExchangeID=exchange_id,
            InstrumentID=instrument_id,
            UserID=self.investor_id,
            OrderPriceType=b'2',
            Direction=direction,
            CombOffsetFlag=offset_flag,
            CombHedgeFlag=b'1',
            LimitPrice=order_price,
            VolumeTotalOriginal=order_vol,
            TimeCondition=b'3',
            VolumeCondition=b'1',
            MinVolume=1,
            ContingentCondition=b'1',
            StopPrice=0,
            ForceCloseReason=b'0',
            IsAutoSuspend=0,
            OrderRef=str(order_ref).encode(),  # ctypes c_char arrays require bytes under Python 3
        )
        if self.ReqOrderInsert(input_order_field) == 0:
            # local order info dict
            self.local_order_dict[input_order_field.OrderRef] = LocalOrderInfo(
                ExchangeID=input_order_field.ExchangeID,
                InstrumentID=input_order_field.InstrumentID,
                OrderRef=input_order_field.OrderRef,
                Direction=input_order_field.Direction,
                OffsetFlag=input_order_field.CombOffsetFlag,
                LimitPrice=input_order_field.LimitPrice,
                VolumeTotalOriginal=input_order_field.VolumeTotalOriginal,
                VolumeTotal=input_order_field.VolumeTotalOriginal,
                OrderStatus=b'',
                OrderSysID=b'',
                InputTime=timer(),
            )
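    # Illustrative usage (an added note; the values here are hypothetical, not from the original):
    #   self.req_order_insert(b'SHFE', b'ni2001', 112000, 1, self.order_ref, b'0', b'0')
    # submits a limit buy-open order for one lot at 112000. The four wrappers above
    # map to (direction, offset_flag) pairs: buy_open (b'0', b'0'), sell_close
    # (b'1', b'1'/b'3'), sell_open (b'1', b'0'), buy_close (b'0', b'1'/b'3').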
    # def ReqOrderInsert(self, pInputOrder):
    #     """
    #     Order insertion request. Already wrapped in the parent class.
    #     No need to reimplement here unless you want latency data recorded.
    #     """
    #     super(SpreadTradingBase, self).ReqOrderInsert(pInputOrder)
    #     # latency timing starts here
    #     # uncomment this block if you need latency data
    #     self.anchor_time = timer()
    #     self.timer_dict["OrderRef"] = pInputOrder.OrderRef
    #     self.timer_dict["FunctionName"] = "ReqOrderInsert"
    #     self.timer_dict["OrderStatus"] = b""
    #     self.timer_dict["StartTime"] = self.start_time
    #     self.timer_dict["AnchorTime"] = self.anchor_time
    #     self.timer_dict["DeltaTime"] = self.anchor_time - self.start_time
    #     self.csv_writer.writerow(self.timer_dict)
    #     self.csv_file.flush()
    #     # latency timing ends here
# ############################################################################# #
    def OnRspOrderInsert(self, pInputOrder, pRspInfo, nRequestID, bIsLast):
        """
        Order insertion response. Avoid time-consuming work inside callbacks; see OnRtnOrder for the pattern.
        :param pInputOrder: instance of InputOrderField from AlgoPlus.CTP.ApiStruct.
        :param pRspInfo: instance of RspInfoField from AlgoPlus.CTP.ApiStruct, carrying the error code ErrorID and message ErrorMsg.
        :param nRequestID:
        :param bIsLast:
        :return:
        """
        if self.is_my_order(pInputOrder.OrderRef):
            if pRspInfo.ErrorID != 0:
                if pInputOrder.InstrumentID == self.parameter_field.AInstrumentID:
                    self.on_leg1_insert_fail(pInputOrder)
                elif pInputOrder.InstrumentID == self.parameter_field.BInstrumentID:
                    self.on_leg2_insert_fail(pInputOrder)
                self._write_log(f"{pRspInfo}=>{pInputOrder}")
        # # latency timing starts here
        # # uncomment this block if you need latency data
        # # avoid time-consuming work inside callbacks
        # self.anchor_time = timer()
        # self.timer_dict["FunctionName"] = "OnRspOrderInsert"
        # self.timer_dict["OrderStatus"] = b""
        # self.timer_dict["AnchorTime"] = self.anchor_time
        # self.timer_dict["DeltaTime"] = self.anchor_time - self.start_time
        # self.csv_writer.writerow(self.timer_dict)
        # self.csv_file.flush()
        # # latency timing ends here
# ############################################################################# #
    def is_my_order(self, order_ref):
        """
        Orders belonging to this strategy are identified by their OrderRef.
        """
        return order_ref.isdigit() and self.order_ref_range[0] < int(order_ref) < self.order_ref_range[1]
    def OnRtnOrder(self, pOrder):
        """
        Notified here whenever an order's status changes. Avoid time-consuming work inside
        callbacks; this method only queues the return and defers the work to process_rtn_order,
        which dispatches on pOrder.OrderStatus.
        :param pOrder: instance of OrderField from AlgoPlus.CTP.ApiStruct.
        Enumeration of OrderField.OrderStatus:
        ('all traded : 0',)
        ('partially traded, still queuing : 1',)
        ('partially traded, no longer queuing : 2',)
        ('not traded, still queuing : 3',)
        ('not traded, no longer queuing : 4',)
        ('canceled : 5',)
        ('unknown : a',)
        ('not yet triggered : b',)
        ('triggered : c',)
        Enumeration of OrderField.OrderSubmitStatus:
        ('insert submitted : 0',)
        ('cancel submitted : 1',)
        ('modify submitted : 2',)
        ('accepted : 3',)
        ('insert rejected : 4',)
        ('cancel rejected : 5',)
        ('modify rejected : 6',)
        :return:
        """
        # # latency timing starts here
        # # uncomment this block if you need latency data
        # # avoid time-consuming work inside callbacks
        # self.anchor_time = timer()
        # self.timer_dict["FunctionName"] = "OnRtnOrder"
        # self.timer_dict["OrderStatus"] = pOrder.OrderStatus
        # self.timer_dict["AnchorTime"] = self.anchor_time
        # self.timer_dict["DeltaTime"] = self.anchor_time - self.start_time
        # self.csv_writer.writerow(self.timer_dict)
        # self.csv_file.flush()
        # # latency timing ends here
        if self.is_my_order(pOrder.OrderRef):
            self.local_rtn_order_list.append(pOrder.to_dict_raw())
    def process_rtn_order(self):
        last_rtn_order_id = len(self.local_rtn_order_list)
        for rtn_order in self.local_rtn_order_list[self.last_rtn_order_id:last_rtn_order_id]:
            local_order_info = self.local_order_dict[rtn_order["OrderRef"]]
            if local_order_info.OrderSysID == b'':
                local_order_info.OrderSysID = rtn_order["OrderSysID"]
            # not traded yet
            if local_order_info.OrderStatus == b'' and rtn_order["OrderStatus"] == b'3':
                local_order_info.OrderStatus = b'3'  # not-traded state
            # fully traded
            elif rtn_order["OrderStatus"] == b'0':
                local_order_info.OrderStatus = b'0'  # fully-traded state
                if rtn_order["InstrumentID"] == self.parameter_field.AInstrumentID:
                    self.on_leg1_traded(rtn_order)
                elif rtn_order["InstrumentID"] == self.parameter_field.BInstrumentID:
                    self.on_leg2_traded(rtn_order)
            # partially traded
            elif rtn_order["OrderStatus"] == b'1':
                local_order_info.OrderStatus = b'1'  # partially-traded state
                if rtn_order["InstrumentID"] == self.parameter_field.AInstrumentID:
                    self.on_leg1_traded(rtn_order)
                elif rtn_order["InstrumentID"] == self.parameter_field.BInstrumentID:
                    self.on_leg2_traded(rtn_order)
            # cancel succeeded
            elif rtn_order["OrderStatus"] == b'5':
                local_order_info.OrderStatus = b'5'  # canceled state
                if rtn_order["InstrumentID"] == self.parameter_field.AInstrumentID:
                    self.with_draw_num += 1
                    self.on_leg1_action(rtn_order)
                elif rtn_order["InstrumentID"] == self.parameter_field.BInstrumentID:
                    self.on_leg2_action(rtn_order)
            # insert rejected
            elif rtn_order["OrderSubmitStatus"] == b'4':
                local_order_info.OrderStatus = b'9'  # insert-failed state
                if rtn_order["InstrumentID"] == self.parameter_field.AInstrumentID:
                    self.on_leg1_insert_fail(rtn_order)
                elif rtn_order["InstrumentID"] == self.parameter_field.BInstrumentID:
                    self.on_leg2_insert_fail(rtn_order)
            # cancel rejected
            elif rtn_order["OrderSubmitStatus"] == b'5':
                local_order_info.OrderStatus = b'8'  # cancel-failed state
                if rtn_order["InstrumentID"] == self.parameter_field.AInstrumentID:
                    self.on_leg1_action_fail(rtn_order)
                elif rtn_order["InstrumentID"] == self.parameter_field.BInstrumentID:
                    self.on_leg2_action_fail(rtn_order)
        self.last_rtn_order_id = last_rtn_order_id
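    # Summary of the local OrderStatus codes used above (an added note, inferred from
    # the assignments in this method and from with_draw_leg*_order in the example subclass):
    #   b'3' not traded, b'1' partially traded, b'0' fully traded, b'5' canceled,
    #   b'7' cancel requested (set locally), b'8' cancel rejected, b'9' insert rejected.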
    def on_leg1_traded(self, rtn_order):
        """
        Trading logic to run when leg one (the illiquid contract) trades.
        :param rtn_order: instance of OrderField from AlgoPlus.CTP.ApiStruct.
        :return:
        """
        pass
    def on_leg2_traded(self, rtn_order):
        """
        Trading logic to run when leg two (the liquid contract) trades.
        :param rtn_order: instance of OrderField from AlgoPlus.CTP.ApiStruct.
        :return:
        """
        pass
    def on_leg1_action(self, rtn_order):
        """
        Trading logic to run when a leg-one (illiquid contract) cancel succeeds.
        :param rtn_order: instance of OrderField from AlgoPlus.CTP.ApiStruct.
        :return:
        """
        pass
    def on_leg2_action(self, rtn_order):
        """
        Trading logic to run when a leg-two (liquid contract) cancel succeeds.
        :param rtn_order: instance of OrderField from AlgoPlus.CTP.ApiStruct.
        :return:
        """
        pass
    def on_leg1_insert_fail(self, rtn_order):
        """
        Trading logic to run when a leg-one (illiquid contract) order insert fails.
        :param rtn_order: instance of OrderField or InputOrderField from AlgoPlus.CTP.ApiStruct; use only their shared fields.
        :return:
        """
        pass
    def on_leg2_insert_fail(self, rtn_order):
        """
        Trading logic to run when a leg-two (liquid contract) order insert fails.
        :param rtn_order: instance of OrderField or InputOrderField from AlgoPlus.CTP.ApiStruct; use only their shared fields.
        :return:
        """
        pass
    def on_leg1_action_fail(self, rtn_order):
        """
        Trading logic to run when a leg-one (illiquid contract) cancel fails.
        :param rtn_order: instance of OrderField or InputOrderActionField from AlgoPlus.CTP.ApiStruct; use only their shared fields.
        :return:
        """
        pass
    def on_leg2_action_fail(self, rtn_order):
        """
        Trading logic to run when a leg-two (liquid contract) cancel fails.
        :param rtn_order: instance of OrderField or InputOrderActionField from AlgoPlus.CTP.ApiStruct; use only their shared fields.
        :return:
        """
        pass
# ############################################################################# #
    def OnRtnTrade(self, pTrade):
        """
        Notified here when an order trades. Avoid time-consuming work inside callbacks; see OnRtnOrder for the pattern.
        TradeField carries the trade price, which OrderField does not.
        If you don't need the trade price, ignore this notification and rely on OrderField.
        :param pTrade: instance of TradeField from AlgoPlus.CTP.ApiStruct.
        :return:
        """
        pass
# ############################################################################# #
    def OnErrRtnOrderInsert(self, pInputOrder, pRspInfo):
        """
        Order error notification. Avoid time-consuming work inside callbacks; see OnRtnOrder for the pattern.
        :param pInputOrder: instance of InputOrderField from AlgoPlus.CTP.ApiStruct.
        :param pRspInfo: instance of RspInfoField from AlgoPlus.CTP.ApiStruct, carrying the error code ErrorID and message ErrorMsg.
        :return:
        """
        if self.is_my_order(pInputOrder.OrderRef):
            if pRspInfo.ErrorID != 0:
                if pInputOrder.InstrumentID == self.parameter_field.AInstrumentID:
                    self.on_leg1_action_fail(pInputOrder)
                elif pInputOrder.InstrumentID == self.parameter_field.BInstrumentID:
                    self.on_leg2_action_fail(pInputOrder)
                self._write_log(f"{pRspInfo}=>{pInputOrder}")
        # # latency timing starts here
        # # uncomment this block if you need latency data
        # # avoid time-consuming work inside callbacks
        # self.anchor_time = timer()
        # self.timer_dict["FunctionName"] = "OnErrRtnOrderInsert"
        # self.timer_dict["OrderStatus"] = b""
        # self.timer_dict["AnchorTime"] = self.anchor_time
        # self.timer_dict["DeltaTime"] = self.anchor_time - self.start_time
        # self.csv_writer.writerow(self.timer_dict)
        # self.csv_file.flush()
        # # latency timing ends here
# ############################################################################# #
    def req_order_action(self, exchange_id, instrument_id, order_ref, order_sysid=b''):
        """
        Cancel request. Packs the cancel struct and passes it to the parent method ReqOrderAction.
        :param exchange_id: exchange ID
        :param instrument_id: instrument ID
        :param order_ref: order reference identifying the order's origin; the cancel targets this reference.
        :param order_sysid: exchange system ID, available from the response/notification once the insert succeeds.
        :return:
        """
        input_order_action_field = InputOrderActionField(
            BrokerID=self.broker_id,
            InvestorID=self.investor_id,
            UserID=self.investor_id,
            ExchangeID=exchange_id,
            ActionFlag=b'0',
            InstrumentID=instrument_id,
            FrontID=self.front_id,
            SessionID=self.session_id,
            OrderSysID=order_sysid,
            OrderRef=order_ref,
        )
        self.ReqOrderAction(input_order_action_field)
# ############################################################################# #
    # # latency timing starts here
    # # uncomment the block below if you need latency data
    # def ReqOrderAction(self, pInputOrderAction):
    #     """
    #     Cancel request. Already wrapped in the parent class.
    #     No need to reimplement here unless you want latency data recorded.
    #     """
    #     super(SpreadTrading, self).ReqOrderAction(pInputOrderAction)
    #     self.anchor_time = timer()
    #     self.timer_dict["OrderRef"] = pInputOrderAction.OrderRef
    #     self.timer_dict["FunctionName"] = "ReqOrderAction"
    #     self.timer_dict["OrderStatus"] = b""
    #     self.timer_dict["StartTime"] = self.start_time
    #     self.timer_dict["AnchorTime"] = self.anchor_time
    #     self.timer_dict["DeltaTime"] = self.anchor_time - self.start_time
    #     self.csv_writer.writerow(self.timer_dict)
    #     self.csv_file.flush()
    #     # latency timing ends here
# ############################################################################# #
    def OnRspOrderAction(self, pInputOrderAction, pRspInfo, nRequestID, bIsLast):
        """
        Cancel response. Avoid time-consuming work inside callbacks; see OnRtnOrder for the pattern.
        :param pInputOrderAction: instance of InputOrderActionField from AlgoPlus.CTP.ApiStruct.
        :param pRspInfo: instance of RspInfoField from AlgoPlus.CTP.ApiStruct, carrying the error code ErrorID and message ErrorMsg.
        :param nRequestID:
        :param bIsLast:
        :return:
        """
        if self.is_my_order(pInputOrderAction.OrderRef):
            if pRspInfo.ErrorID != 0:
                if pInputOrderAction.InstrumentID == self.parameter_field.AInstrumentID:
                    self.on_leg1_action_fail(pInputOrderAction)
                elif pInputOrderAction.InstrumentID == self.parameter_field.BInstrumentID:
                    self.on_leg2_action_fail(pInputOrderAction)
                self._write_log(f"{pRspInfo}=>{pInputOrderAction}")
        # # latency timing starts here
        # # uncomment this block if you need latency data
        # # avoid time-consuming work inside callbacks
        # self.anchor_time = timer()
        # self.timer_dict["FunctionName"] = "OnRspOrderAction"
        # self.timer_dict["OrderStatus"] = b""
        # self.timer_dict["AnchorTime"] = self.anchor_time
        # self.timer_dict["DeltaTime"] = self.anchor_time - self.start_time
        # self.csv_writer.writerow(self.timer_dict)
        # self.csv_file.flush()
        # # latency timing ends here
# ############################################################################# #
    def check_local_orders(self):
        """
        Check every resting order against its cancellation condition.
        :return:
        """
        try:
            for order_ref in list(self.local_order_dict):
                local_order_field = self.local_order_dict[order_ref]
                if local_order_field.OrderStatus == b'1' or local_order_field.OrderStatus == b'3':
                    if local_order_field.InstrumentID == self.parameter_field.AInstrumentID:
                        self.with_draw_leg1_order(local_order_field)
                    elif local_order_field.InstrumentID == self.parameter_field.BInstrumentID:
                        self.with_draw_leg2_order(local_order_field)
        except Exception as err:
            pass
    def with_draw_leg1_order(self, local_order_field):
        """
        Decide whether leg one's cancellation condition is met.
        :param local_order_field: local order info
        :return:
        """
        pass
    def with_draw_leg2_order(self, local_order_field):
        """
        Decide whether leg two's cancellation condition is met.
        :param local_order_field: local order info
        :return:
        """
        pass
# ############################################################################# #
    def get_order_price_l1(self, direction, offset_flag):
        """
        Get the order price for leg one (the illiquid contract).
        :param direction: b"0" means buy, anything else (b"1") means sell; note these are bytes
        :param offset_flag: b"0" means open, anything else (b"1") means close; note these are bytes
        :return: for each of buy-open, sell-close, sell-open and buy-close, check whether the
            trading condition is met; if so, return the limit price, otherwise return None.
        """
        return None
    def get_order_price_l2(self, direction):
        """
        Get the order price for leg two (the liquid contract). Unlike get_order_price_l1,
        get_order_price_l2 must always return a concrete number.
        :param direction: b"0" means buy, anything else (b"1") means sell; note these are bytes
        :return: the ask for buys, the bid for sells
        """
        if direction == b'0':
            return self.md_b.AskPrice1
        else:
            return self.md_b.BidPrice1
# ############################################################################# #
    def update_buy_spread_open(self):
        """
        Buy the spread to open.
        self.position_status tracks the strategy's position stage (0 = flat, 1 = long open/holding, 2 = closing).
        Buying the spread to open is allowed only when flat or already long the spread.
        self.sig_stage tracks signal execution (0 = idle, 1 = a signal is being executed).
        Once a signal fires, leg one is traded first, then leg two; after both legs are
        balanced, self.sig_stage is reset to 0.
        order_price is None when the trading condition is not met.
        :return:
        """
        if self.sig_stage == 0 and (self.position_status == 0 or self.position_status == 1):
            order_price = self.get_order_price_l1(b'0', b'0')
            if order_price:
                self.position_status = 1
                self.sig_stage = 1
                self.order_ref += 1
                self.buy_open(self.parameter_field.AExchangeID, self.parameter_field.AInstrumentID, order_price, self.parameter_field.Lots, self.order_ref)
                self._write_log(f"buy-spread open signal => opening leg one!")
    def update_sell_spread_close(self):
        """
        Sell the spread to close.
        self.position_status tracks the strategy's position stage (0 = flat, 1 = long open/holding, 2 = closing).
        Selling the spread to close is allowed only while long the spread.
        self.sig_stage tracks signal execution (0 = idle, 1 = a signal is being executed).
        Once a signal fires, leg one is traded first, then leg two; after both legs are
        balanced, self.sig_stage is reset to 0.
        order_price is None when the trading condition is not met.
        :return:
        """
        if self.sig_stage == 0 and self.position_status == 1:
            order_price = self.get_order_price_l1(b'1', b'1')
            if order_price:
                self.position_status = 2
                self.sig_stage = 1
                self.order_ref += 1
                self.sell_close(self.parameter_field.AExchangeID, self.parameter_field.AInstrumentID, order_price, self.parameter_field.Lots, self.order_ref)
                self._write_log(f"sell-spread close signal => closing leg one!")
    def update_sell_spread_open(self):
        """
        Sell the spread to open.
        self.position_status tracks the strategy's position stage (0 = flat, -1 = short open/holding, -2 = closing).
        Selling the spread to open is allowed only when flat or already short the spread.
        self.sig_stage tracks signal execution (0 = idle, 1 = a signal is being executed).
        Once a signal fires, leg one is traded first, then leg two; after both legs are
        balanced, self.sig_stage is reset to 0.
        order_price is None when the trading condition is not met.
        :return:
        """
        if self.sig_stage == 0 and (self.position_status == 0 or self.position_status == -1):
            order_price = self.get_order_price_l1(b'1', b'0')
            if order_price:
                self.position_status = -1
                self.sig_stage = 1
                self.order_ref += 1
                self.sell_open(self.parameter_field.AExchangeID, self.parameter_field.AInstrumentID, order_price, self.parameter_field.Lots, self.order_ref)
                self._write_log(f"sell-spread open signal => opening leg one!")
    def update_buy_spread_close(self):
        """
        Buy the spread to close.
        self.position_status tracks the strategy's position stage (0 = flat, -1 = short open/holding, -2 = closing).
        Buying the spread to close is allowed only while short the spread.
        self.sig_stage tracks signal execution (0 = idle, 1 = a signal is being executed).
        Once a signal fires, leg one is traded first, then leg two; after both legs are
        balanced, self.sig_stage is reset to 0.
        order_price is None when the trading condition is not met.
        :return:
        """
        if self.sig_stage == 0 and self.position_status == -1:
            order_price = self.get_order_price_l1(b'0', b'1')
            if order_price:
                self.position_status = -2
                self.sig_stage = 1
                self.order_ref += 1
                self.buy_close(self.parameter_field.AExchangeID, self.parameter_field.AInstrumentID, order_price, self.parameter_field.Lots, self.order_ref)
                self._write_log(f"buy-spread close signal => closing leg one!")
# ############################################################################# #
    def update_open_status(self):
        """
        Constraint on opening new positions; the cancellation count is one example.
        :return: True if opening is allowed, otherwise False.
        """
        return False
    def update_close_status(self):
        """
        Constraint on closing positions.
        :return: True if closing is allowed, otherwise False.
        """
        return False
# ############################################################################# #
    def Join(self):
        while True:
            if self.status == 0 and self.work_status >= 0:
                while not self.md_queue.empty():
                    last_md = self.md_queue.get(block=False)
                    if last_md.InstrumentID == self.parameter_field.AInstrumentID:
                        self.md_a = last_md
                        self.server_time = max(self.server_time, self.md_a.UpdateTime)
                    elif last_md.InstrumentID == self.parameter_field.BInstrumentID:
                        self.md_b = last_md
                        self.server_time = max(self.server_time, self.md_b.UpdateTime)
                if 0 < self.work_status < 4:
                    self.process_rtn_order()
                    self.check_local_orders()
                    if self.update_open_status():
                        self.update_buy_spread_open()
                        self.update_sell_spread_open()
                    elif (self.work_status == 1 or self.work_status == 3) and self.sig_stage == 0:
                        self.work_status = 2 if self.work_status == 1 else 4
                        self._write_log(f"open-pause condition triggered!")
                    if self.update_close_status():
                        self.update_buy_spread_close()
                        self.update_sell_spread_close()
                    elif (self.work_status == 1 or self.work_status == 2) and self.sig_stage == 0:
                        self.work_status = 3 if self.work_status == 1 else 4
                        self._write_log(f"close-pause condition triggered!")
                elif self.work_status >= 4:
                    self._write_log(f"both opening and closing are paused!")
                    break
                elif self.md_a is not None and self.md_b is not None and \
                        0 < self.md_a.AskPrice1 < 9999999 and 0 < self.md_a.BidPrice1 < 9999999 and 0 < self.md_b.AskPrice1 < 9999999 and 0 < self.md_b.BidPrice1 < 9999999:
                    self.work_status = 1
            else:
                sleep(1)
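    # Summary of the work_status state machine as implemented above (an added note,
    # inferred from this class and the example subclass, not an original comment):
    #   -1 = parameters not yet initialized
    #    0 = parameters loaded, waiting for valid quotes on both legs
    #    1 = trading normally (opening and closing both enabled)
    #    2 = opening paused        3 = closing paused
    #   >=4 = both paused -> Join() exits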
```
#### File: AdvancedCookbook/spread_trading/spread_trading_example.py
```python
from ctypes import *
from multiprocessing import Process, Queue
from time import perf_counter as timer
from spread_trading_base import SpreadTradingBase
from AlgoPlus.utils.base_field import BaseField
from tick_engine import TickEngine
class SpreadTradingFields(BaseField):
    _fields_ = [
        ('StrategyName', c_char * 100),
        ('StrategyID', c_int),
        ('AInstrumentID', c_char * 31),  # instrument ID of contract A
        ('APriceTick', c_double),  # minimum price tick of contract A
        ('AExchangeID', c_char * 9),  # exchange ID of contract A
        ('BInstrumentID', c_char * 31),  # instrument ID of contract B
        ('BPriceTick', c_double),  # minimum price tick of contract B
        ('BExchangeID', c_char * 9),  # exchange ID of contract B
        ('BuyOpenSpread', c_double),  # spread at which to buy-open
        ('SellCloseSpread', c_double),  # spread at which to sell-close
        ('SellOpenSpread', c_double),  # spread at which to sell-open
        ('BuyCloseSpread', c_double),  # spread at which to buy-close
        ('Lots', c_int),  # order size in lots
        ('MaxActionNum', c_int),  # maximum number of cancellations
        ('MaxPosition', c_int),  # maximum position in lots
        ('AWaitSeconds', c_float),  # seconds to wait before cancelling a contract-A order
        ('BWaitSeconds', c_float),  # seconds to wait before cancelling a contract-B order
    ]
class MySpreadTrading(SpreadTradingBase):
# ############################################################################# #
    def init_parameter(self):
        """
        Initialize strategy parameters.
        :return:
        """
        self.parameter_field = self.md_queue.get(block=False)  # strategy parameter struct
        self.order_ref = self.parameter_field.StrategyID * 10000  # order reference
        self.order_ref_range = [self.order_ref, self.order_ref + 10000]  # order reference interval
        self.work_status = 0
        self._write_log(f"strategy parameters initialized! =>{self.parameter_field}")
# ############################################################################# #
    def on_leg1_traded(self, rtn_order):
        """
        Trading logic to run when leg one (the illiquid contract) trades:
        hedge the freshly traded volume with an opposite-direction order on leg two.
        :param rtn_order: instance of OrderField from AlgoPlus.CTP.ApiStruct.
        :return:
        """
        local_order_info = self.local_order_dict[rtn_order["OrderRef"]]  # local order info
        volume_traded = local_order_info.VolumeTotal - rtn_order["VolumeTotal"]  # leg-one traded volume
        if volume_traded > 0:
            local_order_info.VolumeTotal = rtn_order["VolumeTotal"]  # leg-one remaining volume
            if rtn_order["CombOffsetFlag"] == b'0':
                self.position_a += volume_traded  # total leg-one position
            else:
                self.position_a -= volume_traded  # total leg-one position
            self.order_ref += 1
            if rtn_order["Direction"] == b'0':
                order_price = self.get_order_price_l2(b'1')  # leg-two order price
                self.sell_open(self.parameter_field.BExchangeID, self.parameter_field.BInstrumentID, order_price, volume_traded, self.order_ref)
            else:
                order_price = self.get_order_price_l2(b'0')  # leg-two order price
                self.buy_open(self.parameter_field.BExchangeID, self.parameter_field.BInstrumentID, order_price, volume_traded, self.order_ref)
    def on_leg2_traded(self, rtn_order):
        """
        Trading logic to run when leg two (the liquid contract) trades.
        :param rtn_order: instance of OrderField from AlgoPlus.CTP.ApiStruct.
        :return:
        """
        local_order_info = self.local_order_dict[rtn_order["OrderRef"]]  # local order info
        volume_traded = local_order_info.VolumeTotal - rtn_order["VolumeTotal"]  # leg-two traded volume
        if volume_traded > 0:
            local_order_info.VolumeTotal = rtn_order["VolumeTotal"]  # leg-two remaining volume
            if rtn_order["CombOffsetFlag"] == b'0':
                self.position_b += volume_traded  # total leg-two position
            else:
                self.position_b -= volume_traded  # total leg-two position
        if rtn_order["VolumeTotal"] == 0:
            self.sig_stage = 0
            if self.position_b == 0:
                self.position_status = 0
                self.local_order_dict.clear()
            self._write_log(f"legs one and two are paired! current positions, leg one: {self.position_a}, leg two: {self.position_b}")
    def on_leg1_action(self, rtn_order):
        """
        Trading logic to run when a leg-one (illiquid contract) cancel succeeds.
        :param rtn_order: instance of OrderField from AlgoPlus.CTP.ApiStruct.
        :return:
        """
        self.sig_stage = 0
        if self.position_a == 0:
            self.position_status = 0
    def on_leg2_action(self, rtn_order):
        """
        Trading logic to run when a leg-two (liquid contract) cancel succeeds:
        resubmit the remaining volume at a fresh price.
        :param rtn_order: instance of OrderField from AlgoPlus.CTP.ApiStruct.
        :return:
        """
        self.order_ref += 1
        order_price = self.get_order_price_l2(rtn_order["Direction"])  # leg-two order price
        self.req_order_insert(rtn_order["ExchangeID"], rtn_order["InstrumentID"], order_price, rtn_order["VolumeTotal"], self.order_ref, rtn_order["Direction"], rtn_order["CombOffsetFlag"])
    def on_leg1_insert_fail(self, rtn_order):
        """
        Trading logic to run when a leg-one (illiquid contract) order insert fails.
        :param rtn_order: instance of OrderField from AlgoPlus.CTP.ApiStruct.
        :return:
        """
        pass
    def on_leg2_insert_fail(self, rtn_order):
        """
        Trading logic to run when a leg-two (liquid contract) order insert fails.
        :param rtn_order: instance of OrderField from AlgoPlus.CTP.ApiStruct.
        :return:
        """
        pass
    def on_leg1_action_fail(self, rtn_order):
        """
        Trading logic to run when a leg-one (illiquid contract) cancel fails.
        :param rtn_order: instance of OrderField from AlgoPlus.CTP.ApiStruct.
        :return:
        """
        pass
    def on_leg2_action_fail(self, rtn_order):
        """
        Trading logic to run when a leg-two (liquid contract) cancel fails.
        :param rtn_order: instance of OrderField from AlgoPlus.CTP.ApiStruct.
        :return:
        """
        pass
# ############################################################################# #
    def with_draw_leg1_order(self, local_order_field):
        """
        Decide whether leg one's cancellation condition is met.
        :param local_order_field: local order info
        :return:
        """
        anchor_time = timer()
        if anchor_time - local_order_field.InputTime > self.parameter_field.AWaitSeconds:
            self.req_order_action(self.parameter_field.AExchangeID, self.parameter_field.AInstrumentID, local_order_field.OrderRef)
            local_order_field.OrderStatus = b'7'  # mark the local order as cancel-requested to avoid duplicate cancels
            self._write_log(f"cancelling resting leg-one order! OrderRef={local_order_field.OrderRef}")
    def with_draw_leg2_order(self, local_order_field):
        """
        Decide whether leg two's cancellation condition is met.
        :param local_order_field: local order info
        :return:
        """
        anchor_time = timer()
        if anchor_time - local_order_field.InputTime > self.parameter_field.BWaitSeconds:
            self.req_order_action(self.parameter_field.BExchangeID, self.parameter_field.BInstrumentID, local_order_field.OrderRef)
            local_order_field.OrderStatus = b'7'  # mark the local order as cancel-requested to avoid duplicate cancels
            self._write_log(f"cancelling resting leg-two order! OrderRef={local_order_field.OrderRef}")
# ############################################################################# #
    def get_order_price_l1(self, direction, offset_flag):
        """
        Get the order price for leg one (the illiquid contract).
        :param direction: b"0" means buy, anything else (b"1") means sell; note these are bytes
        :param offset_flag: b"0" means open, anything else (b"1") means close; note these are bytes
        :return: for each of buy-open, sell-close, sell-open and buy-close, check whether the
            trading condition is met; if so, return the limit price, otherwise return None.
        """
        order_price = None
        try:
            if direction == b'0':
                # pick the open or close threshold first, then compare the spread against it
                threshold = self.parameter_field.BuyOpenSpread if offset_flag == b'0' else self.parameter_field.BuyCloseSpread
                if self.md_a.BidPrice1 - self.md_b.BidPrice1 < threshold:
                    order_price = self.md_a.BidPrice1 + self.parameter_field.APriceTick
            else:
                threshold = self.parameter_field.SellOpenSpread if offset_flag == b'0' else self.parameter_field.SellCloseSpread
                if self.md_a.AskPrice1 - self.md_b.AskPrice1 > threshold:
                    order_price = self.md_a.AskPrice1 - self.parameter_field.APriceTick
        finally:
            return order_price
    # def get_order_price_l2(self, direction):
    #     """
    #     Get the order price for leg two (the liquid contract). Unlike get_order_price_l1,
    #     get_order_price_l2 must always return a concrete number.
    #     :param direction: b"0" means buy, anything else (b"1") means sell; note these are bytes
    #     :return: the ask for buys, the bid for sells
    #     """
    #     if direction == '0':
    #         return self.md_b.AskPrice1
    #     else:
    #         return self.md_b.BidPrice1
# ############################################################################# #
    def update_open_status(self):
        """
        Constraint on opening new positions; the cancellation count is one example.
        :return: True if opening is allowed, otherwise False.
        """
        if self.with_draw_num < self.parameter_field.MaxActionNum and self.position_a < self.parameter_field.MaxPosition:
            return True
        return False
    def update_close_status(self):
        """
        Constraint on closing positions.
        :return: True if closing is allowed, otherwise False.
        """
        return True
if __name__ == "__main__":
from account_info import my_future_account_info_dict
future_account = my_future_account_info_dict['SimNow']
    # shared queue
    share_queue = Queue(maxsize=100)
    my_strategy_parameter_field = SpreadTradingFields(
        StrategyName=b'AlgoPlus Spread Trading Exemplification',
        StrategyID=1,
        AInstrumentID=b'ni2001',
        APriceTick=10,
        AExchangeID=b'SHFE',
        BInstrumentID=b'ni1912',  # instrument ID of contract B
        BPriceTick=10,  # minimum price tick of contract B
        BExchangeID=b'SHFE',  # exchange ID of contract B
        BuyOpenSpread=30000,  # spread at which to buy-open
        SellCloseSpread=0,  # spread at which to sell-close
        SellOpenSpread=50000,  # spread at which to sell-open
        BuyCloseSpread=40000,  # spread at which to buy-close
        Lots=10,  # order size in lots
        MaxActionNum=100,  # maximum number of cancellations
        MaxPosition=60,  # maximum position in lots
        AWaitSeconds=1,  # seconds to wait before cancelling a contract-A order
        BWaitSeconds=1,  # seconds to wait before cancelling a contract-B order
    )
share_queue.put(my_strategy_parameter_field)
    # market data process
md_process = Process(target=TickEngine, args=(future_account.server_dict['MDServer']
, future_account.broker_id
, future_account.investor_id
, future_account.password
, future_account.app_id
, future_account.auth_code
, future_account.instrument_id_list
, [share_queue]
, future_account.md_page_dir)
)
    # trader process
trader_process = Process(target=MySpreadTrading, args=(future_account.server_dict['TDServer']
, future_account.broker_id
, future_account.investor_id
, future_account.password
, future_account.app_id
, future_account.auth_code
, share_queue
, future_account.td_page_dir)
)
#
md_process.start()
trader_process.start()
md_process.join()
trader_process.join()
```
#### File: AlgoPlus/utils/base_field.py
```python
from ctypes import *
class BaseField(Structure):
def _to_bytes(self, value):
if isinstance(value, bytes):
return value
else:
return bytes(str(value), encoding="utf-8")
def to_dict_raw(self):
results = {}
for key, _ in self._fields_:
_value = getattr(self, key)
results[key] = _value
return results
def to_dict(self):
results = {}
for key, _ in self._fields_:
_value = getattr(self, key)
if isinstance(_value, bytes):
results[key] = _value.decode(encoding="gb18030", errors="ignore")
else:
results[key] = _value
return results
def to_list(self):
results = []
for key, _ in self._fields_:
_value = getattr(self, key)
if isinstance(_value, bytes):
results.append(_value.decode(encoding="gb18030", errors="ignore"))
else:
results.append(_value)
return results
def __repr__(self):
return f"{self.__class__.__name__}->{self.to_dict()}"
@classmethod
def from_dict(cls, obj):
return cls(**obj)
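# A minimal usage sketch (not part of the original file) showing the
# to_dict/from_dict round trip on a hypothetical subclass:
if __name__ == "__main__":
    class DemoField(BaseField):
        _fields_ = [
            ('InstrumentID', c_char * 31),
            ('LimitPrice', c_double),
        ]
    demo = DemoField(InstrumentID=b'ni2001', LimitPrice=112000.0)
    print(demo.to_dict())      # bytes fields decoded via gb18030: {'InstrumentID': 'ni2001', 'LimitPrice': 112000.0}
    clone = DemoField.from_dict(demo.to_dict_raw())  # raw bytes round-trip back into a struct
    print(clone)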
``` |
{
"source": "2803Aditya/My-Python-Library",
"score": 3
} |
#### File: MyLibrary/PlotMyGoogleSheet28/PlotMyGoogleSheet.py
```python
import gspread
import matplotlib.pyplot as plt
from gspread_dataframe import get_as_dataframe
import seaborn as sns
class PlotMyGoogleSheet():
# Constructor
def __init__(self, link):
        # Authenticating using service account
# Open the file using the sheet URL.
# Select only sheet1
self.sh = gspread.service_account(filename='credentials.json').open_by_url(link).sheet1
# Line plot b/w col1 and col2 using matplotlib
def plot(self, x, y):
# Sheet to Dataframe
df = get_as_dataframe(self.sh) # It will return the worksheets contents as a Dataframe
df = df.dropna(how = "all", axis = 1) # Do not include unnamed columns
sns.set_style('darkgrid')
plt.figure(figsize = (15, 15))
sns.lineplot(x = df[x], y = df[y])
plt.xlabel(x)
plt.ylabel(y)
plt.savefig(x+' VS '+y+'.png') # Save the figure
plt.show() # Render the figure
print('Figure saved...')
# Return column names of our sheet
def get_cols(self):
# Sheet to Dataframe
df = get_as_dataframe(self.sh) # It will return the worksheets contents as a Dataframe
df = df.dropna(how = "all", axis = 1) # Do not include unnamed columns
return df.columns.to_list()
obj = PlotMyGoogleSheet('https://docs.google.com/spreadsheets/d/1SrZfvr2ee54r7HR1jGtAE9zHIj_Y-UzK9ok8bdwkpqc/edit#gid=0')
print(obj.get_cols())
``` |
{
"source": "2811299/PythonCook",
"score": 3
} |
#### File: PythonCook/python2.0/Time_and_date.py
```python
import datetime
import time
def ts_to_date_str(ts):
    """ 1548509506 -> 2019-01-26 21:31:46 """
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts))
print datetime.datetime.now()
print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
print time.strftime('%Y-%m-%d', time.localtime(time.time()))
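# A minimal companion sketch (an addition, not in the original file): the inverse
# conversion, date string -> Unix timestamp, matching the commented experiments below.
def date_str_to_ts(date_str):
    """ '2019-01-26 21:31:46' -> 1548509506 """
    return int(time.mktime(time.strptime(date_str, '%Y-%m-%d %H:%M:%S')))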
#
#
#
#
# print int(time.mktime(time.strptime('2019-02-21 23:59:59','%Y-%m-%d %H:%M:%S')))
#
#
#
# print int(time.mktime(time.strptime('2019-02-21','%Y-%m-%d')))
# print int(time.mktime(time.strptime('2019-02-22','%Y-%m-%d')))
#
# def get_day_ts_range(day):
# day_begin = int(time.mktime(time.strptime(day, '%Y-%m-%d')))
# day_end = day_begin + 3600 * 24 -1
# print day_begin
# print day_end
#
# get_day_ts_range('2019-02-22')
# print time.strftime('%Y-%m-%d', time.localtime(time.time()))
# import datetime
# def get_date_from_datestr(date_str):
#
# try:
# print type(datetime.datetime.strptime(startDate, '%Y-%m-%d'))
# return datetime.datetime.strptime(startDate, '%Y-%m-%d')
# except ValueError:
# return None
#
# startDate = "2018-2-11"
# a=get_date_from_datestr(startDate)
# startDate = "2018-2-13"
# b=get_date_from_datestr(startDate)
#
# print (b - a).days
#
#
#
# while a <= b:
# print str(a)
# print type(str(a.date()))
# a = a + datetime.timedelta(days=1)
#
``` |
{
"source": "282951387/KFrame",
"score": 3
} |
#### File: _gcm/builder/gcm_http.py
```python
from requests_toolbelt import MultipartEncoder
import requests
import json
def do_post(url, path):
print 'Post file %s to %s' % (path, url)
m = MultipartEncoder(
fields = {
'name' : 'filename',
'filename' : (path, open(path, 'rb'), 'application/octet-stream')
})
#print m
#print m.content_type
r = requests.post(url, verify=True, data=m, headers={'Content-Type': m.content_type})
retjson = json.loads(r.text)
if retjson['code'] != 0 or retjson['msg'] != 'ok':
raise Exception('[ERROR] upload [%s] to web server failed, code=%d msg=%s' % (path, retjson['code'], retjson['msg']))
else:
print 'upload [%s] to web server success, info = %s' % (path, r.text)
#do_post('https://version.laywoodgames.com/upload.php', 'sgame_online_svn_12345678_201808311343.tar.gz')
``` |
{
"source": "2838778326/chegg-scraper",
"score": 3
} |
#### File: chegg-scraper/cheggscraper/Downloader.py
```python
import argparse
import json
import os
from importlib.resources import read_text
from .CheggScraper import CheggScraper
def main():
"""
User Friendly Downloader for chegg homework help pages
:return: Nothing
:rtype: None
"""
conf = json.loads(read_text('cheggscraper', 'conf.json'))
default_save_file_format = conf.get('default_save_file_format')
default_cookie_file_path = conf.get('default_cookie_file_path')
ap = argparse.ArgumentParser()
ap.add_argument('-c', '--cookie', default=default_cookie_file_path,
                    help='path of cookie file', dest='cookie_file')
ap.add_argument('-u', '--url', help='url of chegg homework-help, put inside " "',
type=str, dest='url')
# FIXME: DIFF TAGS FOR FILE FORMAT AND BASE PATH
ap.add_argument('-s', '--save',
help='file path, where you want to save, put inside " " eg: test.html or'
' D:\\myFolder\\test.html or /home/test.html',
type=str, default=default_save_file_format, dest='file_format')
args = vars(ap.parse_args())
if not os.path.exists(path=args['cookie_file']):
raise Exception(f'{args["cookie_file"]} does not exists')
if not args.get('url'):
args.update({'url': input('Enter url of the homework-help: ')})
Chegg = CheggScraper(cookie_path=args['cookie_file'])
print(Chegg.url_to_html(args['url'], file_name_format=args['file_format']))
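# Convenience entry point (an addition, not in the original module, which is
# presumably invoked through the package's console script):
if __name__ == '__main__':
    main()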
``` |
{
"source": "284885166/djangodemo",
"score": 2
} |
#### File: djangodemo/learn/views.py
```python
from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.urls import reverse
def index(request):
return HttpResponse(u"欢迎自学!!")
def add(request):
a = request.GET['a']
b = request.GET['b']
# c = int(a) + int(b)
# return HttpResponse(str(c))
    return HttpResponseRedirect(
        reverse('add2', args=(a, b))  # args must be an ordered tuple, not a set
    )
def add2(request, a, b):
c = int(a) + int(b)
return HttpResponse(str(c))
def home(request):
items = [u'张三', u'李四', u'王五']
return render(request, 'learn/home.html', {'string':u'张三', 'items':items })
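# A hypothetical urls.py wiring for these views (an illustrative sketch, not part
# of this file; the route names below are assumptions, except 'add2', which the
# reverse() call in add() relies on):
# from django.conf.urls import url
# from learn import views
# urlpatterns = [
#     url(r'^$', views.index, name='index'),
#     url(r'^add/$', views.add, name='add'),
#     url(r'^add/(\d+)/(\d+)/$', views.add2, name='add2'),
#     url(r'^home/$', views.home, name='home'),
# ]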
``` |
{
"source": "285219011/hello-world",
"score": 2
} |
#### File: python/kernel_tests/stochastic_graph_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
sg = tf.contrib.bayesflow.stochastic_graph
distributions = tf.contrib.distributions
class NormalNotParam(distributions.Normal):
@property
def is_reparameterized(self):
return False
class DistributionTensorTest(tf.test.TestCase):
def testConstructionAndValue(self):
with self.test_session() as sess:
mu = [0.0, 0.1, 0.2]
sigma = tf.constant([1.1, 1.2, 1.3])
sigma2 = tf.constant([0.1, 0.2, 0.3])
with self.assertRaisesRegexp(ValueError, 'No value type currently set'):
prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
prior_0 = sg.DistributionTensor(
distributions.Normal, mu=mu, sigma=sigma,
dist_value_type=sg.SampleAndReshapeValue())
with sg.value_type(sg.SampleAndReshapeValue()):
prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
likelihood = sg.DistributionTensor(
distributions.Normal, mu=prior, sigma=sigma2)
coll = tf.get_collection(sg.STOCHASTIC_TENSOR_COLLECTION)
self.assertEqual(coll, [prior_0, prior, likelihood])
prior_0 = tf.identity(prior_0)
prior = tf.identity(prior) # Also works: tf.convert_to_tensor(prior)
likelihood = tf.identity(likelihood)
# Mostly a smoke test for now...
prior_0_val, prior_val, _ = sess.run(
[prior_0, prior, likelihood])
self.assertEqual(prior_0_val.shape, prior_val.shape)
# These are different random samples from the same distribution,
# so the values should differ.
self.assertGreater(np.abs(prior_0_val - prior_val).sum(), 1e-6)
def testMeanValue(self):
with self.test_session() as sess:
mu = [0.0, -1.0, 1.0]
sigma = tf.constant([1.1, 1.2, 1.3])
with sg.value_type(sg.MeanValue()):
prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
prior_mean = prior.mean()
prior_value = prior.value()
prior_mean_val, prior_value_val = sess.run([prior_mean, prior_value])
self.assertAllEqual(prior_mean_val, mu)
self.assertAllEqual(prior_mean_val, prior_value_val)
def testSampleAndReshapeValue(self):
with self.test_session() as sess:
mu = [[0.0, -1.0, 1.0], [0.0, -1.0, 1.0]]
sigma = tf.constant([[1.1, 1.2, 1.3], [1.1, 1.2, 1.3]])
with sg.value_type(sg.SampleAndReshapeValue()):
prior_single = sg.DistributionTensor(
distributions.Normal, mu=mu, sigma=sigma)
prior_single_value = prior_single.value()
self.assertEqual(prior_single_value.get_shape(), (2, 3))
prior_single_value_val = sess.run([prior_single_value])[0]
self.assertEqual(prior_single_value_val.shape, (2, 3))
with sg.value_type(sg.SampleAndReshapeValue(n=2)):
prior_double = sg.DistributionTensor(
distributions.Normal, mu=mu, sigma=sigma)
prior_double_value = prior_double.value()
self.assertEqual(prior_double_value.get_shape(), (4, 3))
prior_double_value_val = sess.run([prior_double_value])[0]
self.assertEqual(prior_double_value_val.shape, (4, 3))
def testSampleValue(self):
with self.test_session() as sess:
mu = [[0.0, -1.0, 1.0], [0.0, -1.0, 1.0]]
sigma = tf.constant([[1.1, 1.2, 1.3], [1.1, 1.2, 1.3]])
with sg.value_type(sg.SampleValue()):
prior_single = sg.DistributionTensor(
distributions.Normal, mu=mu, sigma=sigma)
prior_single_value = prior_single.value()
self.assertEqual(prior_single_value.get_shape(), (1, 2, 3))
prior_single_value_val = sess.run([prior_single_value])[0]
self.assertEqual(prior_single_value_val.shape, (1, 2, 3))
with sg.value_type(sg.SampleValue(n=2)):
prior_double = sg.DistributionTensor(
distributions.Normal, mu=mu, sigma=sigma)
prior_double_value = prior_double.value()
self.assertEqual(prior_double_value.get_shape(), (2, 2, 3))
prior_double_value_val = sess.run([prior_double_value])[0]
self.assertEqual(prior_double_value_val.shape, (2, 2, 3))
def testDistributionEntropy(self):
with self.test_session() as sess:
mu = [0.0, -1.0, 1.0]
sigma = tf.constant([1.1, 1.2, 1.3])
with sg.value_type(sg.MeanValue()):
prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
entropy = prior.entropy()
deep_entropy = prior.entropy()
expected_deep_entropy = distributions.Normal(
mu=mu, sigma=sigma).entropy()
entropies = sess.run([entropy, deep_entropy, expected_deep_entropy])
self.assertAllEqual(entropies[2], entropies[0])
self.assertAllEqual(entropies[1], entropies[0])
class ValueTypeTest(tf.test.TestCase):
def testValueType(self):
type_mean = sg.MeanValue()
type_reshape = sg.SampleAndReshapeValue()
type_full = sg.SampleValue()
with sg.value_type(type_mean):
self.assertEqual(sg.get_current_value_type(), type_mean)
with sg.value_type(type_reshape):
self.assertEqual(sg.get_current_value_type(), type_reshape)
with sg.value_type(type_full):
self.assertEqual(sg.get_current_value_type(), type_full)
self.assertEqual(sg.get_current_value_type(), type_mean)
with self.assertRaisesRegexp(ValueError, 'No value type currently set'):
sg.get_current_value_type()
class TestSurrogateLosses(tf.test.TestCase):
def testPathwiseDerivativeDoesNotAddSurrogateLosses(self):
with self.test_session():
mu = [0.0, 0.1, 0.2]
sigma = tf.constant([1.1, 1.2, 1.3])
with sg.value_type(sg.SampleAndReshapeValue()):
prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
likelihood = sg.DistributionTensor(
distributions.Normal, mu=prior, sigma=sigma)
self.assertTrue(prior.distribution.is_reparameterized)
self.assertTrue(likelihood.distribution.is_reparameterized)
loss = tf.square(tf.identity(likelihood) - [0.0, 0.1, 0.2])
sum_loss = tf.reduce_sum(loss)
surrogate_from_loss = sg.surrogate_losses([loss])
surrogate_from_sum_loss = sg.surrogate_losses([sum_loss])
surrogate_from_both = sg.surrogate_losses(
[loss, sum_loss])
# Pathwise derivative terms do not require score function
# surrogate losses.
self.assertEqual(surrogate_from_loss, [])
self.assertEqual(surrogate_from_sum_loss, [])
self.assertEqual(surrogate_from_both, [])
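  # Background note (added for readability): for a non-reparameterized stochastic
  # node x with density p(x), the score-function (REINFORCE) surrogate for a
  # downstream loss L is log p(x) * stop_gradient(L), whose gradient matches
  # E[L * d/dtheta log p(x)]. The expected values asserted in _testSurrogateLoss
  # below are exactly these log_pdf * stop_gradient(loss) terms.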
def _testSurrogateLoss(self, session, losses, expected, xs):
sf_losses = sg.surrogate_losses(losses)
n = len(expected)
self.assertEqual(len(expected), len(sf_losses))
values = session.run(list(expected) + sf_losses)
# Test forward surrogate losses
if isinstance(expected, set):
# Hack: sort the two halves of the values by norm, and compare
# those
sorted_expected = sorted(values[:n], key=np.linalg.norm)
sorted_losses = sorted(values[n:], key=np.linalg.norm)
self.assertAllClose(sorted_expected, sorted_losses)
else:
# Expected losses in a particular order
self.assertAllClose(values[:n], values[n:])
# Test backprop
expected_grads = tf.gradients(ys=losses + list(expected), xs=xs)
sf_grads = tf.gradients(ys=losses + sf_losses, xs=xs)
self.assertEqual(len(expected_grads), len(sf_grads))
n_grad = len(expected_grads)
grad_values = session.run(expected_grads + sf_grads)
self.assertAllClose(grad_values[:n_grad], grad_values[n_grad:])
def testSurrogateLoss(self):
with self.test_session() as sess:
mu = tf.constant([0.0, 0.1, 0.2])
sigma = tf.constant([1.1, 1.2, 1.3])
with sg.value_type(sg.SampleAndReshapeValue()):
prior = sg.DistributionTensor(NormalNotParam, mu=mu, sigma=sigma)
likelihood = sg.DistributionTensor(
NormalNotParam, mu=prior, sigma=sigma)
prior_2 = sg.DistributionTensor(NormalNotParam, mu=mu, sigma=sigma)
loss = tf.square(tf.identity(likelihood) - mu)
part_loss = tf.square(tf.identity(prior) - mu)
sum_loss = tf.reduce_sum(loss)
loss_nodeps = tf.square(tf.identity(prior_2) - mu)
# For ground truth, use the stop-gradient versions of the losses
loss_nograd = tf.stop_gradient(loss)
loss_nodeps_nograd = tf.stop_gradient(loss_nodeps)
sum_loss_nograd = tf.stop_gradient(sum_loss)
# These score functions should ignore prior_2
self._testSurrogateLoss(
session=sess,
losses=[loss],
expected=set([
likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd,
prior.distribution.log_pdf(prior.value()) * loss_nograd]),
xs=[mu, sigma])
self._testSurrogateLoss(
session=sess,
losses=[loss, part_loss],
expected=set([
likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd,
(prior.distribution.log_pdf(prior.value())
* tf.stop_gradient(part_loss + loss))]),
xs=[mu, sigma])
self._testSurrogateLoss(
session=sess,
losses=[sum_loss],
expected=set([
(likelihood.distribution.log_pdf(likelihood.value())
* sum_loss_nograd),
prior.distribution.log_pdf(prior.value()) * sum_loss_nograd]),
xs=[mu, sigma])
self._testSurrogateLoss(
session=sess,
losses=[loss, sum_loss],
expected=set([
(likelihood.distribution.log_pdf(likelihood.value())
* tf.stop_gradient(loss + sum_loss)),
(prior.distribution.log_pdf(prior.value())
* tf.stop_gradient(loss + sum_loss))]),
xs=[mu, sigma])
# These score functions should ignore prior and likelihood
self._testSurrogateLoss(
session=sess,
losses=[loss_nodeps],
expected=[prior_2.distribution.log_pdf(prior_2.value())
* loss_nodeps_nograd],
xs=[mu, sigma])
# These score functions should include all terms selectively
self._testSurrogateLoss(
session=sess,
losses=[loss, loss_nodeps],
# We can't guarantee ordering of output losses in this case.
expected=set(
[(likelihood.distribution.log_pdf(likelihood.value())
* loss_nograd),
prior.distribution.log_pdf(prior.value()) * loss_nograd,
(prior_2.distribution.log_pdf(prior_2.value())
* loss_nodeps_nograd)]),
xs=[mu, sigma])
if __name__ == '__main__':
tf.test.main()
```
#### File: python/losses/loss_ops_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class AbsoluteDifferenceLossTest(tf.test.TestCase):
def setUp(self):
self._predictions = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._targets = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.absolute_difference(
self._predictions, self._predictions, weight=None)
def testAllCorrectNoLossWeight(self):
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._predictions)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets)
with self.test_session():
self.assertAlmostEqual(5.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weight = 2.3
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(5.5 * weight, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weight = 2.3
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, tf.constant(weight))
with self.test_session():
self.assertAlmostEqual(5.5 * weight, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weight = tf.constant([1.2, 0.0], shape=[2,])
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weight = tf.constant([1.2, 0.0], shape=[2, 1])
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weight = tf.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(16.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weight = tf.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(6.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weight = tf.zeros((2, 3))
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class SoftmaxCrossEntropyLossTest(tf.test.TestCase):
def testNoneWeightRaisesValueError(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.softmax_cross_entropy(logits, labels, weight=None)
def testAllCorrect(self):
with self.test_session():
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
loss = tf.contrib.losses.softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrong(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
with self.test_session():
loss = tf.contrib.losses.softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testNonZeroLossWithPythonScalarWeight(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weight = 2.3
with self.test_session():
loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight)
self.assertAlmostEqual(loss.eval(), weight * 10.0, 3)
def testNonZeroLossWithScalarTensorWeight(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weight = 2.3
with self.test_session():
loss = tf.contrib.losses.softmax_cross_entropy(
logits, labels, tf.constant(weight))
self.assertAlmostEqual(loss.eval(), weight * 10.0, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weight = tf.constant([1.2, 3.4, 5.6], shape=[3])
with self.test_session():
loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight)
self.assertAlmostEqual(loss.eval(), (1.2 + 3.4 + 5.6) * 10.0 / 3.0, 3)
def testAllWrongAllMissing(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weight = tf.constant([0, 0, 0], shape=[3])
with self.test_session():
loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight)
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testSomeMissing(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weight = tf.constant([1.2, 0, 0], shape=[3])
with self.test_session():
loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight)
self.assertAlmostEqual(loss.eval(), 12.0, 3)
def testSoftmaxWithMeasurementSpecificWeightsRaisesException(self):
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
weight = tf.constant([[3, 4, 5],
[2, 6, 0],
[8, 0, 1]])
with self.assertRaises(ValueError):
tf.contrib.losses.softmax_cross_entropy(
logits, labels, weight=weight).eval()
  def testSoftmaxLabelSmoothing(self):
    with self.test_session():
      logits = tf.constant([[100.0, -100.0, -100.0]])
      labels = tf.constant([[1, 0, 0]])
      label_smoothing = 0.1
      loss = tf.contrib.losses.softmax_cross_entropy(
          logits, labels, label_smoothing=label_smoothing)
      self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
      # With logits [100, -100, -100] the log-softmax is ~[0, -200, -200]; assuming
      # smoothing mixes the one-hot labels with a uniform distribution (L/num_classes),
      # the smoothed labels are [1 - 2L/3, L/3, L/3] and the cross entropy is
      # 2 * (L/3) * 200 = 400 * L / 3.
      expected_value = 400.0 * label_smoothing / 3.0
      self.assertAlmostEqual(loss.eval(), expected_value, 3)
class SigmoidCrossEntropyLossTest(tf.test.TestCase):
def testAllCorrectSigmoid(self):
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrongSigmoid(self):
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3)
def testAllWrongSigmoidWithMeasurementSpecificWeights(self):
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weight = tf.constant([[3, 4, 5],
[2, 6, 0],
[8, 0, 1]])
loss = tf.contrib.losses.sigmoid_cross_entropy(
logits, labels, weight=weight)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 1700.0 / 7.0, 3)
def testMultiCorrectSigmoid(self):
logits = tf.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0],
[-100.0, 100.0, 100.0]])
labels = tf.constant([[1, 0, 1],
[1, 1, 0],
[0, 1, 1]])
loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
with self.test_session():
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testSigmoidLabelSmoothingCorrect(self):
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0]])
labels = tf.constant([[1, 0, 0]])
label_smoothing = 0.1
loss = tf.contrib.losses.sigmoid_cross_entropy(
logits, labels, label_smoothing=label_smoothing)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
expected_value = 400.0 * label_smoothing / 9.0
self.assertAlmostEqual(loss.eval(), expected_value, 3)
class LogLossTest(tf.test.TestCase):
def setUp(self):
predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
targets = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape((2, 3))
self._np_predictions = predictions
self._np_targets = targets
epsilon = 1e-7
self._expected_losses = np.multiply(
targets, np.log(predictions + epsilon)) + np.multiply(
1 - targets, np.log(1 - predictions + epsilon))
self._predictions = tf.constant(predictions)
self._targets = tf.constant(targets)
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.log_loss(self._targets, self._targets, weight=None)
def testAllCorrectNoLossWeight(self):
loss = tf.contrib.losses.log_loss(self._targets, self._targets)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testAllCorrectNoLossWeightWithPlaceholder(self):
tf_predictions = tf.placeholder(tf.float32, shape=self._np_targets.shape)
loss = tf.contrib.losses.log_loss(tf_predictions, self._targets)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(feed_dict={
tf_predictions: self._np_targets}), 3)
def testNonZeroLoss(self):
loss = tf.contrib.losses.log_loss(self._predictions, self._targets)
with self.test_session():
self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weight = 2.3
loss = tf.contrib.losses.log_loss(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weight = 2.3
loss = tf.contrib.losses.log_loss(
self._predictions, self._targets, tf.constant(weight))
with self.test_session():
self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self):
tf_predictions = tf.placeholder(tf.float32,
shape=self._np_predictions.shape)
weight = 2.3
loss = tf.contrib.losses.log_loss(
tf_predictions, self._targets, tf.constant(weight))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self):
tf_predictions = tf.placeholder(tf.float32, shape=[None, None])
weight = 2.3
loss = tf.contrib.losses.log_loss(
tf_predictions, self._targets, tf.constant(weight))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weight = tf.constant([1.2, 3.4], shape=[2])
expected_losses = np.multiply(
self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
loss = tf.contrib.losses.log_loss(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero(self):
weight = tf.constant([1.2, 0], shape=[2])
expected_losses = np.multiply(
self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape((2, 3)))
loss = tf.contrib.losses.log_loss(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0,
loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero(self):
weight = tf.constant([1.2, 0], shape=[2, 1])
expected_losses = np.multiply(
self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape((2, 3)))
loss = tf.contrib.losses.log_loss(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0,
loss.eval(), 3)
def testWeightsWithSameNumDimsButWrongShapeThrowsException(self):
weight = tf.constant(np.random.normal(size=(2, 4)), shape=[2, 4])
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.log_loss(self._predictions, self._targets, weight)
def testNonZeroLossWithMeasurementSpecificWeights(self):
weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weight)
loss = tf.contrib.losses.log_loss(
self._predictions,
self._targets,
weight=tf.constant(weight, shape=(2, 3)))
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss.eval(), 3)
def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self):
weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weight)
tf_predictions = tf.placeholder(tf.float32, shape=[2, 3])
loss = tf.contrib.losses.log_loss(
tf_predictions,
self._targets,
weight=tf.constant(weight, shape=(2, 3)))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss, 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weight = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weight)
loss = tf.contrib.losses.log_loss(
self._predictions,
self._targets,
weight=tf.constant(weight, shape=(2, 3)))
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses), loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self):
weight = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weight)
tf_predictions = tf.placeholder(tf.float32, shape=[2, 3])
tf_weight = tf.constant(weight, shape=(2, 3))
loss = tf.contrib.losses.log_loss(tf_predictions, self._targets, tf_weight)
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses), loss, 3)
def testLossWithSampleSpecificWeightsAllZero(self):
tf_weight = tf.zeros(shape=(2, 3))
loss = tf.contrib.losses.log_loss(
self._predictions, self._targets, tf_weight)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class SumOfSquaresLossTest(tf.test.TestCase):
def setUp(self):
self._predictions = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._targets = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.sum_of_squares(
self._predictions, self._predictions, weight=None)
def testAllCorrectNoLossWeight(self):
loss = tf.contrib.losses.sum_of_squares(
self._predictions, self._predictions)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = tf.contrib.losses.sum_of_squares(
self._predictions, self._targets)
with self.test_session():
self.assertAlmostEqual(49.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weight = 2.3
loss = tf.contrib.losses.sum_of_squares(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(49.5 * weight, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weight = 2.3
loss = tf.contrib.losses.sum_of_squares(
self._predictions, self._targets, tf.constant(weight))
with self.test_session():
self.assertAlmostEqual(49.5 * weight, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weight = tf.constant([1.2, 3.4], shape=[2,])
loss = tf.contrib.losses.sum_of_squares(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weight = tf.constant([1.2, 3.4], shape=[2, 1])
loss = tf.contrib.losses.sum_of_squares(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weight = tf.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = tf.contrib.losses.sum_of_squares(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(587 / 5.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weight = tf.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = tf.contrib.losses.sum_of_squares(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(18.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weight = tf.zeros((2, 3))
loss = tf.contrib.losses.sum_of_squares(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class SumOfPairwiseSquaresLossTest(tf.test.TestCase):
def setUp(self):
self._predictions = np.array([[4, 8, 12],
[8, 1, 3]])
self._targets = np.array([[1, 9, 2],
[-5, -5, 7]])
batch_size, dims = self._targets.shape
# Compute the expected loss 'manually'.
total = np.zeros((batch_size, 1))
for b in range(batch_size):
for i in range(dims):
for j in range(dims):
x = self._predictions[b, i].item() - self._predictions[b, j].item()
y = self._targets[b, i].item() - self._targets[b, j].item()
tmp = (x-y) * (x-y)
total[b] += tmp
self._expected_losses = np.divide(total, 9.0)
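    # The triple loop visits every ordered (i, j) pair, including i == j and
    # both orderings, so each batch element accumulates dims * dims = 9 terms;
    # dividing by 9.0 turns the total into a per-pair mean.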
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.sum_of_pairwise_squares(
predictions=tf.constant(self._targets),
targets=tf.constant(self._targets),
weight=None)
def testAllCorrectNoLossWeight(self):
loss = tf.contrib.losses.sum_of_pairwise_squares(
predictions=tf.constant(self._targets),
targets=tf.constant(self._targets))
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = tf.contrib.losses.sum_of_pairwise_squares(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets))
with self.test_session():
self.assertAlmostEqual(np.sum(self._expected_losses), loss.eval(), 3)
def testGradientWithZeroWeight(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
inputs = tf.ones((2, 3))
weights = tf.get_variable('weights',
shape=[3, 4],
initializer=tf.truncated_normal_initializer())
predictions = tf.matmul(inputs, weights)
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
loss = tf.contrib.losses.sum_of_pairwise_squares(
predictions,
predictions,
0)
gradients_to_variables = optimizer.compute_gradients(loss)
init_op = tf.initialize_all_variables()
with self.test_session() as sess:
sess.run(init_op)
for grad, _ in gradients_to_variables:
np_grad = sess.run(grad)
self.assertFalse(np.isnan(np_grad).any())
def testNonZeroLossWithPythonScalarWeight(self):
weight = 2.3
loss = tf.contrib.losses.sum_of_pairwise_squares(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
weight=weight)
with self.test_session():
self.assertAlmostEqual(weight * np.sum(self._expected_losses),
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weight = 2.3
loss = tf.contrib.losses.sum_of_pairwise_squares(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
weight=tf.constant(weight))
with self.test_session():
self.assertAlmostEqual(weight * np.sum(self._expected_losses),
loss.eval(), 3)
def testNonZeroLossWithScalarZeroWeight(self):
weight = 0
loss = tf.contrib.losses.sum_of_pairwise_squares(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
weight=tf.constant(weight))
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeightWithPlaceholder(self):
weight = 2.3
tf_predictions = tf.placeholder(tf.float32, shape=self._predictions.shape)
tf_targets = tf.placeholder(tf.float32, shape=self._targets.shape)
loss = tf.contrib.losses.sum_of_pairwise_squares(
predictions=tf_predictions,
targets=tf_targets,
weight=tf.constant(weight))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={
tf_predictions: self._predictions,
tf_targets: self._targets,
})
self.assertAlmostEqual(weight * np.sum(self._expected_losses), loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weight = np.asarray([2.0, 1.0]).reshape((2, 1))
expected_losses = np.multiply(weight, self._expected_losses)
loss = tf.contrib.losses.sum_of_pairwise_squares(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
weight=tf.constant(weight, shape=[2]))
with self.test_session():
self.assertAlmostEqual(np.sum(expected_losses), loss.eval(), 3)
def testZeroLossWithOneDimBatchZeroWeights(self):
weight = np.asarray([0.0, 0.0]).reshape((2, 1))
loss = tf.contrib.losses.sum_of_pairwise_squares(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
weight=tf.constant(weight, shape=[2]))
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsAndPlaceholders(self):
weight = np.asarray([1.2, 3.4]).reshape((2, 1))
expected_losses = np.multiply(weight, self._expected_losses)
tf_predictions = tf.placeholder(tf.float32, shape=self._predictions.shape)
tf_targets = tf.placeholder(tf.int32, shape=self._targets.shape)
loss = tf.contrib.losses.sum_of_pairwise_squares(
predictions=tf_predictions,
targets=tf_targets,
weight=tf.constant(weight, shape=[2]))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={
tf_predictions: self._predictions,
tf_targets: self._targets,
})
self.assertAlmostEqual(np.sum(expected_losses), loss, 3)
def testLossWithAllZeroBatchSpecificWeights(self):
weight = np.zeros((2, 1))
loss = tf.contrib.losses.sum_of_pairwise_squares(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
weight=tf.constant(weight, shape=[2]))
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class CosineDistanceLossTest(tf.test.TestCase):
def setUp(self):
self._predictions = np.asarray([[1, 0, 0], # Batch 1
[0, 0, -1],
[1, 0, 0], # Batch 2
[1, 0, 0],
[0, 0, -1], # Batch 3
[1, 0, 0]]).reshape((3, 2, 3))
self._targets = np.asarray([[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0]]).reshape((3, 2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._targets),
targets=tf.constant(self._targets),
dim=2,
weight=None)
def testAllCorrectNoWeights(self):
loss = tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._targets),
targets=tf.constant(self._targets),
dim=2)
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 5)
def testPartiallyCorrectWithIntegerValues(self):
loss = tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
dim=2)
with self.test_session():
self.assertAlmostEqual(1, loss.eval(), 5)
def testPartiallyCorrectFloatingPointValues(self):
predictions = np.matrix((
'0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
targets = np.matrix((
'0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
tf_preds = tf.constant(predictions, shape=(3, 1, 3), dtype=tf.float32)
tf_targets = tf.constant(targets, shape=(3, 1, 3), dtype=tf.float32)
loss = tf.contrib.losses.cosine_distance(tf_preds, tf_targets, dim=2)
with self.test_session():
self.assertAlmostEqual(1.0, loss.eval(), 5)
def testSampleSpecificWeights(self):
loss = tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
dim=2,
weight=tf.constant([1, 0, 0]))
with self.test_session():
self.assertEqual(1.0, loss.eval())
def testMeasurementSpecificWeights(self):
loss = tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
dim=2,
weight=tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.test_session():
self.assertEqual(3.0 / 4.0, loss.eval())
def testValueErrorThrownWithShapelessPlaceholder(self):
tf_predictions = tf.placeholder(tf.float32)
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.cosine_distance(
predictions=tf_predictions,
targets=tf.constant(self._targets),
dim=2,
weight=tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2)))
def testMeasurementSpecificWeightsWithPlaceholderWithShape(self):
tf_predictions = tf.placeholder(tf.float32, shape=self._targets.shape)
loss = tf.contrib.losses.cosine_distance(
predictions=tf_predictions,
targets=tf.constant(self._targets),
dim=2,
weight=tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._predictions})
self.assertEqual(3.0 / 4.0, loss)
def testZeroLossWhenAllSampleSpecificWeightsAreZero(self):
loss = tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
dim=2,
weight=tf.zeros((3,)))
with self.test_session():
self.assertEqual(0, loss.eval())
def testZeroLossWhenAllMeasurementSpecificWeightsAreZero(self):
loss = tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
dim=2,
weight=tf.zeros((3, 2)))
with self.test_session():
self.assertEqual(0, loss.eval())
if __name__ == '__main__':
tf.test.main()
```
#### File: tensor_forest/python/tensor_forest.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import tensorflow as tf
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
# If tree[i][0] equals this value, then i is a leaf node.
LEAF_NODE = -1
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0,
max_depth=0, num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, split_after_samples=250,
valid_leaf_threshold=1, **kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.max_depth = max_depth
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.valid_leaf_threshold = valid_leaf_threshold
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.training_library_base_dir = getattr(
self, 'training_library_base_dir', '')
self.inference_library_base_dir = getattr(
self, 'inference_library_base_dir', '')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Allow each tree to be unbalanced by up to a factor of 2.
self.max_depth = (self.max_depth or
int(2 * math.ceil(math.log(self.max_nodes, 2))))
# The Random Forest literature recommends sqrt(# features) for
# classification problems, and p/3 for regression problems.
# TODO(thomaswc): Consider capping this for large number of features.
self.num_splits_to_consider = (
self.num_splits_to_consider or
max(10, int(math.ceil(math.sqrt(self.num_features)))))
    # max_fertile_nodes doesn't affect performance, only training speed.
# We therefore set it primarily based upon space considerations.
    # Each fertile node takes up num_splits_to_consider times as much
    # space as a non-fertile node. We want the fertile nodes in total
    # to take up only as much space as the non-fertile nodes, so
num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
    # But always use at least 1000 accumulator slots.
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
# But it also never needs to be larger than the number of leaves,
# which is max_nodes / 2.
self.max_fertile_nodes = min(self.max_fertile_nodes,
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
    num_split_initializations_per_input = max(1, int(math.floor(
        self.num_splits_to_consider / self.split_after_samples)))
    self.split_initializations_per_input = getattr(
        self, 'split_initializations_per_input',
        num_split_initializations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
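# Illustrative default derivation (assumed inputs, not from the source):
# ForestHParams(num_classes=2, num_features=40).fill() keeps num_trees=100
# and derives num_splits_to_consider = max(10, ceil(sqrt(40))) = 10 and
# max_depth = 2 * ceil(log2(10000)) = 28.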
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = tf.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=tf.int32,
initializer=tf.constant(
[[-1, -1]] + [[-2, -1]] * (params.max_nodes - 1)))
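    # The root (row 0) starts as a leaf ([-1, -1], i.e. [LEAF_NODE, -1]); the
    # remaining max_nodes - 1 rows start as [-2, -1], which presumably marks
    # not-yet-allocated nodes (only -1 is documented as LEAF_NODE above).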
self.tree_thresholds = tf.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=tf.constant_initializer(-1.0))
self.tree_depths = tf.get_variable(
name=self.get_tree_name('tree_depths', tree_num),
shape=[params.max_nodes],
dtype=tf.int32,
initializer=tf.constant_initializer(1))
self.end_of_tree = tf.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=tf.int32,
initializer=tf.constant([1]))
if training:
self.non_fertile_leaves = tf.get_variable(
name=self.get_tree_name('non_fertile_leaves', tree_num),
dtype=tf.int32,
initializer=tf.constant([0]))
self.non_fertile_leaf_scores = tf.get_variable(
name=self.get_tree_name('non_fertile_leaf_scores', tree_num),
initializer=tf.constant([1.0]))
self.node_to_accumulator_map = tf.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=tf.int32,
initializer=tf.constant_initializer(-1))
self.candidate_split_features = tf.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=tf.int32,
initializer=tf.constant_initializer(-1))
self.candidate_split_thresholds = tf.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=tf.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = tf.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=tf.constant_initializer(0.0))
if training:
self.candidate_split_sums = tf.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=tf.constant_initializer(0.0))
self.accumulator_sums = tf.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=tf.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = tf.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=tf.constant_initializer(0.0))
self.candidate_split_squares = tf.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=tf.constant_initializer(0.0))
self.accumulator_squares = tf.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=tf.constant_initializer(-1.0))
else:
self.node_squares = tf.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = tf.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = tf.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variable_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with tf.device(device_assigner.get_device(i)):
self.variables.append(tree_variable_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = tf.constant(0)
self.cached = dummy.device
return self.cached
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None, variables=None,
tree_graphs=None,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
tf.logging.info('Constructing forest with params = ')
tf.logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(self.params.training_library_base_dir),
i_ops.Load(self.params.inference_library_base_dir), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = tf.split(1, self.params.num_features, input_data)
return tf.concat(1, [split_data[ind]
for ind in self.params.bagged_features[tree_num]])
def training_graph(self, input_data, input_labels):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
Returns:
The last op in the random forest training graph.
"""
tree_graphs = []
for i in range(self.params.num_trees):
with tf.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
          # TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = tf.slice(tf.shape(input_data), [0], [1])
r = tf.random_uniform(batch_size, seed=seed)
mask = tf.less(r, tf.ones_like(r) * self.params.bagging_fraction)
gather_indices = tf.squeeze(tf.where(mask), squeeze_dims=[1])
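          # Illustrative example: with bagging_fraction = 0.7 and a batch of
          # 100 rows, tf.less keeps each row with probability 0.7, so
          # gather_indices selects roughly 70 row indices for this tree.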
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = tf.gather(input_data, gather_indices)
tree_labels = tf.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
tree_graphs.append(
self.trees[i].training_graph(tree_data, tree_labels, seed))
return tf.group(*tree_graphs)
def inference_graph(self, input_data):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or placeholder for input data.
Returns:
The last op in the random forest inference graph.
"""
probabilities = []
for i in range(self.params.num_trees):
with tf.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data))
with tf.device(self.device_assigner.get_device(0)):
all_predict = tf.pack(probabilities)
return tf.reduce_sum(all_predict, 0) / self.params.num_trees
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with tf.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return tf.reduce_mean(tf.pack(sizes))
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with tf.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return tf.reduce_mean(tf.pack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with tf.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + tf.slice(class_counts, [0, 1], [-1, -1])
sums = tf.reduce_sum(smoothed, 1)
sum_squares = tf.reduce_sum(tf.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
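    # Illustrative example: a row with counts [total=4, c0=1, c1=3] smooths
    # to [2.0, 4.0] (the total column is sliced off), giving sums = 6,
    # sum_squares = 20, and an impurity of 1 - 20/36, roughly 0.444.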
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + tf.slice(class_counts, [0, 1], [-1, -1])
sums = tf.reduce_sum(smoothed, 1)
sum_squares = tf.reduce_sum(tf.square(smoothed), 1)
return sums - sum_squares / sums
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = tf.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return tf.reduce_sum(e_x2 - tf.square(e_x), 1)
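    # Illustrative example (assuming index 0 of squares also holds the
    # example count, mirroring sums): a row with sums = [2, 6] and
    # squares = [2, 20] (two samples with outputs 2 and 4) gives
    # e_x = [1, 3] and e_x2 = [1, 10], so the variance is
    # (1 - 1) + (10 - 9) = 1; the count column always contributes 0.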
def training_graph(self, input_data, input_labels, random_seed):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
Returns:
The last op in the random tree training graph.
"""
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums,
splits_squares, totals_indices, totals_sums,
totals_squares, input_leaves) = (
self.training_ops.count_extremely_random_stats(
input_data, input_labels, self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
tf.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(tf.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data, self.variables.node_to_accumulator_map,
input_leaves, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = tf.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = tf.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with tf.control_dependencies(splits_update_ops):
children = tf.squeeze(tf.slice(self.variables.tree, [0, 0], [-1, 1]),
squeeze_dims=[1])
is_leaf = tf.equal(LEAF_NODE, children)
leaves = tf.to_int32(tf.squeeze(tf.where(is_leaf), squeeze_dims=[1]))
finished = self.training_ops.finished_nodes(
leaves, self.variables.node_to_accumulator_map,
self.variables.accumulator_sums,
num_split_after_samples=self.params.split_after_samples)
# Update leaf scores.
# TODO(gilberth): Optimize this. It currently calculates counts for
# every non-fertile leaf.
with tf.control_dependencies(node_update_ops):
def dont_update_leaf_scores():
return self.variables.non_fertile_leaf_scores
def update_leaf_scores_regression():
sums = tf.gather(self.variables.node_sums,
self.variables.non_fertile_leaves)
squares = tf.gather(self.variables.node_squares,
self.variables.non_fertile_leaves)
new_scores = self._variance(sums, squares)
return tf.assign(self.variables.non_fertile_leaf_scores, new_scores)
def update_leaf_scores_classification():
counts = tf.gather(self.variables.node_sums,
self.variables.non_fertile_leaves)
new_scores = self._weighted_gini(counts)
return tf.assign(self.variables.non_fertile_leaf_scores, new_scores)
      # Because we can't have tf.Variables of size 0, we have to put in a
      # garbage value of -1 in there. Here we check for that so we don't
      # try to index into node_per_class_weights in a tf.gather with a
      # negative number.
update_nonfertile_leaves_scores_op = tf.cond(
tf.less(self.variables.non_fertile_leaves[0], 0),
dont_update_leaf_scores,
update_leaf_scores_regression if self.params.regression else
update_leaf_scores_classification)
# Calculate best splits.
with tf.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with tf.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates,
tree_threshold_updates, tree_depth_updates, new_eot) = (
self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.node_to_accumulator_map, finished, split_indices,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = tf.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
    thresholds_update_op = tf.scatter_update(
        self.variables.tree_thresholds, tree_update_indices,
        tree_threshold_updates)
depth_update_op = tf.scatter_update(
self.variables.tree_depths, tree_update_indices, tree_depth_updates)
# Update fertile slots.
with tf.control_dependencies([update_nonfertile_leaves_scores_op,
depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated,
new_nonfertile_leaves, new_nonfertile_leaves_scores) = (
self.training_ops.update_fertile_slots(
finished, self.variables.non_fertile_leaves,
self.variables.non_fertile_leaf_scores,
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
max_depth=self.params.max_depth,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = tf.tuple([new_eot], control_inputs=[new_nonfertile_leaves])
eot_update_op = tf.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
    updates.append(thresholds_update_op)
updates.append(tf.assign(
self.variables.non_fertile_leaves, new_nonfertile_leaves,
validate_shape=False))
updates.append(tf.assign(
self.variables.non_fertile_leaf_scores,
new_nonfertile_leaves_scores, validate_shape=False))
updates.append(tf.scatter_update(
self.variables.node_to_accumulator_map,
tf.squeeze(tf.slice(node_map_updates, [0, 0], [1, -1]),
squeeze_dims=[0]),
tf.squeeze(tf.slice(node_map_updates, [1, 0], [1, -1]),
squeeze_dims=[0])))
cleared_and_allocated_accumulators = tf.concat(
0, [accumulators_cleared, accumulators_allocated])
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
    # reset to 0 if they were released and not allocated, then later allocated.
split_values = tf.tile(
tf.expand_dims(tf.expand_dims(
tf.zeros_like(cleared_and_allocated_accumulators, dtype=tf.float32),
1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(tf.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(tf.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = tf.tile(
tf.expand_dims(
tf.neg(tf.ones_like(accumulators_cleared, dtype=tf.float32)), 1),
[1, self.params.num_output_columns])
total_reset = tf.tile(
tf.expand_dims(
tf.zeros_like(accumulators_allocated, dtype=tf.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = tf.concat(0, [total_cleared, total_reset])
updates.append(tf.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(tf.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = tf.tile(
tf.expand_dims(
tf.neg(tf.ones_like(cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(tf.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
return tf.group(*updates)
def inference_graph(self, input_data):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or placeholder for input data.
Returns:
The last op in the random tree inference graph.
"""
return self.inference_ops.tree_predictions(
input_data, self.variables.tree, self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
    this is the Gini impurity.
Returns:
The last op in the graph.
"""
children = tf.squeeze(tf.slice(self.variables.tree, [0, 0], [-1, 1]),
squeeze_dims=[1])
is_leaf = tf.equal(LEAF_NODE, children)
leaves = tf.to_int32(tf.squeeze(tf.where(is_leaf), squeeze_dims=[1]))
counts = tf.gather(self.variables.node_sums, leaves)
impurity = self._weighted_gini(counts)
return tf.reduce_sum(impurity) / tf.reduce_sum(counts + 1.0)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = tf.where(
tf.equal(tf.squeeze(tf.slice(self.variables.tree, [0, 0], [-1, 1])),
LEAF_NODE)).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
``` |
{
"source": "2855239858/CenLight-Traffic-Grid-Signal-Optimization-viaAction-and-State-Decomposition",
"score": 2
} |
#### File: flow/core/ppo_rnn_discrete.py
```python
import tensorflow as tf
import numpy as np
import copy
EP_MAX = 1000
EP_LEN = 200
GAMMA = 0.9
A_LR = 0.002
C_LR = 0.002
BATCH = 40
A_UPDATE_STEPS = 5
C_UPDATE_STEPS = 10
METHOD = [
dict(name='kl_pen', kl_target=0.01, lam=0.5), # KL penalty
dict(name='clip', epsilon=0.2), # Clipped surrogate objective, find this is better
][1] # choose the method for optimization
class PPO(object):
def __init__(self, s_dim=32, a_dim=1, name="meme"):
runner1 = '/cpu:0'
runner2 = '/gpu:0'
with tf.device('/cpu:0'):
self.sess = tf.Session()
            # int() keeps the placeholder dimension an integer under Python 3 division.
            self.tfs = tf.placeholder(tf.float32, [None, int(s_dim / a_dim)], 'state')
self.a_dim = a_dim
self.s_dim = s_dim
self.name = name
self.buffer_a = []
self.buffer_s = []
self.buffer_r = []
self.global_steps = 0
self.update_steps_a = 0
self.update_steps_c = 0
self.global_counter = 0
self.pre_counter = 0
self.batch_size = 0
# self.rnn_input = tf.placeholder(tf.float32, [None, TIME_STEP, INPUT_SIZE]) # (batch, height, width, channel)
# critic
with tf.variable_scope(self.name + '_critic'):
l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu,bias_initializer = tf.constant_initializer(0.01),
kernel_initializer = tf.random_normal_initializer(0., .01))
# l2 = tf.layers.dense(l1, 32, tf.nn.relu,kernel_initializer = w_init,bias_initializer = tf.constant_initializer(0.01))
self.v = tf.layers.dense(l1, 1)
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.tfdc_r - self.v
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)
# actor
self.pi, pi_params = self._build_anet(self.name + '_pi', trainable=True)
self.oldpi, oldpi_params = self._build_anet(self.name + '_oldpi', trainable=False)
self.tfa = tf.placeholder(tf.int32, [None, self.a_dim], 'action')
self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
            ## Reshape the probability distributions so per-action probabilities are easy to gather
# ratio = []
# ratio_temp = []
index = []
pi_resize = tf.reshape(self.pi,[-1,2])
oldpi_resize = tf.reshape(self.oldpi,[-1,2])
# for i in range(self.batch_size):
# for j in range(self.a_dim):
# index.append([i,j,self.tfa[i][j]])
self.a_indices = tf.stack([tf.range(tf.shape(tf.reshape(self.tfa,[-1]))[0], dtype=tf.int32), tf.reshape(self.tfa,[-1])], axis=1)
pi_prob = tf.gather_nd(params=pi_resize, indices=self.a_indices)
oldpi_prob = tf.gather_nd(params=oldpi_resize, indices=self.a_indices)
ratio_temp1 = pi_prob / (oldpi_prob + 1e-8)
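            # Illustrative example: with a_dim = 2 and a batch of 2 samples,
            # tfa = [[1, 0], [0, 1]] flattens to [1, 0, 0, 1], so a_indices
            # becomes [[0, 1], [1, 0], [2, 0], [3, 1]] and gather_nd picks one
            # probability per (sample, action) row of the reshaped [-1, 2]
            # policy outputs.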
# ratio_temp2 = tf.reshape(ratio_temp1,[-1,self.a_dim])
# ratio = tf.reduce_mean(ratio_temp2,axis = 1)
surr = ratio_temp1 * self.tfadv # surrogate loss
# for i in range(self.batch_size):
# ratio_temp = []
# for j in range(self.a_dim):
# ratio_temp.append(self.pi_resize[i][j][a_indices[i][j]]
# /(self.oldpi_resize[i][j][a_indices[i][j]] + 1e-8))
# ratio.append(tf.reduce_mean(ratio_temp))
# surr = ratio * self.tfadv
# a_indices = tf.stack([tf.range(tf.shape(self.tfa)[0], dtype=tf.int32), self.tfa], axis=1)
# pi_prob = tf.gather_nd(params=self.pi, indices=a_indices) # shape=(None, )
# oldpi_prob = tf.gather_nd(params=self.oldpi, indices=a_indices) # shape=(None, )
# ratio = pi_prob / (oldpi_prob + 1e-8)
# surr = ratio * self.tfadv # surrogate loss
self.aloss = -tf.reduce_mean(tf.minimum( # clipped surrogate objective
surr,
tf.clip_by_value(ratio_temp1, 1. - 0.2, 1. + 0.2) * self.tfadv))
self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
self.sess.run(tf.global_variables_initializer())
self.writer = tf.summary.FileWriter("baseline/rnn_discrete/" + self.name + "_log/", self.sess.graph)
self.saver = tf.train.Saver(max_to_keep=20)
tf.get_default_graph().finalize()
def update_critic(self):
s = np.vstack(self.buffer_s)
r = np.array(self.buffer_r)[:, np.newaxis]
critic_loss = self.sess.run(self.closs, {self.tfs: s, self.tfdc_r: r})
self.summarize(critic_loss,self.pre_counter,'Pre_critic_loss')
self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r})
self.pre_counter += 1
def update_actor(self):
s = np.vstack(self.buffer_s)
r = np.array(self.buffer_r)[:, np.newaxis]
a = self.buffer_a
self.batch_size = len(a)
self.sess.run(self.update_oldpi_op)
adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
# print(np.array(adv).shape)
# adv = (adv - adv.mean())/(adv.std()+1e-6) # sometimes helpful
actor_loss = self.sess.run(self.aloss, {self.tfs: s, self.tfa: a, self.tfadv: adv})
self.summarize(actor_loss,self.global_counter,'Actor_loss')
[self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(A_UPDATE_STEPS)]
# update critic
critic_loss = self.sess.run(self.closs, {self.tfs: s, self.tfdc_r: r})
self.summarize(critic_loss,self.global_counter,'Critic_loss')
# [self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(C_UPDATE_STEPS)]
self.global_counter += 1
def update(self):
s = np.vstack(self.buffer_s)
# r = np.array(self.buffer_r)[:, np.newaxis]
# print(r)
r = np.vstack(self.buffer_r)
a = np.array(self.buffer_a).reshape([-1,self.a_dim])
# self.batch_size = len(a)
self.sess.run(self.update_oldpi_op)
adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
# print(np.array(adv).shape)
# adv = (adv - adv.mean())/(adv.std()+1e-6) # sometimes helpful
actor_loss = self.sess.run(self.aloss, {self.tfs: s, self.tfa: a, self.tfadv: adv})
self.summarize(actor_loss,self.global_counter,'Actor_loss')
[self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(A_UPDATE_STEPS)]
# update critic
critic_loss = self.sess.run(self.closs, {self.tfs: s, self.tfdc_r: r})
self.summarize(critic_loss,self.global_counter,'Critic_loss')
[self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(C_UPDATE_STEPS)]
self.global_counter += 1
def _build_anet(self, name, trainable):
# with tf.variable_scope(name):
# self.l1 = tf.layers.dense(self.tfs, 32, tf.nn.relu, trainable=trainable)
# self.l2 = tf.layers.dense(self.l1, 32, tf.nn.relu, trainable=trainable)
# out = tf.layers.dense(self.l2, self.a_dim, tf.nn.softmax, trainable=trainable)
        # params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
with tf.variable_scope(name):
# RNN
out_temp2 = []
rnn_input = tf.reshape(self.tfs,[-1,self.a_dim,int(self.s_dim/self.a_dim)])
rnn_cell = tf.nn.rnn_cell.LSTMCell(num_units=64,activation = tf.nn.tanh,trainable = trainable,
initializer = tf.random_normal_initializer(0., .01))
outputs, (h_c, h_n) = tf.nn.dynamic_rnn(
rnn_cell, # cell you have chosen
rnn_input, # input
initial_state=None, # the initial hidden state
dtype=tf.float32, # must given if set initial_state = None
time_major=False, # False: (batch, time step, input); True: (time step, batch, input)
)
for i in range(self.a_dim):
out_temp1 = tf.layers.dense(outputs[:, i, :], 2,tf.nn.softmax,trainable = trainable,
kernel_initializer = tf.random_normal_initializer(0., .01),
bias_initializer = tf.constant_initializer(0.01)) # output based on the last output step
# out_temp2 = tf.layers.dense(out_temp1, 2,tf.nn.softmax,trainable = trainable,
# kernel_initializer = tf.random_normal_initializer(0., .01),
# bias_initializer = tf.constant_initializer(0.01)) # output based on the last output step
# out.append(out_temp2)
out_temp2.append(out_temp1)
out = tf.stack([out_temp2[k] for k in range(self.a_dim)], axis=1)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return out, params
def choose_action(self, s):
# _s = np.array(s).reshape([-1,TIME_STEP,INPUT_SIZE])
# prob_weights = self.sess.run(self.pi, feed_dict={self.rnn_input: _s})
# print(prob_weights)
# action = np.random.choice(range(prob_weights.shape[1]),
# p=prob_weights.ravel()) # select action w.r.t the actions prob
# prob = tf.nn.softmax(prob_weights)
_s = np.array(s).reshape([-1,int(self.s_dim/self.a_dim)])
action = []
prob = self.sess.run(self.pi,feed_dict={self.tfs: _s})
prob_temp = np.array(prob).reshape([-1,2])
# print(prob)
for i in range(self.a_dim):
action_temp = np.random.choice(range(prob_temp[i].shape[0]),
p=prob_temp[i].ravel()) # select action w.r.t the actions prob
action.append(action_temp)
return action
def choose_best_action(self, s):
# _s = np.array(s).reshape([-1,TIME_STEP,INPUT_SIZE])
# prob_weights = self.sess.run(self.pi, feed_dict={self.rnn_input: _s})
# print(prob_weights)
# action = np.random.choice(range(prob_weights.shape[1]),
# p=prob_weights.ravel()) # select action w.r.t the actions prob
# prob = tf.nn.softmax(prob_weights)
action = []
prob = self.sess.run(self.pi,feed_dict={self.tfs: s})
for i in range(self.a_dim):
action_temp = np.argmax(prob[i].ravel()) # select action w.r.t the actions prob
action.append(action_temp)
return action
    def get_state(self, s):
        # Note: self.l2 is only defined in the commented-out dense variant of
        # _build_anet above, so this helper is unusable with the RNN actor.
        s = s[np.newaxis, :]
        h = self.sess.run(self.l2, {self.tfs: s})[0]
        return h
def get_v(self, s):
_s = np.array(s)
if _s.ndim < 2:
s = _s[np.newaxis, :]
# print(self.sess.run(self.v, {self.tfs: s}))
return self.sess.run(self.v, {self.tfs: s})
def experience_store(self, s, a, r):
self.buffer_a.append(a)
self.buffer_s.append(s)
self.buffer_r.append(r)
def empty_buffer(self):
self.buffer_s, self.buffer_r, self.buffer_a = [], [], []
    ## Discount each step's reward so that more distant rewards have less influence
def trajction_process(self, s_):
_s = np.array(s_).reshape([-1,int(self.s_dim/self.a_dim)])
v_s_ = self.get_v(_s)
buffer_r = np.array(self.buffer_r).reshape([-1,self.a_dim])
        buffer = [[] for _ in range(self.a_dim)]  # one return buffer per agent (was hard-coded to 9 lists)
for r in buffer_r[::-1]:
for i in range(self.a_dim):
v_s_[i] = r[i] + GAMMA * v_s_[i]
buffer[i].append(copy.deepcopy(v_s_[i]))
for i in range(self.a_dim):
buffer[i].reverse()
out = np.stack([buffer[k] for k in range(self.a_dim)], axis=1)
# print(self.buffer_r)
self.buffer_r = np.array(out).reshape([-1])
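        # Illustrative example: with GAMMA = 0.9, a_dim = 1, bootstrap value
        # v_s_ = 0 and rewards [1, 1], the reversed pass yields [1, 1.9],
        # which reverses to discounted returns [1.9, 1.0].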
def summarize(self, reward, i, tag):
summary = tf.Summary()
summary.value.add(tag=tag, simple_value=reward)
self.writer.add_summary(summary, i)
self.writer.flush()
def save_params(self,name,ep):
save_path = self.saver.save(self.sess,'my_net/rnn_discrete/{}_ep{}.ckpt'.format(name,ep))
print("Save to path:",save_path)
def restore_params(self,name,ep):
self.saver.restore(self.sess,'my_net/rnn_discrete/{}_ep{}.ckpt'.format(name,ep))
print("Restore params from")
#
# if __name__ == "__main__":
# import gym
# import matplotlib.pyplot as plt
# env = gym.make('CartPole-v0').unwrapped
# print(env.observation_space.shape)
# print(env.action_space.shape)
#
# ppo = PPO(s_dim=4, a_dim=2)
# all_ep_r = []
#
# for ep in range(EP_MAX):
# s = env.reset()
# buffer_s, buffer_a, buffer_r = [], [], []
# ep_r = 0
# for t in range(EP_LEN): # in one episode
#
# a = ppo.choose_action(s)
# s_, r, done, _ = env.step(a)
# if done:
# r = -10
# ppo.experience_store(s,a,r)
# s = s_
# ep_r += r
#
# # update ppo
# if (t + 1) % BATCH == 0 or t == EP_LEN - 1:
# ppo.trajction_process(s)
# ppo.update()
# ppo.empty_buffer()
#
# if done:
# break
# if ep == 0:
# all_ep_r.append(ep_r)
# else:
# all_ep_r.append(all_ep_r[-1] * 0.9 + ep_r * 0.1)
# print(
# 'Ep: %i' % ep,
# "|Ep_r: %i" % ep_r,
# )
#
# plt.plot(np.arange(len(all_ep_r)), all_ep_r)
# plt.xlabel('Episode')
# plt.ylabel('Moving averaged episode reward')
# plt.show()
```
#### File: flow/core/ppo_rnn.py
```python
import tensorflow as tf
import numpy as np
EP_MAX = 1000
EP_LEN = 400
GAMMA = 0.9
A_LR = 0.005
C_LR = 0.002
BATCH = 32
A_UPDATE_STEPS = 10
C_UPDATE_STEPS = 10
METHOD = [
dict(name='kl_pen', kl_target=0.01, lam=0.5), # KL penalty
dict(name='clip', epsilon=0.2), # Clipped surrogate objective, find this is better
][1] # choose the method for optimization
class PPO(object):
def __init__(self, s_dim=32, a_dim=1, name="meme"):
runner1 = '/cpu:0'
runner2 = '/gpu:0'
with tf.device('/cpu:0'):
self.sess = tf.Session()
self.tfs = tf.placeholder(tf.float32, [None, s_dim], 'state')
self.a_dim = a_dim
self.s_dim = s_dim
self.name = name
self.buffer_a = []
self.buffer_s = []
self.buffer_r = []
self.global_steps = 0
self.update_steps_a = 0
self.update_steps_c = 0
self.global_counter = 0
# self.rnn_input = tf.placeholder(tf.float32, [None, TIME_STEP, INPUT_SIZE]) # (batch, height, width, channel)
# critic
with tf.variable_scope(self.name + '_critic'):
w_init = tf.random_normal_initializer(0., .1)
l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu,kernel_initializer = w_init)
self.v = tf.layers.dense(l1, 1)
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.tfdc_r - self.v
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)
# actor
self.pi, pi_params = self._build_anet(self.name + '_pi', trainable=True)
self.oldpi, oldpi_params = self._build_anet(self.name + '_oldpi', trainable=False)
self.tfa = tf.placeholder(tf.int32, [None, ], 'action')
self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
self.a_indices = tf.stack([tf.range(tf.shape(self.tfa)[0], dtype=tf.int32), self.tfa], axis=1)
self.pi_prob = tf.gather_nd(params=self.pi, indices=self.a_indices) # shape=(None, )
oldpi_prob = tf.gather_nd(params=self.oldpi, indices=self.a_indices) # shape=(None, )
self.ratio = self.pi_prob / (oldpi_prob + 1e-8)
surr = self.ratio * self.tfadv # surrogate loss
self.aloss = -tf.reduce_mean(tf.minimum( # clipped surrogate objective
surr,
tf.clip_by_value(self.ratio, 1. - 0.2, 1. + 0.2) * self.tfadv))
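            # Illustrative example: with the hard-coded epsilon of 0.2 the
            # ratio is clipped to [0.8, 1.2], so a ratio of 1.5 with a
            # positive advantage contributes min(1.5 * adv, 1.2 * adv)
            # = 1.2 * adv, capping how far one update can move the policy.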
self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
self.sess.run(tf.global_variables_initializer())
self.writer = tf.summary.FileWriter("baseline/rnn/" + self.name + "_log/", self.sess.graph)
self.saver = tf.train.Saver(max_to_keep=20)
tf.get_default_graph().finalize()
def out_put(self):
s = np.vstack(self.buffer_s)
a = self.buffer_a
prob = self.sess.run(self.pi_prob, {self.tfs: s, self.tfa: a})
print(prob)
index = self.sess.run(self.a_indices, {self.tfa: a})
print(index)
ratio = self.sess.run(self.ratio, {self.tfs: s, self.tfa: a})
print(ratio)
def update(self):
s = np.vstack(self.buffer_s)
r = np.array(self.buffer_r)[:, np.newaxis]
a = self.buffer_a
self.sess.run(self.update_oldpi_op)
adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
# self.summarize(adv,self.global_counter,'advantage')
# print(np.array(adv).shape)
# adv = (adv - adv.mean())/(adv.std()+1e-6) # sometimes helpful
actor_loss = self.sess.run(self.aloss, {self.tfs: s, self.tfa: a, self.tfadv: adv})
self.summarize(actor_loss,self.global_counter,'Actor_loss')
[self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(A_UPDATE_STEPS)]
# update critic
critic_loss = self.sess.run(self.closs, {self.tfs: s, self.tfdc_r: r})
self.summarize(critic_loss,self.global_counter,'Critic_loss')
[self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(C_UPDATE_STEPS)]
self.global_counter += 1
def _build_anet(self, name, trainable):
# with tf.variable_scope(name):
# self.l1 = tf.layers.dense(self.tfs, 32, tf.nn.relu, trainable=trainable)
# self.l2 = tf.layers.dense(self.l1, 32, tf.nn.relu, trainable=trainable)
# out = tf.layers.dense(self.l2, self.a_dim, tf.nn.softmax, trainable=trainable)
# params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
with tf.variable_scope(name):
# RNN
rnn_input = tf.reshape(self.tfs,[-1,self.a_dim-1,int(self.s_dim/(self.a_dim - 1))])
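            # Reading the dimensions off the reshape above: the flat state vector
            # of length s_dim is treated as (a_dim - 1) time steps with
            # s_dim / (a_dim - 1) features each, i.e. the (batch, time step, input)
            # layout that dynamic_rnn expects below.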
rnn_cell = tf.nn.rnn_cell.LSTMCell(num_units=32,trainable = trainable)
outputs, (h_c, h_n) = tf.nn.dynamic_rnn(
rnn_cell, # cell you have chosen
rnn_input, # input
initial_state=None, # the initial hidden state
dtype=tf.float32, # must given if set initial_state = None
time_major=False, # False: (batch, time step, input); True: (time step, batch, input)
)
self.out = tf.layers.dense(outputs[:, -1, :], self.a_dim,tf.nn.softmax,trainable = trainable) # output based on the last output step
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return self.out, params
def display_prob(self,s):
prob = self.sess.run(self.out, feed_dict={self.tfs: s})
print(prob)
def choose_action(self, s):
# _s = np.array(s).reshape([-1,TIME_STEP,INPUT_SIZE])
# prob_weights = self.sess.run(self.pi, feed_dict={self.rnn_input: _s})
# print(prob_weights)
# action = np.random.choice(range(prob_weights.shape[1]),
# p=prob_weights.ravel()) # select action w.r.t the actions prob
# prob = tf.nn.softmax(prob_weights)
prob = self.sess.run(self.pi,feed_dict={self.tfs: s})
action = np.random.choice(range(prob.shape[1]),
p=prob.ravel()) # select action w.r.t the actions prob
return action
def get_state(self, s):
s = s[np.newaxis, :]
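        # NOTE: self.l2 only exists in the commented-out MLP variant of
        # _build_anet; with the RNN actor defined above, this call raises
        # AttributeError.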
h = self.sess.run(self.l2, {self.tfs: s})[0]
return h
def get_v(self, s):
_s = np.array(s)
if _s.ndim < 2:
s = _s[np.newaxis, :]
return self.sess.run(self.v, {self.tfs: s})[0, 0]
def experience_store(self, s, a, r):
self.buffer_a.append(a)
self.buffer_s.append(s)
self.buffer_r.append(r)
def empty_buffer(self):
self.buffer_s, self.buffer_r, self.buffer_a = [], [], []
    ## Discount each step's reward so that more distant rewards have less influence
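    # For example (assuming GAMMA = 0.9), rewards [1, 1, 1] with a bootstrap
    # value v(s_) = 0 become [2.71, 1.9, 1.0]: working backwards,
    # 1 + 0.9*0 = 1.0, then 1 + 0.9*1.0 = 1.9, then 1 + 0.9*1.9 = 2.71,
    # reversed back into time order.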
def trajction_process(self, s_):
v_s_ = self.get_v(s_)
discounted_r = []
for r in self.buffer_r[::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
self.buffer_r = discounted_r
def summarize(self, reward, i, tag):
summary = tf.Summary()
summary.value.add(tag=tag, simple_value=reward)
self.writer.add_summary(summary, i)
self.writer.flush()
def save_params(self,name,ep):
save_path = self.saver.save(self.sess,'my_net/rnn/{}_ep{}.ckpt'.format(name,ep))
print("Save to path:",save_path)
def restore_params(self,name,ep):
self.saver.restore(self.sess,'my_net/rnn/{}_ep{}.ckpt'.format(name,ep))
print("Restore params from")
#
# if __name__ == "__main__":
# import gym
# import matplotlib.pyplot as plt
# env = gym.make('CartPole-v0').unwrapped
# print(env.observation_space.shape)
# print(env.action_space.shape)
#
# ppo = PPO(s_dim=4, a_dim=2)
# all_ep_r = []
#
# for ep in range(EP_MAX):
# s = env.reset()
# buffer_s, buffer_a, buffer_r = [], [], []
# ep_r = 0
# for t in range(EP_LEN): # in one episode
#
# a = ppo.choose_action(s)
# s_, r, done, _ = env.step(a)
# if done:
# r = -10
# ppo.experience_store(s,a,r)
# s = s_
# ep_r += r
#
# # update ppo
# if (t + 1) % BATCH == 0 or t == EP_LEN - 1:
# ppo.trajction_process(s)
# ppo.update()
# ppo.empty_buffer()
#
# if done:
# break
# if ep == 0:
# all_ep_r.append(ep_r)
# else:
# all_ep_r.append(all_ep_r[-1] * 0.9 + ep_r * 0.1)
# print(
# 'Ep: %i' % ep,
# "|Ep_r: %i" % ep_r,
# )
#
# plt.plot(np.arange(len(all_ep_r)), all_ep_r)
# plt.xlabel('Episode')
# plt.ylabel('Moving averaged episode reward')
# plt.show()
```
#### File: experiments/fix_time_DRL/visualize.py
```python
import numpy as np
import csv
from matplotlib import pyplot as plt
import numpy.random
def read_image(filename,num,gap = 1):
with open(filename) as f:
        # Create a reader: pass f to csv.reader
        reader = csv.reader(f)
        # csv's next() returns the next line of the file, here the header row
        header_row = next(reader)
        # for index, column_header in enumerate(header_row):
        #     print(index, column_header)
        highs = []
        # Iterate over the remaining rows (next() consumed the first one;
        # the reader returns one subsequent row per iteration)
        count = 0
        for row in reader:
            if (count < num) and ((count % gap) == 0):
                # Convert the string to a number
                high = float(row[2])
                highs.append(high)
count += 1
return highs
def plot_image(data,color,label):
plt.plot(data,c=color,label = label,lw=1)
def read_3image(f1,f2,f3,num):
f = [f1, f2, f3]
img = [open(f[i]) for i in range(len(f))]
    # Create one reader per file: pass each open file to csv.reader
reader = [csv.reader(img[i]) for i in range(len(f))]
header_row1 = next(reader[0])
header_row2 = next(reader[1])
header_row3 = next(reader[2])
data = [[],[],[]]
out = [[],[]]
count = 0
for row in reader[0]:
if count < num:
data[0].append(float(row[2]))
count += 1
count = 0
for row in reader[1]:
if count < num:
data[1].append(float(row[2]))
count += 1
count = 0
for row in reader[2]:
if count < num:
data[2].append(float(row[2]))
count += 1
print(num)
print(np.array(data).shape)
for i in range(num):
# print(i)
temp = sorted([data[0][i],data[1][i],data[2][i]])
mean = (temp[0] + temp[1] +temp[2])/3
std = ((temp[0] - mean) ** 2 + (temp[1] - mean) ** 2 + (temp[2] - mean) ** 2)/3
out[0].append(mean)
out[1].append(std ** (1/2))
return np.array(out)
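# Note: the per-step mean/std above could equivalently be vectorized
# (a sketch, assuming `data` holds the three equal-length runs read above):
#     arr = np.array(data)
#     out = np.array([arr.mean(axis=0), arr.std(axis=0)])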
def plot_3image(data,color,label,longer = 1,random=False):
random_num = np.zeros([len(data[0])])
if random == True:
random_num = np.random.randint(3,6,size=len(data[0])) / 4
plt.plot(np.array(range(len(data[0]))) * longer, data[0] - random_num,c=color,label = label,lw=1)
plt.fill_between(np.array(range(len(data[0]))) * longer, data[0] - data[1]-random_num,
data[0] + data[1]-random_num, facecolor=color, alpha=0.3)
def paint_3img(f1,f2,f3,color,label,num=200):
    data = read_3image(f1,f2,f3,num)
    plot_3image(data,color,label)
def plot_format(title,xLabel,yLabel):
plt.title(title, fontsize=24)
plt.xlabel(xLabel,fontsize=16)
plt.ylabel(yLabel, fontsize=16)
plt.tick_params(axis='both', which="major", labelsize=16)
if __name__ == "__main__":
    # Load the data
    grid = "2x2"
    ## Compare different DRL methods under 600 flowrate
data1 = read_3image('600_{}_ours_1.csv'.format(grid),'600_{}_ours_2.csv'.format(grid),
'600_{}_ours_3.csv'.format(grid),200)
data2 = read_3image('600_{}_single_1.csv'.format(grid),'600_{}_single_2.csv'.format(grid),
'600_{}_single_3.csv'.format(grid),200)
data3 = read_3image('600_{}_multi_1.csv'.format(grid),'600_{}_multi_2.csv'.format(grid),
'600_{}_multi_3.csv'.format(grid),200)
data4 = read_image('600_{}_fix.csv'.format(grid),200)
data5 = read_image('600_{}_actuated.csv'.format(grid),200)
    # Draw the figure
fig = plt.figure(dpi=128, figsize=(10,6))
##compared with different DRL methods
plot_3image(data1,'blue','ours')
plot_3image(data2,'red','Single-PPO')
plot_3image(data3,'black','Multi-PPO')
plot_image(data4,'green','Fixed-time')
plot_image(data5,'brown','Actuated')
##compared with different DRL methods
plot_format('{} grid under 600 flowrate'.format(grid),'Episodes','Rewards')
plt.legend(['ours','Single-PPO','Multi-PPO','Fixed-time','Actuated'],loc = 4,fontsize = 15)
plt.show()
```
#### File: experiments/Painter-master/Painter.py
```python
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import os
import argparse
from collections import OrderedDict
class Painter():
"""docstring for Paiter."""
def __init__(self, category='line',
dir = None,
colorful = 'favorite',
axis = 0):
self.colorful = colorful
self.category = category
self.color_mark = 0
self.data = OrderedDict()
self.dir = dir
self.axis = axis
sns.set(style="whitegrid", color_codes=True)
if dir == '':
if self.category == 'line':
self.data['random-1'] = self.random_data()
self.data['random-2'] = self.random_data() - 2
self.data['random-3'] = self.random_data() - 4
elif self.category == '36d':
self.data['random'] = np.array([np.sin(np.linspace(0,20,201)) + np.random.rand() for _ in range(20)])
else:
files = os.listdir(dir)
self.wanted = [i for i in files if i[-3:] == 'csv']
self.read_data()
if self.colorful == 'favorite':
colors = ["blue","orange","violet","green","red"]
shadow_colors = ["light blue","light orange","light violet","light green","baby pink"]
self.color_palette = sns.xkcd_palette(colors)
self.shadow_color = sns.xkcd_palette(shadow_colors)
def read_data(self):
if len(self.wanted) == 0:
            print('No valid files (only .csv supported)')
return
for name in self.wanted:
print(self.dir + name)
d = pd.read_csv(self.dir + name, encoding='utf-8')
if self.axis == 1: d = d.T
self.data[name[:-4]] = d
print("All %d files will be ploted." % len(self.wanted))
def random_data(self, times = 5):
return pd.DataFrame([self.random_feature() + np.random.randint(5) for i in range(times)])
def random_feature(self, length = 500):
x = np.arange(length)
y = np.log(x + 1) + np.random.standard_normal(length)
return y
def colorful_world(self, pair=True):
k = self.color_mark
self.color_mark += 1
if self.color_mark >= len(self.color_palette):self.color_mark -= len(self.color_palette)
if pair:
return self.color_palette[k], self.shadow_color[k]
else:
return self.color_palette[k]
    def paint(self, gamma=0.6, stretch=10, shadow=True):
with self.color_palette:
if self.category == 'line':
for name, each_line in self.data.items():
self._plot_line(gamma, each_line, name, stretch, shadow)
plt.xlabel('Frames')
plt.ylabel('Rewards')
plt.legend(loc='upper left')
plt.savefig(self.dir + name + ".jpg")
plt.show()
elif self.category == '36d':
for name, each_plot in self.data.items():
self._plot_36d(name, each_plot)
def _plot_line(self, gamma, data, name, stretch,shadow=True):
mean = np.mean(data, axis=0)
c, sc = self.colorful_world()
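        # The loop below is an exponential moving average: each point keeps a
        # (1 - gamma) share of its own value and inherits gamma of the running
        # mean, so the default gamma = 0.6 gives a moderately smoothed curve.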
for i in range(1,len(mean)):
mean[i] = mean[i] * (1 - gamma) + mean[i-1] * gamma
plt.plot(stretch*np.arange(len(mean)), mean, color=c, label=name)
if shadow:
std = np.std(data, axis=0)
for i in range(1,len(std)):
std[i] = std[i] * (1 - gamma) + std[i-1] * gamma
plt.fill_between(stretch*np.arange(len(mean)), mean-std, mean+std, facecolor=sc, alpha=0.4)
def _plot_36d(self, name, data, font="DejaVu Sans",color_bar=True):
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
if not isinstance(data, (np.ndarray, pd.DataFrame)):
print("Type of data should be DataFrame.")
return
if isinstance(data, pd.DataFrame):
if isinstance(data.columns[0], int):
x = np.array(data.columns)
else:
x = np.arange(data.shape[1])
if isinstance(data.index[0], int):
y = np.array(data.index)
else:
                y = np.arange(data.shape[0])  # rows of the frame form the y axis
else:
x = np.arange(data.shape[1])
y = np.arange(data.shape[0])
x, y = np.meshgrid(x,y)
data = data.values
if not x.shape == data.shape:
print("Check 3D plot data shape, which should be: ", self.data['z'].shape)
return
fig = plt.figure()
ax = Axes3D(fig)
if color_bar:
surf = ax.plot_surface(x, y, data, cmap=cm.coolwarm, linewidth=0, antialiased=False)
bar = fig.colorbar(surf, shrink=0.5, aspect=5)
bar.ax.tick_params(labelsize=16)
barlabels = bar.ax.get_yticklabels()
[label.set_fontname(font) for label in barlabels]
else:
surf = ax.plot_wireframe(x, y, data)
labels = ax.get_xticklabels() + ax.get_yticklabels() + ax.get_zticklabels()
[label.set_fontname(font) for label in labels]
plt.tick_params(labelsize=16)
plt.savefig(self.dir + name + ".jpg", dpi=150)
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=None)
    parser.add_argument('-k', '--kind', default='line', type=str, help='choose the kind of plot: { line/36d }')
parser.add_argument('-sh', '--shadow', default=True, type=bool, help='mean +- std | error area: { True/False }')
parser.add_argument('-st', '--stretch', default=10, type=int, help='times of x axis stretch: int')
parser.add_argument('-d', '--data_dir', default='', type=str, help='CSV data files directory: .../target/')
parser.add_argument('-g', '--gamma', default=0.6, type=float, help='discount of smooth')
args = parser.parse_args()
kind = args.kind
shadow = args.shadow
dir = args.data_dir
stretch = args.stretch
gamma = args.gamma
p = Painter(dir = dir, category = kind)
    p.paint(stretch = stretch, shadow = shadow, gamma = gamma)
``` |
{
"source": "2867a0/exploitconsole",
"score": 2
} |
#### File: exploitconsole/api/parameters.py
```python
import re
from module.other.utils.DataUtil import database
class Parameters(object):
def __init__(self):
# 初始化程序基本信息
self.program_normal_info = {
"version": 1.0, # 程序版本信息
"poc_list": [], # 可用poc列表
"poc_number": 0, # poc数量
"backdoor_list": {}, # 当前已攻陷目标
"backdoor_number": 0, # 攻陷数量
"commandOperator": "[POE]: " # 当前控制台前缀符
}
# 全局变量信息
self.global_arguments = {
"url": None,
}
# poc信息
self.poc_options = {
"name": "",
"affected": "",
"cvss": "",
"severity": "",
"description": "",
"referer": "",
"argument": set()
}
# PoC参数的详细信息
self.registerArguments = {
}
self.occupied_options = {
"bid": 0,
"target": "",
"url": "",
"last_visit": "",
"alive": ""
}
for p in database.getPocListTableData():
self.program_normal_info["poc_list"].append(p[1])
for o in database.getBackdoorTableData():
self.program_normal_info["backdoor_list"][o[0]] = o[1:-1]
self.program_normal_info["poc_number"] = len(self.program_normal_info["poc_list"])
self.program_normal_info["backdoor_number"] = len(self.program_normal_info["backdoor_list"])
    # ---------- Program / database info: self.program_normal_info ----------
    def getVersion(self) -> float:
        """
        Get the version information.
        :return: version number
        """
        return self.program_normal_info["version"]
    def getPocList(self) -> list:
        """
        Get the PoC list.
        :return: list, e.g. ["123", "uu", "aa"]
        """
        return self.program_normal_info["poc_list"]
    # Reload the PoC list
    def reloadPocList(self):
        self.program_normal_info["poc_list"].clear()
        for p in database.getPocListTableData():
            self.program_normal_info["poc_list"].append(p[1])
        self.program_normal_info["poc_number"] = len(self.program_normal_info["poc_list"])
    # Get the number of PoCs
    def getPocNumber(self) -> int:
        """
        Get the current number of PoCs.
        :return: int
        """
        return self.program_normal_info["poc_number"]
    def getBackdoorInfo(self) -> dict:
        """
        Return the backdoor list, in the format:
        {
            "1": ("url", "http://URL", "pass", "last_visit"),
            "2": ("", "", "", ""),
        }
        :return: dict
        """
        return self.program_normal_info["backdoor_list"]
    def getCurrentBackdoor(self):
        pass
    def updateBackdoorInfo(self, key, value) -> None:
        """
        Update one backdoor record.
        :param key: the key to update
        :param value: the new value
        :return: null
        """
        database.updateBackdoorData(index=self.occupied_options["bid"], key=key, value=value)
    def getBackdoorNumber(self) -> int:
        """
        Get the number of backdoors.
        :return: int
        """
        return self.program_normal_info["backdoor_number"]
    # -------- Global variable settings: self.global_arguments -----------------
    def getGlobalArgument(self, key="all"):
        """
        Get a global variable setting; if no key is given, return all settings.
        :param key: one key of the global variables, defaults to all
        :return: string / dict / null
        """
        if key == "all":
            return self.global_arguments
        if key in self.global_arguments.keys():
            return self.global_arguments[key]
        return None
    def setGlobalArgument(self, key, value=None) -> None:
        """
        Set one value among the global variables.
        :param key: the variable, must not be empty
        :param value: the new value
        :return: null
        """
        self.global_arguments[key] = value
    def clearGlobalArgument(self) -> None:
        """
        Clear the global variables.
        :return: null
        """
        self.global_arguments.clear()
    # ---------- PoC information: self.poc_options --------------------
    def getPocOption(self, key="all"):
        """
        Return the requested PoC information; all of it by default.
        :param key: a PoC info key, default "all"
        :return: string / dict / null
        """
        if key == "all":
            return self.poc_options
        if key in self.poc_options.keys():
            return self.poc_options.get(key)
        return None
    def setPocOption(self, key="all", value=None) -> None:
        """
        Set basic PoC information; if key == "all", reset the PoC information.
        :param key: a PoC key
        :param value: the new value
        :return: null
        """
        if key == "all":
            self.poc_options = {
                "name": "", "affected": "", "cvss": "", "severity": "", "description": "", "referer": "",
                "argument": set()
            }
        elif key in self.poc_options.keys():
            self.poc_options[key] = value
    def getPocArgumentNameList(self) -> list:
        """
        Get the names of the parameters registered by the PoC.
        :return: list
        """
        return list(self.poc_options["argument"])
    def setPocArgumentNameList(self, poc_argument_list:list) -> None:
        """
        Set the names of the parameters registered by the PoC.
        :param poc_argument_list: PoC parameter names - list
        :return: null
        """
        self.poc_options["argument"].update(poc_argument_list)
    # ---------- Controllable parameters of PoC scripts: self.registerArguments --------------------
    def getPocArguments(self, key="all") -> dict:
        """
        Get the PoC parameter information, in the format:
        {
            "test_string": [None, True, "description"],
            "test_list": ["default_value", False, "default description"]
        }
        :return: dict
        """
        if key == "all":
            return self.registerArguments
        else:
            return self.registerArguments.get(key)[0]
    def setPocArgument(self, key, value: str) -> None:
        """
        Set the value of a registered parameter.
        :param key: the registered parameter
        :param value: the value
        :return: null
        """
        self.registerArguments[key][0] = value
    def clearRegisterArgument(self):
        """
        Clear the registered parameters.
        :return:
        """
        self.registerArguments.clear()
        self.setPocArgumentNameList([])
# self.arguments func for user
    def registerParameter(self, poc_arg: dict) -> None:
        """
        Register the parameters a PoC needs, in the format:
        dict = {
            "parameter name": ["default value", required? (True/False), "parameter description"]
            ...
        }
        :param poc_arg: the PoC parameter dict
        :return: null
        """
        self.registerArguments = poc_arg
        self.setPocArgumentNameList(list(self.registerArguments.keys()))
    def getRegisterParameter(self, name) -> str:
        """
        Get the value of a registered parameter.
        :param name: the parameter name
        :return: string / null
        """
        if name in self.getPocArgumentNameList():
            return self.registerArguments.get(name)[0]
        else:
            from api.screens import screen
            screen.error(f"No such registered argument: {name}")
# -------------------other-----------------------------
    def checkRequiredArgument(self) -> list:
        """
        Check whether every required parameter has been set.
        :return: list: the parameters that are still unset
        """
not_set = []
for k, v in self.registerArguments.items():
if v[1] is True and v[0] == "":
not_set.append(k)
return not_set
def checkValueIsValid(self, value: str):
pass
    def checkIPLicit(self, ip: str) -> bool:
        """
        Check whether an IP address is well-formed.
        :param ip: the IP to check
        :return: true or false
        """
compile_ip = re.compile(
'^(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|[1-9])\.(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)\.(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)\.(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)$')
if compile_ip.match(ip):
return True
else:
return False
param = Parameters()
```
#### File: 2867a0/exploitconsole/pocorexp.py
```python
import time
import module.args
from module.other.utils.DataUtil import database
from api.screens import screen
# Imperfect design: importing screen also imports param, and param initializes
# itself on import, so param data is not refreshed in time when reloading.
def start(mode):
    # Enter console mode
    if mode == "console":
        from module.console.mainconsole import main_console
        main_console.showConsole()
    # Enter batch exploitation mode
    if mode == "command":
        from module.command.CommandMode import CommandMode
        start_t = time.time()
        commandMode = CommandMode(inputArg.target, inputArg.command, inputArg.exp, inputArg.thread)
        commandMode.run()
        # print("")
        screen.info(f"Done, took {round(time.time() - start_t)}s")
    # Enter webshell mode
    if mode == "webshell":
        from module.webshellmode.webshells import webshellController
        webshellController.showPanel()
def ready(debug: bool, reload: bool):
if debug:
screen.setDebug()
screen.debug("Debug Mode Is On")
if reload:
database.reloadPocData()
from api.parameters import param
param.reloadPocList()
screen.success("reload payload list success")
if __name__ == '__main__':
inputArg = module.args.getArgs()
ready(inputArg.debug, inputArg.reload)
start(inputArg.sub_mode)
``` |
{
"source": "286844626/koalas",
"score": 2
} |
#### File: databricks/koalas/frame.py
```python
import warnings
from functools import partial, reduce
from typing import Any, List, Tuple, Union
import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like
from pyspark import sql as spark
from pyspark.sql import functions as F, Column
from pyspark.sql.types import BooleanType, StructField, StructType, to_arrow_type
from pyspark.sql.utils import AnalysisException
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.utils import default_session, validate_arguments_and_invoke_function
from databricks.koalas.exceptions import SparkPandasMergeError
from databricks.koalas.generic import _Frame, max_display_count
from databricks.koalas.metadata import Metadata
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
from databricks.koalas.selection import SparkDataFrameLocator
from databricks.koalas.typedef import infer_pd_series_spark_type
class DataFrame(_Frame):
"""
    Koalas DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame
    internally.
    :ivar _sdf: Spark DataFrame instance
:ivar _metadata: Metadata related to column names and index information.
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame or Spark DataFrame
Dict can contain Series, arrays, constants, or list-like objects
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a Pandas DataFrame, other arguments should not be used.
If `data` is a Spark DataFrame, all other arguments except `index` should not be used.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
If `data` is a Spark DataFrame, `index` is expected to be `Metadata`.
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from Pandas DataFrame
>>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ks.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
if isinstance(data, pd.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
self._init_from_pandas(data)
elif isinstance(data, spark.DataFrame):
assert columns is None
assert dtype is None
assert not copy
self._init_from_spark(data, index)
else:
pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
self._init_from_pandas(pdf)
def _init_from_pandas(self, pdf):
metadata = Metadata.from_pandas(pdf)
reset_index = pdf.reset_index()
reset_index.columns = metadata.columns
schema = StructType([StructField(name, infer_pd_series_spark_type(col),
nullable=bool(col.isnull().any()))
for name, col in reset_index.iteritems()])
for name, col in reset_index.iteritems():
dt = col.dtype
if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
continue
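            # np.nan placeholders in non-datetime columns would presumably clash
            # with the declared Spark types, so swap them for None (i.e. null).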
reset_index[name] = col.replace({np.nan: None})
self._init_from_spark(default_session().createDataFrame(reset_index, schema=schema),
metadata)
def _init_from_spark(self, sdf, metadata=None):
self._sdf = sdf
if metadata is None:
self._metadata = Metadata(data_columns=self._sdf.schema.fieldNames())
else:
self._metadata = metadata
@property
def _index_columns(self):
return [self._sdf.__getitem__(field)
for field in self._metadata.index_columns]
def _reduce_for_stat_function(self, sfun):
"""
Applies sfun to each column and returns a pd.Series where the number of rows equal the
number of columns.
:param sfun: either an 1-arg function that takes a Column and returns a Column, or
a 2-arg function that takes a Column and its DataType and returns a Column.
"""
from inspect import signature
exprs = []
num_args = len(signature(sfun).parameters)
for col in self.columns:
col_sdf = self._sdf[col]
col_type = self._sdf.schema[col].dataType
if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'):
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
exprs.append(col_sdf.alias(col))
sdf = self._sdf.select(*exprs)
pdf = sdf.toPandas()
assert len(pdf) == 1, (sdf, pdf)
row = pdf.iloc[0]
row.name = None
return row # Return first row as a Series
def corr(self, method='pearson'):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : pandas.DataFrame
See Also
--------
Series.corr
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr('pearson')
dogs cats
dogs 1.000000 -0.851064
cats -0.851064 1.000000
>>> df.corr('spearman')
dogs cats
dogs 1.000000 -0.948683
cats -0.948683 1.000000
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
return corr(self, method)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
-------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'],
... columns=['species', 'population'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
cols = list(self.columns)
return list((col_name, self[col_name]) for col_name in cols)
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False, border=None,
table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with Pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_html, pd.DataFrame.to_html, args)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_string, pd.DataFrame.to_string, args)
def to_json(self, path_or_buf=None, orient=None, date_format=None,
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression='infer',
index=True):
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
.. note:: This method should only be used if the resulting JSON is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
path_or_buf : string or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : string
Indication of expected JSON string format.
* Series
- default is 'index'
- allowed values are: {'split','records','index','table'}
* DataFrame
- default is 'columns'
- allowed values are:
{'split','records','index','columns','values','table'}
* The format of the JSON string
- 'split' : dict like {'index' -> [index],
'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
describing the data, and the data component is
like ``orient='records'``.
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : string, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records' write out line delimited json format. Will
throw ValueError if incorrect 'orient' since others are not list
like.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
Examples
--------
>>> df = ks.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],\
"index":["row 1","row 2"],\
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table') # doctest: +SKIP
'{"schema": {"fields":[{"name":"index","type":"string"},\
{"name":"col 1","type":"string"},\
{"name":"col 2","type":"string"}],\
"primaryKey":["index"],\
"pandas_version":"0.20.0"}, \
"data": [{"index":"row 1","col 1":"a","col 2":"b"},\
{"index":"row 2","col 1":"c","col 2":"d"}]}'
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_json, pd.DataFrame.to_json, args)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'],
... columns=['col1', 'col2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression='infer', quoting=None,
quotechar='"', line_terminator="\n", chunksize=None,
tupleize_cols=None, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
"""
Write object to a comma-separated values (csv) file.
.. note:: This method should only be used if the resulting CSV is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
path_or_buf : str or file handle, default None
File path or object, if None is provided the result is returned as
a string. If a file object is passed it should be opened with
`newline=''`, disabling universal newlines.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
float_format : str, default None
Format string for floating point numbers.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
mode : str
Python write mode, default 'w'.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : str, default 'infer'
Compression mode among the following possible values: {'infer',
'gzip', 'bz2', 'zip', 'xz', None}. If 'infer' and `path_or_buf`
is path-like, then detect compression from the following
extensions: '.gz', '.bz2', '.zip' or '.xz'. (otherwise no
compression).
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
        line_terminator : string, default '\\n'
            The newline character or character sequence to use in the output
            file. Defaults to `os.linesep`, which depends on the OS in which
            this method is called (e.g. '\n' for Linux, '\r\n' for Windows).
chunksize : int or None
Rows to write at a time.
tupleize_cols : bool, default False
Write MultiIndex columns as a list of tuples (if True) or in
the new, expanded format, where each MultiIndex column is a row
in the CSV (if False).
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
Control quoting of `quotechar` inside a field.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
decimal : str, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data.
Returns
-------
None or str
If path_or_buf is None, returns the resulting csv format as a
string. Otherwise returns None.
Examples
--------
>>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']},
... columns=['name', 'mask', 'weapon'])
>>> df.to_csv(index=False)
'name,mask,weapon\\nRaphael,red,sai\\nDonatello,purple,bo staff\\n'
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_csv, pd.DataFrame.to_csv, args)
@property
def index(self):
"""The index (row labels) Column of the DataFrame.
Currently supported only when the DataFrame has a single index.
"""
from databricks.koalas.series import Series
if len(self._metadata.index_map) != 1:
raise KeyError('Currently supported only when the DataFrame has a single index.')
return Series(self._index_columns[0], anchor=self, index=[])
def set_index(self, keys, drop=True, append=False, inplace=False):
"""Set the DataFrame index (row labels) using one or more existing columns. By default
yields a new object.
:param keys: column label or list of column labels / arrays
:param drop: boolean, default True
Delete columns to be used as the new index
:param append: boolean, default False
Whether to append columns to existing index
:param inplace: boolean, default False
Modify the DataFrame in place (do not create a new object)
:return: :class:`DataFrame`
"""
if isinstance(keys, str):
keys = [keys]
else:
keys = list(keys)
for key in keys:
if key not in self.columns:
raise KeyError(key)
if drop:
data_columns = [column for column in self._metadata.data_columns if column not in keys]
else:
data_columns = self._metadata.data_columns
if append:
index_map = self._metadata.index_map + [(column, column) for column in keys]
else:
index_map = [(column, column) for column in keys]
metadata = self._metadata.copy(data_columns=data_columns, index_map=index_map)
# Sync Spark's columns as well.
sdf = self._sdf.select(['`{}`'.format(name) for name in metadata.columns])
if inplace:
self._metadata = metadata
self._sdf = sdf
else:
kdf = self.copy()
kdf._metadata = metadata
kdf._sdf = sdf
return kdf
def reset_index(self, level=None, drop=False, inplace=False):
"""For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
:param level: int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by default
:param drop: boolean, default False
Do not try to insert index into dataframe columns. This resets the index to the
default integer index.
:param inplace: boolean, default False
Modify the DataFrame in place (do not create a new object)
:return: :class:`DataFrame`
"""
if len(self._metadata.index_map) == 0:
raise NotImplementedError('Can\'t reset index because there is no index.')
multi_index = len(self._metadata.index_map) > 1
def rename(index):
if multi_index:
return 'level_{}'.format(index)
else:
if 'index' not in self._metadata.data_columns:
return 'index'
else:
return 'level_{}'.format(index)
if level is None:
new_index_map = [(column, name if name is not None else rename(i))
for i, (column, name) in enumerate(self._metadata.index_map)]
index_map = []
else:
if isinstance(level, (int, str)):
level = [level]
level = list(level)
if all(isinstance(l, int) for l in level):
for lev in level:
if lev >= len(self._metadata.index_map):
raise IndexError('Too many levels: Index has only {} level, not {}'
.format(len(self._metadata.index_map), lev + 1))
idx = level
elif all(isinstance(lev, str) for lev in level):
idx = []
for l in level:
try:
i = self._metadata.index_columns.index(l)
idx.append(i)
except ValueError:
                        if multi_index:
                            raise KeyError('Level {} not found'.format(l))
                        else:
                            raise KeyError('Level {} must be same as name ({})'
                                           .format(l, self._metadata.index_columns[0]))
else:
raise ValueError('Level should be all int or all string.')
idx.sort()
new_index_map = []
index_map = self._metadata.index_map.copy()
for i in idx:
info = self._metadata.index_map[i]
index_column, index_name = info
                new_index_map.append(
                    (index_column,
                     index_name if index_name is not None else rename(i)))
index_map.remove(info)
if drop:
new_index_map = []
metadata = self._metadata.copy(
data_columns=[column for column, _ in new_index_map] + self._metadata.data_columns,
index_map=index_map)
columns = [name for _, name in new_index_map] + self._metadata.data_columns
if inplace:
self._metadata = metadata
self.columns = columns
else:
kdf = self.copy()
kdf._metadata = metadata
kdf.columns = columns
return kdf
def isnull(self):
"""
Detects missing values for items in the current Dataframe.
Return a boolean same-sized Dataframe indicating if the values are NA.
NA values, such as None or numpy.NaN, gets mapped to True values.
Everything else gets mapped to False values.
See Also
--------
Dataframe.notnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.isnull()
return kdf
isna = isnull
def notnull(self):
"""
Detects non-missing values for items in the current Dataframe.
This function takes a dataframe and indicates whether it's
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
Dataframe.isnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.notnull()
return kdf
notna = notnull
def to_koalas(self):
"""
Converts the existing DataFrame into a Koalas DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a Koalas DataFrame. If running on
an existing Koalas DataFrame, the method returns itself.
If a Koalas DataFrame is converted to a Spark DataFrame and then back
to Koalas, it will lose the index information and the original index
will be turned into a normal column.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[__index_level_0__: bigint, col1: bigint, col2: bigint]
>>> kdf = spark_df.to_koalas()
>>> kdf
__index_level_0__ col1 col2
0 0 1 3
1 1 2 4
Calling to_koalas on a Koalas DataFrame simply returns itself.
>>> df.to_koalas()
col1 col2
0 1 3
1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
return DataFrame(self)
def to_spark(self):
"""
Return the current DataFrame as a Spark DataFrame.
See Also
--------
DataFrame.to_koalas
"""
return self._sdf
def to_pandas(self):
"""
Return a Pandas DataFrame.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
sdf = self._sdf.select(['`{}`'.format(name) for name in self._metadata.columns])
pdf = sdf.toPandas()
if len(pdf) == 0 and len(sdf.schema) > 0:
# TODO: push to OSS
pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype()
for field in sdf.schema})
index_columns = self._metadata.index_columns
if len(index_columns) > 0:
append = False
for index_field in index_columns:
drop = index_field not in self._metadata.data_columns
pdf = pdf.set_index(index_field, drop=drop, append=append)
append = True
pdf = pdf[self._metadata.data_columns]
index_names = self._metadata.index_names
if len(index_names) > 0:
if isinstance(pdf.index, pd.MultiIndex):
pdf.index.names = index_names
else:
pdf.index.name = index_names[0]
return pdf
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though Koalas doesn't check it).
If the values are not callable, (e.g. a Series or a literal),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Examples
--------
>>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence and you can also
create multiple columns within the same assign.
>>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
... temp_k=df['temp_c'] + 273.15)
>>> assigned[['temp_c', 'temp_f', 'temp_k']]
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
Notes
-----
Assigning multiple columns within the same ``assign`` is possible
but you cannot refer to newly created or modified columns. This
feature is supported in pandas for Python 3.6 and later but not in
Koalas. In Koalas, all items are computed first, and then assigned.
"""
from databricks.koalas.series import Series
for k, v in kwargs.items():
if not (isinstance(v, (Series, spark.Column)) or
callable(v) or pd.api.types.is_scalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
if callable(v):
kwargs[k] = v(self)
pairs = list(kwargs.items())
sdf = self._sdf
for (name, c) in pairs:
if isinstance(c, Series):
sdf = sdf.withColumn(name, c._scol)
elif isinstance(c, Column):
sdf = sdf.withColumn(name, c)
else:
sdf = sdf.withColumn(name, F.lit(c))
data_columns = self._metadata.data_columns
metadata = self._metadata.copy(
data_columns=(data_columns +
[name for name, _ in pairs if name not in data_columns]))
return DataFrame(sdf, metadata)
def to_excel(self, excel_writer, sheet_name="Sheet1", na_rep="", float_format=None,
columns=None, header=True, index=True, index_label=None, startrow=0,
startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep="inf",
verbose=True, freeze_panes=None):
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible write further data
without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ks.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_excel, pd.DataFrame.to_excel, args)
@property
def loc(self):
return SparkDataFrameLocator(self)
def copy(self):
return DataFrame(self._sdf, self._metadata.copy())
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
"""
Remove missing values.
Parameters
----------
axis : {0 or 'index'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.drop : Drop specified labels from columns.
DataFrame.isnull: Indicate missing values.
DataFrame.notnull : Indicate existing (non-missing) values.
Examples
--------
>>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [None, 'Batmobile', 'Bullwhip'],
... "born": [None, "1940-04-25", None]},
... columns=['name', 'toy', 'born'])
>>> df
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
if axis == 0 or axis == 'index':
if subset is not None:
if isinstance(subset, str):
columns = [subset]
else:
columns = list(subset)
invalids = [column for column in columns
if column not in self._metadata.data_columns]
if len(invalids) > 0:
raise KeyError(invalids)
else:
columns = list(self.columns)
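            # Build a per-row count of non-null cells over the inspected columns;
            # 'any', 'all' and thresh are all expressed as predicates over this count.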
cnt = reduce(lambda x, y: x + y,
[F.when(self[column].notna()._scol, 1).otherwise(0)
for column in columns],
F.lit(0))
if thresh is not None:
pred = cnt >= F.lit(int(thresh))
elif how == 'any':
pred = cnt == F.lit(len(columns))
elif how == 'all':
pred = cnt > F.lit(0)
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
sdf = self._sdf.filter(pred)
if inplace:
self._sdf = sdf
else:
return DataFrame(sdf, self._metadata.copy())
else:
raise NotImplementedError("dropna currently only works for axis=0 or axis='index'")
def fillna(self, value=None, axis=None, inplace=False):
"""Fill NA/NaN values.
:param value: scalar, dict, Series
Value to use to fill holes. alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
:param axis: {0 or `index`}
1 and `columns` are not supported.
:param inplace: boolean, default False
Fill in place (do not create a new object)
:return: :class:`DataFrame`
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 1.0 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 1.0 4
"""
if axis is None:
axis = 0
if not (axis == 0 or axis == "index"):
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if value is None:
raise ValueError('Currently must specify value')
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value))
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v))
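        # Delegate to Spark's native fillna, which accepts a scalar or a column-name -> value dict.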
sdf = self._sdf.fillna(value)
if inplace:
self._sdf = sdf
else:
return DataFrame(sdf, self._metadata.copy())
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return DataFrame(self._sdf.limit(n), self._metadata.copy())
@property
def columns(self):
"""The column labels of the DataFrame."""
return pd.Index(self._metadata.data_columns)
@columns.setter
def columns(self, names):
old_names = self._metadata.data_columns
if len(old_names) != len(names):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(names)))
sdf = self._sdf.select(self._metadata.index_columns +
[self[old_name]._scol.alias(new_name)
for (old_name, new_name) in zip(old_names, names)])
self._sdf = sdf
self._metadata = self._metadata.copy(data_columns=names)
@property
def dtypes(self):
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
:return: :class:`pd.Series` The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series([self[col].dtype for col in self._metadata.data_columns],
index=self._metadata.data_columns)
def count(self):
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Returns
-------
pandas.Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
"""
return self._reduce_for_stat_function(_Frame._count_expr)
def drop(self, labels=None, axis=1, columns: Union[str, List[str]] = None):
"""
Drop specified labels from columns.
Remove columns by specifying label names and axis=1 or columns.
When specifying both labels and columns, only labels will be dropped.
Removing rows is yet to be implemented.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {1 or 'columns'}, default 1
.. dropna currently only works for axis=1 'columns'
axis=0 is yet to be implemented.
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('x', axis=1)
y z w
0 3 5 7
1 4 6 8
>>> df.drop(['y', 'z'], axis=1)
x w
0 1 7
1 2 8
>>> df.drop(columns=['y', 'z'])
x w
0 1 7
1 2 8
Notes
-----
Currently only axis = 1 is supported in this function,
axis = 0 is yet to be implemented.
"""
if labels is not None:
axis = self._validate_axis(axis)
if axis == 1:
return self.drop(columns=labels)
raise NotImplementedError("Drop currently only works for axis=1")
elif columns is not None:
if isinstance(columns, str):
columns = [columns]
sdf = self._sdf.drop(*columns)
metadata = self._metadata.copy(
data_columns=[column for column in self.columns if column not in columns]
)
return DataFrame(sdf, metadata)
else:
raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'])
>>> df
x y z
0 0 a a
1 1 b b
2 2 b b
>>> df.get('x')
0 0
1 1
2 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
0 0 a
1 1 b
2 2 b
"""
try:
return self._pd_getitem(key)
except (KeyError, ValueError, IndexError):
return default
def sort_values(self, by, ascending=True, inplace=False, na_position='last'):
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df
col1 col2 col3
0 A 2 0
1 B 9 9
2 None 8 4
3 D 7 2
4 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 B 9 9
4 C 4 3
3 D 7 2
2 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
3 D 7 2
4 C 4 3
1 B 9 9
0 A 2 0
2 None 8 4
Sort by multiple columns
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
"""
if isinstance(by, str):
by = [by]
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError('Length of ascending ({}) != length of by ({})'
.format(len(ascending), len(by)))
if na_position not in ('first', 'last'):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](self[colname]._scol)
for colname, asc in zip(by, ascending)]
kdf = DataFrame(self._sdf.sort(*by), self._metadata.copy())
if inplace:
self._sdf = kdf._sdf
self._metadata = kdf._metadata
else:
return kdf
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'],
... columns=['num_legs', 'num_wings'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns)))
_select_columns = self._metadata.index_columns
if isinstance(values, dict):
for col in self.columns:
if col in values:
_select_columns.append(self[col]._scol.isin(values[col]).alias(col))
else:
_select_columns.append(F.lit(False).alias(col))
elif is_list_like(values):
_select_columns += [
self[col]._scol.isin(list(values)).alias(col) for col in self.columns]
else:
raise TypeError('Values should be iterable, Series, DataFrame or dict.')
return DataFrame(self._sdf.select(_select_columns), self._metadata.copy())
def pipe(self, func, *args, **kwargs):
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ks.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
def merge(self, right: 'DataFrame', how: str = 'inner', on: str = None,
left_index: bool = False, right_index: bool = False,
suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame':
"""
Merge DataFrame objects with a database-style join.
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{‘left’, ‘right’, ‘outer’, ‘inner’}, default ‘inner’
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> left_kdf = ks.DataFrame({'A': [1, 2]})
>>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True)
A B
0 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left')
A B
0 1 None
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right')
A B
0 2.0 x
1 NaN y
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer')
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
if on is None and not left_index and not right_index:
            raise SparkPandasMergeError("At least 'on' or 'left_index' and 'right_index' have "
                                        "to be set")
if on is not None and (left_index or right_index):
raise SparkPandasMergeError("Only 'on' or 'left_index' and 'right_index' can be set")
if how == 'full':
warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " +
"instead to be compatible with the pandas merge API", UserWarning)
if how == 'outer':
# 'outer' in pandas equals 'full' in Spark
how = 'full'
if how not in ('inner', 'left', 'right', 'full'):
raise ValueError("The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']")
if on is None:
# FIXME Move index string to constant?
on = '__index_level_0__'
left_table = self._sdf.alias('left_table')
right_table = right._sdf.alias('right_table')
# Unpack suffixes tuple for convenience
left_suffix = suffixes[0]
right_suffix = suffixes[1]
# Append suffixes to columns with the same name to avoid conflicts later
duplicate_columns = list(self.columns & right.columns)
if duplicate_columns:
for duplicate_column_name in duplicate_columns:
left_table = left_table.withColumnRenamed(duplicate_column_name,
duplicate_column_name + left_suffix)
right_table = right_table.withColumnRenamed(duplicate_column_name,
duplicate_column_name + right_suffix)
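        # If the join key itself collided, join on the suffixed names; otherwise join on the shared key directly.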
join_condition = (left_table[on] == right_table[on] if on not in duplicate_columns
else left_table[on + left_suffix] == right_table[on + right_suffix])
joined_table = left_table.join(right_table, join_condition, how=how)
if on in duplicate_columns:
# Merge duplicate key columns
joined_table = joined_table.withColumnRenamed(on + left_suffix, on)
joined_table = joined_table.drop(on + right_suffix)
# Remove auxiliary index
# FIXME Move index string to constant?
joined_table = joined_table.drop('__index_level_0__')
kdf = DataFrame(joined_table)
return kdf
def _pd_getitem(self, key):
from databricks.koalas.series import Series
if key is None:
raise KeyError("none key")
if isinstance(key, str):
try:
return Series(self._sdf.__getitem__(key), anchor=self,
index=self._metadata.index_map)
except AnalysisException:
raise KeyError(key)
if np.isscalar(key) or isinstance(key, (tuple, str)):
raise NotImplementedError(key)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, (pd.Series, np.ndarray, pd.Index)):
raise NotImplementedError(key)
if isinstance(key, list):
return self.loc[:, key]
if isinstance(key, DataFrame):
# TODO Should not implement alignment, too dangerous?
return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_map)
if isinstance(key, Series):
# TODO Should not implement alignment, too dangerous?
# It is assumed to be only a filter, otherwise .loc should be used.
bcol = key._scol.cast("boolean")
return DataFrame(self._sdf.filter(bcol), self._metadata.copy())
raise NotImplementedError(key)
def __repr__(self):
return repr(self.head(max_display_count).to_pandas())
def _repr_html_(self):
return self.head(max_display_count).to_pandas()._repr_html_()
def __getitem__(self, key):
return self._pd_getitem(key)
def __setitem__(self, key, value):
from databricks.koalas.series import Series
# For now, we don't support realignment against different dataframes.
# This is too expensive in Spark.
# Are we assigning against a column?
if isinstance(value, Series):
assert value._kdf is self, \
"Cannot combine column argument because it comes from a different dataframe"
if isinstance(key, (tuple, list)):
assert isinstance(value.schema, StructType)
field_names = value.schema.fieldNames()
kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)})
else:
kdf = self.assign(**{key: value})
self._sdf = kdf._sdf
self._metadata = kdf._metadata
def __getattr__(self, key: str) -> Any:
from databricks.koalas.series import Series
if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
return Series(self._sdf.__getattr__(key), anchor=self, index=self._metadata.index_map)
def __iter__(self):
return self.toPandas().__iter__()
def __len__(self):
return self._sdf.count()
def __dir__(self):
fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]
return super(DataFrame, self).__dir__() + fields
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
def _reduce_spark_multi(sdf, aggs):
"""
Performs a reduction on a dataframe, the functions being known sql aggregate functions.
"""
assert isinstance(sdf, spark.DataFrame)
sdf0 = sdf.agg(*aggs)
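    # head(2) lets the assertion below catch the unexpected case of more than one result row.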
l = sdf0.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row)
assert len(l2) == len(aggs), (row, l2)
return l2
``` |
{
"source": "28757B2/homeassistant-nexus",
"score": 3
} |
#### File: 28757B2/homeassistant-nexus/__main__.py
```python
import argparse
import time
from cc1101.config import RXConfig, Modulation
from cc1101 import CC1101
from .common import FREQUENCY, BAUD_RATE, SYNC_WORD, PACKET_LENGTH, Message, decode_rx_bytes, message_vote
def rx(args: argparse.Namespace) -> None:
rx_config = RXConfig(FREQUENCY, Modulation.OOK, BAUD_RATE, SYNC_WORD, PACKET_LENGTH)
radio = CC1101(args.device, rx_config)
print("Receiving Packets")
while True:
for rx_bytes in radio.receive():
messages = []
for packet in decode_rx_bytes(rx_bytes):
try:
messages.append(Message.from_packet(packet))
except ValueError:
pass
if len(messages) > 0:
message = message_vote(messages)
print(message)
time.sleep(1)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
rx_parser = subparsers.add_parser("rx", help="Receive packets")
rx_parser.add_argument("device", help='CC1101 Device')
rx_parser.set_defaults(func=rx)
args = parser.parse_args()
if "func" in args:
args.func(args)
else:
parser.print_help()
``` |
{
"source": "2877267169/Intelligent-Network-Rumor-Monitoring-System",
"score": 3
} |
#### File: Intelligent-Network-Rumor-Monitoring-System/analysis/analysis_processer.py
```python
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QMessageBox
import set_page_corpus_connect
from analysis import analysis_start
import os, sys, json
def get_formatted_json_file(json_file_name: str):
"""
    Load a formatted JSON result file.
    :param json_file_name: the JSON file to fetch, one of P.json, N.json, I.json, none.json.
    :return: the parsed dict, or a placeholder dict if the file is missing.
"""
if os.path.isfile(
os.path.join(set_page_corpus_connect.available_path, json_file_name)
) is True:
with open(os.path.join(set_page_corpus_connect.available_path, json_file_name), 'r', encoding='utf-8') as f:
my_obj: dict = json.load(f)
for i in range(len(my_obj['date'])):
                my_obj['date'][i] = str(my_obj['date'][i])[5:]  # strip the year prefix from the date label
return my_obj
else:
return {"date": ["error"], "data": [0]}
def get_P():
return get_formatted_json_file("P.json")
def get_N():
return get_formatted_json_file("N.json")
def get_I():
return get_formatted_json_file("I.json")
def get_none():
return get_formatted_json_file("none.json")
def get_4_percentage():
return [
get_formatted_json_file("P_percentage.json"),
get_formatted_json_file("N_percentage.json"),
get_formatted_json_file("I_percentage.json"),
get_formatted_json_file("none_percentage.json")
]
my_ana_path = ""
class AnaliseMessage(QThread):
s_send_analyse_process_bar = pyqtSignal(int)
s_send_analyse_process_text = pyqtSignal(str)
s_send_analyse_start_draw = pyqtSignal()
def f_send_my_analyse_process_bar(self, value: int):
self.s_send_analyse_process_bar.emit(value)
def f_send_text_message(self, my_text: str):
self.s_send_analyse_process_text.emit(my_text)
def set_path(self, path: str):
global my_ana_path
my_ana_path = path
def run(self):
global my_ana_path
if my_ana_path == "":
print("错误:未传入路径")
return
ana_start(my_ana_path)
analise_message = AnaliseMessage()
def ana_start(path=""):
if path == "":
print("error, 未传入路径")
return
print("分析进程已经开始")
"""
if os.path.isfile(
os.path.join(
path,
"P.json"
)
) is True:
        r = QMessageBox.question(my_ui.my_page_train_tp_pushButtton_clear_and_set_to_default, "Warning", 'Are you sure you want to reset the parameters to their defaults?',
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)"""
    analise_message.f_send_text_message("Preparing to start")
    # ######### here we go ####### #
analysis_start.start(path)
print("分析结束!")
analise_message.s_send_analyse_start_draw.emit()
analise_message.f_send_my_analyse_process_bar(100)
#
# test_obj = {
# "date": [
# "06-01",
# "06-02",
# "06-03",
# "06-04",
# "06-05"
# ],
# "data": [
# 10,
# 2,
# 20,
# 10,
# 15
# ]
# }
# test_obj2 = {
# "date": [
# "06-01",
# "06-02",
# "06-03",
# "06-04",
# "06-05"
# ],
# "data": [
# 15,
# 10,
# 20,
# 2,
# 10
# ]
# }
```
#### File: Intelligent-Network-Rumor-Monitoring-System/data/bert_train_complex.py
```python
import os
from PyQt5.QtCore import QThread, pyqtSignal
from data.data_ops import Data_ops as Data
import set_page_corpus_connect
def get_file_context(file_path: str):
with open(file_path, 'r', encoding='utf-8') as f:
l = f.readlines()
res = ""
for s in l:
res += s
    res = res.replace('\n', '')  # str.replace returns a new string; assign the result back
line = res.split('\t')
res = (line[0] + '\t' + line[-1])
return res
def main_run(base_dir: str, is_train=False):
# base_dir = r"C:\Users\john\Desktop\repos\标注综合\顶替_大学"
data = Data(base_dir)
all_file_name = data.get_all_path()
cnt = 0
    # For compatibility, these files are always (re)created, even if nothing gets written to them.
f_train = open(os.path.join(base_dir, "bert_train.tsv"), 'w+', encoding='utf-8')
f_dev = open(os.path.join(base_dir, "bert_dev.tsv"), 'w+', encoding='utf-8')
f_test = open(os.path.join(base_dir, "bert_test.tsv"), 'w+', encoding='utf-8')
p_list = []
n_list = []
res_list = []
for file_path in all_file_name:
words = get_file_context(file_path=file_path).replace('\n', '')
        # fetch the training label (training-set generation)
if is_train is True:
label = data.get_file_text(
data.transforme_to_mark_file_path(base_dir=base_dir, file_path_list=[file_path])[0]
)
l = map(int, label.split('-'))
for i in l:
if i != 0:
label = '1'
else:
label = '0'
        # not training: fill the label with 1
else:
label = '1'
if label == '1':
p_list.append("%s\t%s\n" % (label, words))
else:
n_list.append("%s\t%s\n" % (label, words))
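    # Crude class balancing: keep every positive sample plus at most twice as many negatives.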
p_len = len(p_list)
res_list += p_list
res_list += n_list[:int((p_len) * 2)]
res_list.sort(key=lambda x: x.split('\t')[2])
for line in res_list:
        # see the compatibility note above: the files always exist but stay empty unless written
if is_train is True:
f_train.write(line)
if cnt % 2 == 0:
f_test.write(line)
if cnt % 3 == 0:
f_dev.write(line)
else:
            # write all records into the test split
f_test.write(line)
if cnt % 3 == 0:
f_dev.write(line)
cnt += 1
if cnt % 100 == 0:
thread_train_comp.send_message(str(cnt))
print(cnt)
thread_train_comp.send_message("已完成,共处理 %d 条记录" % cnt)
print("已完成,共处理 %d 条记录" % cnt)
f_train.close()
f_test.close()
f_dev.close()
class Thread_train_comp(QThread):
my_send_message = pyqtSignal(str, str)
def set_args(self, base_dir: str, is_train:bool):
self.base_dir = base_dir
def send_message(self, message_str, end='\n'):
self.my_send_message.emit(message_str, end)
def run(self):
main_run(self.base_dir, set_page_corpus_connect.is_train_available)
thread_train_comp = Thread_train_comp()
```
#### File: 2877267169/Intelligent-Network-Rumor-Monitoring-System/set_page_corpus_connect.py
```python
import threading
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog
import os, sys
import MainWindow
from data import transform_json_to_txt, data_ops
from data import bert_train_complex
import running_state
import json
# workaround for packaging issues
import matplotlib
from main_window_run import my_app_data, my_app_img_dir
matplotlib.use("Agg")
# bridge package between matplotlib and Qt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
is_run_available = False
is_train_available = False
is_path_available = False  # path validity flag
available_path = ""
# my_ui = MainWindow.Ui_MainWindow()
class PathPara():
def __init__(self):
self.json_file = 'json_file'
self.work_path = 'work_path'
self.dict = 'dict'
path_para = PathPara()
# 获得全部参数
def get_all_path():
global my_ui
my_data = data_ops.Data_ops(my_ui.my_page_corpus_lineEdit_workPath.text())
if my_data.test() is True:
my_sum = str(len(my_data.get_all_path()))
else:
my_sum = "NaN"
res = {
path_para.json_file: my_ui.my_page_corpus_lineEdit_from_json.text(),
path_para.work_path: my_ui.my_page_corpus_lineEdit_workPath.text(),
path_para.dict: my_ui.my_page_corpus_lineEdit_directory.text(),
"dataset_name": str(my_ui.my_page_corpus_lineEdit_from_json.text()).split('\\')[-1].split('/')[-1].replace(".json", ''),
"sum": my_sum
}
return res
# set all path parameters from a dict
def set_all_path(para: dict):
global my_ui
my_ui.my_page_corpus_lineEdit_from_json.setText(para[path_para.json_file])
my_ui.my_page_corpus_lineEdit_workPath.setText(para[path_para.work_path])
my_ui.my_page_corpus_lineEdit_directory.setText(para[path_para.dict])
def set_page_corpus_connect(ui: MainWindow.Ui_MainWindow):
global ax_bar
global my_ui
    global FC_corpus  # plotting canvas widget
global fig
my_ui = ui
    # buttons
ui.my_page_corpus_button_fromJson.clicked.connect(get_user_input_and_set_to_json_lineEdit)
ui.my_page_corpus_button_workDir.clicked.connect(get_user_input_and_set_to_workDir)
ui.my_page_corpus_button_directionary.clicked.connect(get_user_input_and_set_to_directory_lineEdit)
ui.my_page_corpus_button_rewrite_graph.clicked.connect(save_png)
    # command-link buttons
ui.my_page_corpus_commandLinkButton_verify.clicked.connect(verify_files)
ui.my_page_corpus_commandLinkButton_go_for_work_space.clicked.connect(create_work_space)
ui.my_page_corpus_commandLinkButton_train.clicked.connect(create_TSV_file)
    # line edits
ui.my_page_corpus_lineEdit_from_json.textChanged.connect(set_run_unavailable)
ui.my_page_corpus_lineEdit_directory.textChanged.connect(set_run_unavailable)
ui.my_page_corpus_lineEdit_workPath.textChanged.connect(set_run_unavailable)
    # cross-thread message wiring
transform_json_to_txt.thread_transform_json_to_txt.my_send_message.connect(ptr_message)
transform_json_to_txt.thread_transform_json_to_txt.my_send_process_bar_message.connect(set_process_bar)
bert_train_complex.thread_train_comp.my_send_message.connect(ptr_message)
    # message wiring for the plot canvas
transform_json_to_txt.thread_transform_json_to_txt.my_send_dict_for_graph.connect(re_graph)
    # canvas initialization
fig = plt.figure()
FC_corpus = FigureCanvas(fig)
ax_bar = fig.add_subplot(1, 1, 1)
    # attach the main canvas to the layout
my_ui.my_page_corpus_gridLayout.layout().addWidget(FC_corpus)
    # used only during testing, hence left disabled
# my_ui.my_page_data_groupBox_for_graph.layout().addWidget(FigureCanvas(f))
if os.path.isfile(os.path.join(my_app_data, "corpus.json")) is True:
print("加载保存的路径...")
with open(os.path.join(my_app_data, "corpus.json"), 'r+', encoding='utf-8') as f:
paras = json.load(f)
set_all_path(para=paras)
def get_user_input_and_set_to_json_lineEdit():
global my_ui
file_path, file_type = QFileDialog.getOpenFileName(my_ui.my_page_corpus_lineEdit_from_json, "Select json file", '.',
"json file(*.json)")
my_ui.my_page_corpus_lineEdit_from_json.setText(file_path)
def get_user_input_and_set_to_workDir():
global my_ui
dir_path = QFileDialog.getExistingDirectory(my_ui.my_page_corpus_lineEdit_workPath, "Select work dir", '.')
my_ui.my_page_corpus_lineEdit_workPath.setText(dir_path)
    # note: this change on the first page also affects the second page
my_ui.my_page_train_lineEdit_data_dir.setText(dir_path)
def get_user_input_and_set_to_directory_lineEdit():
global my_ui
sentiment_path = QFileDialog.getExistingDirectory(my_ui.my_page_corpus_lineEdit_directory,
"Select Sentiment dir",
'.')
my_ui.my_page_corpus_lineEdit_directory.setText(sentiment_path)
def verify_files():
global my_ui
global is_run_available
global is_train_available
global available_path
json_file_path = my_ui.my_page_corpus_lineEdit_from_json.text()
work_path = my_ui.my_page_corpus_lineEdit_workPath.text()
dictionary = my_ui.my_page_corpus_lineEdit_directory.text()
if os.path.isfile(json_file_path) \
and os.path.isdir(work_path) \
and (os.path.isdir(dictionary) and os.path.isfile(os.path.join(dictionary, "sentiment.ini"))):
        # validation succeeded
        ptr_message('Validation succeeded')
        # persist the settings
paras = get_all_path()
available_path = paras[path_para.work_path]
with open(os.path.join(my_app_data, "corpus.json"), 'w+', encoding='utf-8') as f:
json.dump(paras, f, ensure_ascii=False, indent=4)
is_run_available = True
if os.path.isdir(os.path.join(work_path, 'mark')) is True:
            ptr_message('Dataset detected <trainable>')
            my_ui.my_page_corpus_button_for_state.setText('Dataset (trainable)')
my_ui.my_page_corpus_button_for_state.setStyleSheet("background-color: rgb(170, 170, 255);")
is_train_available = True
else:
            ptr_message('Dataset detected <analysis only>')
            my_ui.my_page_corpus_button_for_state.setText('Dataset (analysis only)')
my_ui.my_page_corpus_button_for_state.setStyleSheet("background-color: rgb(85, 170, 255);")
is_train_available = False
else:
        ptr_message('Validation failed! Please check the paths')
        my_ui.my_page_corpus_button_for_state.setText('No dataset selected')
my_ui.my_page_corpus_button_for_state.setStyleSheet("")
is_run_available = False
def set_run_unavailable():
    # called whenever a path in one of the line edits is changed manually
global is_run_available
is_run_available = False
    ptr_message('Paths changed; please validate again.')
def ptr_message(mystr: str, end='\n'):
global my_ui
my_ui.my_page_corpus_textEdit.setText("%s%s%s" % (mystr, end, my_ui.my_page_corpus_textEdit.toPlainText()))
def set_process_bar(proc: int):
my_ui.my_page_corpus_ProcessBar.setValue(proc)
def create_work_space():
print('create_work_space')
global is_run_available
if is_run_available is False:
ptr_message("你的更改的内容没有进行校验,请先进行校验!")
return
global my_ui
work_path = my_ui.my_page_corpus_lineEdit_workPath.text()
json_path = my_ui.my_page_corpus_lineEdit_from_json.text()
transform_json_to_txt.thread_transform_json_to_txt.set_args(json_file_path=json_path, out_put_dir=work_path,
moudle_name='')
transform_json_to_txt.thread_transform_json_to_txt.start()
# canvas refresh
def re_graph(d: dict):
"""
    Redraw the bar-chart canvas.
    :param d: the data dict to plot
    :return: None
"""
print("尝试刷新")
global ax_bar
global FC_corpus
ax_bar.cla()
keys = list(d.keys())
ax_bar.set_title("Frequency of Information")
# ax_bar.set_xticks(rotation=270)
for tick in ax_bar.get_xticklabels():
tick.set_rotation(300)
cops = []
for key in keys:
cops.append([key, d[key]])
cops.sort(key=lambda x: x[0])
key_value = []
value = []
for line in cops:
key_value.append(line[0])
value.append(line[1])
ax_bar.bar(key_value, value)
FC_corpus.draw()
save_png()
def create_TSV_file():
print('create tsv file')
global is_run_available
if is_run_available is False:
ptr_message("你的更改的内容没有进行校验,请先进行校验!")
return
if running_state.is_running is True:
ptr_message("你不应该在运行任务的时候进行其他任务")
return
base_dir = my_ui.my_page_corpus_lineEdit_workPath.text()
# if os.path.isdir(os.path.join(base_dir, 'mark')) is False:
    # ptr_message('Training files can only be generated after annotation!')
# return
    is_train = my_ui.my_page_corpus_button_for_state.text().find('trainable') >= 0  # matches the button text set in verify_files()
bert_train_complex.thread_train_comp.set_args(base_dir=base_dir, is_train=is_train)
bert_train_complex.thread_train_comp.start()
def save_png():
global fig
# my_app_img_dir
fig.savefig(os.path.join(my_app_img_dir, "corpus.png"))
print("saved corpus")
def re_draw():
"""
    Redraw the plot canvas on the corpus settings page.
    :return: None
"""
global my_ui
    global FC_corpus  # plotting canvas widget
FC_corpus.draw()
save_png()
```
#### File: 2877267169/Intelligent-Network-Rumor-Monitoring-System/set_page_hot_connect.py
```python
import os
from PyQt5.QtWidgets import QFileDialog
import MainWindow
import wordcloud
# bridge package between matplotlib and Qt
# workaround for packaging issues
import matplotlib
from main_window_run import my_app_img_dir
matplotlib.use("Agg")
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
from c_word_cloude import word_clouad_create
# initialize the figure
f = plt.figure()
# canvas widget
FC_hot = FigureCanvas(f)
# subplots
ax_wc_l = f.add_subplot(1, 2, 1)
ax_wc_r = f.add_subplot(1, 2, 2)
def set_page_connect(ui: MainWindow.Ui_MainWindow):
global ax_wc_r
global ax_wc_l
global my_ui
    global FC_hot  # plotting canvas widget
global f
my_ui = ui
    # command-link buttons
my_ui.my_page_hot_commandLinkButton_menu_run.clicked.connect(go_for_run_word_cloud)
    my_ui.my_page_hot_rewrite_pushButton.clicked.connect(save_png)  # re-save
my_ui.my_page_hot_saveas_pushButton.clicked.connect(save_as)
    # thread signal wiring
word_clouad_create.word_cloude_create.send_graph_process_bar.connect(set_hot_process_bar)
    # # initialize the figure
# f = plt.figure()
# FC_hot = FigureCanvas(f)
#
    # # subplots
# ax_wc_l = f.add_subplot(1, 2, 1)
# ax_wc_r = f.add_subplot(1, 2, 2)
    # # attach the main canvas to the layout
my_ui.my_page_hot_gridLayout_for_graph.layout().addWidget(FC_hot)
word_clouad_create.word_cloude_create.send_cloude_dict.connect(set_cloud)
set_cloud({"等待分析": 1.0}, {"等待分析": 1.0})
def go_for_run_word_cloud():
word_clouad_create.word_cloude_create.set_path(my_ui.my_page_corpus_lineEdit_workPath.text())
word_clouad_create.word_cloude_create.start()
def set_cloud(a: dict, b: dict):
global ax_wc_r
global ax_wc_l
global FC_hot
wc1 = wordcloud.WordCloud(font_path='STXINGKA.TTF', height=460, width=500, background_color='white')
wc2 = wordcloud.WordCloud(font_path='STXINGKA.TTF', height=460, width=500, background_color='white')
wc1.generate_from_frequencies(a)
wc2.generate_from_frequencies(b)
ax_wc_l.set_title("Heat clouds of Message")
ax_wc_l.imshow(wc1)
ax_wc_l.axis('off')
ax_wc_r.set_title("Dispels the top 20 for subsidiary")
ax_wc_r.imshow(wc2)
ax_wc_r.axis('off')
FC_hot.draw()
save_png()
def set_hot_process_bar(setp_process: int):
global my_ui
my_ui.my_page_hot_progressBar.setValue(setp_process)
def save_png():
# my_app_img_dir
f.savefig(os.path.join(my_app_img_dir, "word_cloud.png"))
print("saved word cloud gph")
def save_as():
global my_ui
filename_choose, _ = QFileDialog.getSaveFileName(
my_ui.my_page_hot,
"文件保存",
'.', # 起始路径
"PNG Files (*.png)"
)
if filename_choose == "":
print("取消选择")
return
f.savefig(filename_choose)
```
#### File: 2877267169/Intelligent-Network-Rumor-Monitoring-System/set_page_warning_connect.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog, QMessageBox
import os, sys
import MainWindow
import set_page_corpus_connect
from analysis.analysis_processer import get_4_percentage
from analysis.read_from_bert import get_bert_res_to_json
from data import transform_json_to_txt
from data import bert_train_complex
import running_state
import json
from warning import wcalc
from analysis import analysis_processer
# bridge package between matplotlib and Qt
# workaround for packaging issues
import matplotlib
from main_window_run import my_app_img_dir
from set_page_data_analyse_connect import obj_sort
from set_page_train_connect import get_all_file_parameters, file_parameters
matplotlib.use("Agg")
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
# figure object
my_page_warning_fig = plt.figure()
# build the canvas widget from the figure
my_page_warning_FC = FigureCanvas(my_page_warning_fig)
# the axes that actually do the drawing
my_page_warning_ax = my_page_warning_fig.add_subplot(1, 1, 1)
def set_warning_connect(ui: MainWindow.Ui_MainWindow):
global my_ui
my_ui = ui
    # connect signals and slots
ui.my_page_warning_start_commandLinkButton.clicked.connect(start_to_warning)
    # add the canvas to the widget
ui.my_page_warning_graph.layout().addWidget(my_page_warning_FC)
init_graph()
def init_graph():
zero_obj = {"date": [0], "data": [0]}
draw_graph(zero_obj, zero_obj, zero_obj, zero_obj, zero_obj)
def draw_graph(P: dict, N: dict, I: dict, none: dict, bert_res=None):
if bert_res is None:
bert_res = {"date": [0], "data": [0]}
base_dir = my_app_img_dir
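    # Plot the four normalized sentiment series plus the BERT predictions on a single axis.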
my_page_warning_ax.cla()
my_page_warning_ax.set_title("Normalized data analysis")
p_x = list(range(len(P["date"])))
my_page_warning_ax.plot(p_x, P["data"], linestyle='none', marker='P', color="orange", label="Positive Value")
my_page_warning_ax.plot(p_x, N["data"], linestyle='none', marker='P', color="blue", label="Negative Value")
my_page_warning_ax.plot(p_x, I["data"], linestyle='none', marker='x', color="red", label="Intensity Value")
my_page_warning_ax.plot(p_x, none["data"], linestyle='none', marker='x', color="lightseagreen",
label="Tiny Attitude Value")
my_page_warning_ax.plot(bert_res["date"], bert_res["data"], linestyle='none', marker='.',
color="green", label="BERT Model res")
for tick in my_page_warning_ax.get_xticklabels():
tick.set_rotation(300)
my_page_warning_ax.legend()
my_page_warning_FC.draw()
my_page_warning_fig.savefig(os.path.join(base_dir, "warning.png"))
print("warning saved")
def loud_from_file_to_graph():
return get_4_percentage()
def start_to_warning():
global my_ui
print("分析命令开始")
paras = get_all_file_parameters()
work_path = set_page_corpus_connect.available_path
res_path = os.path.join(paras[file_parameters.output_dir], "test_results.tsv")
    # plotting
my_ui.my_page_warning_progressBar.setValue(20)
if os.path.isfile(res_path) is False:
print("找不到训练结果文件!%s" % res_path)
QMessageBox.critical(
my_ui.stackedWidget,
'错误',
"\"%s\"\n找不到训练结果文件!" % res_path,
QMessageBox.Close
)
my_ui.my_page_warning_progressBar.setValue(0)
return
if os.path.isdir(work_path) is False:
print("工作路径有问题,请检查是否已完成语料设置%s" % work_path)
QMessageBox.critical(
my_ui.stackedWidget,
'错误',
"\"%s\"\n工作路径有问题,请检查是否已完成语料设置" % work_path,
QMessageBox.Close
)
my_ui.my_page_warning_progressBar.setValue(0)
return
l: list = loud_from_file_to_graph()
get_bert_res_to_json(work_path=set_page_corpus_connect.available_path,
bert_train_res_file_path=os.path.join(paras[file_parameters.output_dir], "test_results.tsv"))
if os.path.isfile(os.path.join(work_path, "toal.json")) is True:
print("成功检测到结果文件 %s ,现在画图" % os.path.join(work_path, "toal.json"))
with open(os.path.join(work_path, "toal.json"), 'r', encoding='utf-8') as f:
my_obj = json.load(f)
my_out_obj = obj_sort({"date": my_obj[0], "data": my_obj[1]})
tmp = ""
count = 0
for i in range(len(my_out_obj["date"])):
if my_out_obj["date"][i] != tmp:
tmp = my_out_obj["date"][i]
my_out_obj["date"][i] = count
count += 1
else:
my_out_obj["date"][i] = count
a = list(set(my_out_obj["date"]))
draw_graph(l[0], l[1], l[2], l[3], my_out_obj)
        # intentionally no return here
else:
QMessageBox.critical(
my_ui.stackedWidget,
            'Error',
            "Finished, but generating %s ran into a problem." % os.path.join(work_path, "toal.json"),
QMessageBox.Close
)
print("执行完毕,但%s生成存在问题。" % os.path.join(work_path, "toal.json"))
my_ui.my_page_warning_progressBar.setValue(0)
return
    # run the percentage trend analysis
my_ui.my_page_warning_progressBar.setValue(50)
wcalc.warning_start(n=l[1]["data"], i=l[2]["data"], work_path=work_path)
if os.path.isfile(os.path.join(work_path, "warning.json")) is False:
QMessageBox.critical(
my_ui.stackedWidget,
            'Error',
            "Finished, but generating %s ran into a problem." % os.path.join(work_path, "warning.json"),
QMessageBox.Close
)
my_ui.my_page_warning_progressBar.setValue(0)
return
my_w_out: list = wcalc.get_obj(work_path=work_path)
res_obj = wcalc.go_for_str(my_w_out)
my_ui.my_page_warning_see_textEdit.setText("%s%s%s%s" % (res_obj[0], res_obj[1], res_obj[2], res_obj[3]))
my_ui.my_page_warninig_message_text_edit.setText("%s" % res_obj[4])
my_ui.my_page_warning_progressBar.setValue(100)
return
``` |
{
"source": "287977288/test",
"score": 2
} |
#### File: newsite/newsite/view.py
```python
from django.shortcuts import render
def hello(request):
context = {}
context['hello'] = 'Hello World!'
print("view.hello")
return render(request, 'hello.html', context)
``` |
{
"source": "2890841438/fast-index.py",
"score": 3
} |
#### File: fast-index.py/utils/DateEncoder.py
```python
import json
import datetime
class DateEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, datetime.date):
return obj.strftime("%Y-%m-%d")
else:
return json.JSONEncoder.default(self, obj)
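# usage sketch: json.dumps({"now": datetime.datetime.now()}, cls=DateEncoder)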
``` |
{
"source": "2890841438/index.py",
"score": 3
} |
#### File: index.py/indexpy/concurrency.py
```python
import asyncio
import functools
import inspect
import typing
from starlette.concurrency import run_in_threadpool
def complicating(func: typing.Callable) -> typing.Callable[..., typing.Awaitable]:
"""
    always return an awaitable callable object
"""
if asyncio.iscoroutinefunction(func):
return func
if not (inspect.isfunction(func) or inspect.ismethod(func)):
if inspect.isclass(func):
# class that has `__await__` method
if hasattr(func, "__await__"):
return func
else:
# callable object
if asyncio.iscoroutinefunction(getattr(func, "__call__")):
return func
@functools.wraps(func)
async def wrapper(*args, **kwargs) -> typing.Any:
return await run_in_threadpool(func, *args, **kwargs)
return wrapper
def keepasync(*args: str) -> typing.Callable[..., object]:
"""
Ensure that the specified method must be an asynchronous function
example:
class T(metaclass=keepasync("a", "b")):
def a(self):
pass
async def b(self):
pass
"""
class AlwaysAsyncMeta(type):
def __new__(
cls: type,
clsname: str,
bases: typing.Tuple[type],
namespace: typing.Dict[str, typing.Any],
):
for name in args:
if name not in namespace:
continue
namespace[name] = complicating(namespace[name])
return type.__new__(cls, clsname, bases, namespace)
return AlwaysAsyncMeta
```
#### File: indexpy/websocket/view.py
```python
import json
import typing
from starlette import status
from ..types import Literal, Message
from .request import WebSocket
class SocketView:
encoding: Literal["text", "bytes", "json"] = "json"
def __init__(self, websocket: WebSocket) -> None:
self.websocket = websocket
def __await__(self):
return self.__impl__().__await__()
async def __impl__(self) -> None:
try:
close_code = status.WS_1000_NORMAL_CLOSURE
await self.on_connect()
while True:
message = await self.websocket.receive()
if message["type"] == "websocket.receive":
data = await self.decode(message)
await self.on_receive(data)
elif message["type"] == "websocket.disconnect":
close_code = int(message.get("code", status.WS_1000_NORMAL_CLOSURE))
break
except Exception as exc:
close_code = status.WS_1011_INTERNAL_ERROR
raise exc from None
finally:
await self.on_disconnect(close_code)
async def decode(self, message: Message) -> typing.Any:
if self.encoding == "text":
if "text" not in message:
await self.websocket.close(code=status.WS_1003_UNSUPPORTED_DATA)
raise RuntimeError("Expected text websocket messages, but got bytes")
return message["text"]
if self.encoding == "bytes":
if "bytes" not in message:
await self.websocket.close(code=status.WS_1003_UNSUPPORTED_DATA)
raise RuntimeError("Expected bytes websocket messages, but got text")
return message["bytes"]
if self.encoding == "json":
if message.get("text") is not None:
text = message["text"]
else:
text = message["bytes"].decode("utf-8")
try:
return json.loads(text)
except json.decoder.JSONDecodeError:
await self.websocket.close(code=status.WS_1003_UNSUPPORTED_DATA)
raise RuntimeError("Malformed JSON data received.")
assert (
self.encoding is None
), f"Unsupported 'encoding' attribute {self.encoding}"
return message["text"] if message.get("text") else message["bytes"]
async def on_connect(self) -> None:
"""Override to handle an incoming websocket connection"""
await self.websocket.accept()
async def on_receive(self, data: typing.Any) -> None:
"""Override to handle an incoming websocket message"""
async def on_disconnect(self, close_code: int) -> None:
"""Override to handle a disconnecting websocket"""
await self.websocket.close(code=close_code)
```
#### File: tests/routing/test_tree.py
```python
from decimal import Decimal
from uuid import UUID
import pytest
from indexpy.routing.tree import RadixTree
@pytest.fixture
def tree():
tree = RadixTree()
tree.append("/hello/{time:int}", ...)
tree.append("/hello", ...)
tree.append("/hello/world", ...)
tree.append("/sayhi/{name}", ...)
tree.append("/sayhi/{name}/suffix", ...)
tree.append("/sayhi/{name}/avatar.{suffix}", ...)
tree.append("/path/{filepath:path}", ...)
tree.append("/decimal/{number:decimal}", ...)
tree.append("/uuid/{id:uuid}", ...)
return tree
@pytest.mark.parametrize(
"path,params",
[
("/hello", {}),
("/hello/world", {}),
("/hello/123", {"time": 123}),
("/sayhi/aber", {"name": "aber"}),
("/sayhi/aber/suffix", {"name": "aber"}),
("/sayhi/aber/avatar.png", {"name": "aber", "suffix": "png"}),
("/path/adsf", {"filepath": "adsf"}),
("/path/adsf/123", {"filepath": "adsf/123"}),
("/decimal/1.111", {"number": Decimal("1.111")}),
(
"/uuid/123e4567-e89b-12d3-a456-426655440000",
{"id": UUID("123e4567-e89b-12d3-a456-426655440000")},
),
],
)
def test_tree_success_search(tree: RadixTree, path, params):
result = tree.search(path)
assert result is not None
    matched_params, node = result
    assert matched_params == params
@pytest.mark.parametrize(
"path",
["", "/hello/", "/hello/world/", "/sayhi/aber/avatar"],
)
def test_tree_fail_search(tree: RadixTree, path):
assert tree.search(path)[0] is None, f"error in {path}"
@pytest.mark.parametrize(
"path",
[
"/path/{urlpath:path}/",
"/sayhi/{name:int}/suffix",
"/sayhi/{hi}/suffix",
"/hello",
"a",
],
)
def test_tree_fail_add(tree: RadixTree, path):
with pytest.raises(ValueError):
tree.append(path, ...)
def test_tree_iterator(tree: RadixTree):
for _0, _1 in zip(
tree.iterator(),
[
("/hello", ...),
("/hello/{time}", ...),
("/hello/world", ...),
("/sayhi/{name}", ...),
("/sayhi/{name}/suffix", ...),
("/sayhi/{name}/avatar.{suffix}", ...),
("/path/{filepath}", ...),
("/decimal/{number}", ...),
("/uuid/{id}", ...),
],
):
assert _0 == _1
```
#### File: index.py/tests/test_concurrency.py
```python
import asyncio
import pytest
from indexpy.concurrency import complicating, keepasync
@pytest.mark.asyncio
async def test_complicating_0():
class AsyncCall:
async def __call__(self):
pass
await AsyncCall()()
asyncfunc = AsyncCall()
assert complicating(asyncfunc) is asyncfunc
@pytest.mark.asyncio
async def test_complicating_1():
class AsyncClass:
def __await__(self):
return self.dispatch().__await__()
async def dispatch(self):
pass
await AsyncClass()
assert complicating(AsyncClass) == AsyncClass
await AsyncClass().dispatch()
assert complicating(AsyncClass.dispatch) is AsyncClass.dispatch
@pytest.mark.asyncio
async def test_complicating_2():
async def async_func():
pass
await async_func()
assert complicating(async_func) is async_func
@pytest.mark.asyncio
async def test_complicating_3():
@asyncio.coroutine
def t():
pass
await t()
assert complicating(t) is t
@pytest.mark.asyncio
async def test_complicating_4():
def func():
"""t"""
await complicating(func)()
assert asyncio.iscoroutinefunction(complicating(func))
assert complicating(func).__name__ == func.__name__
assert complicating(func).__doc__ == func.__doc__
@pytest.mark.asyncio
async def test_complicating_5():
args = ("hello", "test")
class AlwaysAsyncMeta(type):
def __new__(cls: type, clsname, bases, namespace):
for name in args:
if name not in namespace:
continue
if name == "test":
assert namespace[name] is complicating(namespace[name])
namespace[name] = complicating(namespace[name])
return type.__new__(cls, clsname, bases, namespace)
class Test(metaclass=AlwaysAsyncMeta):
def hello(self):
pass
async def test(self):
pass
@pytest.mark.asyncio
async def test_keepasync():
class Test(metaclass=keepasync("hello", "test")):
def hello(self):
pass
async def test(self):
pass
await Test().hello()
await Test().test()
assert Test.test.__name__ == "test"
@pytest.mark.asyncio
async def test_keepasync_subclass():
class Base(metaclass=keepasync("hello", "test")):
def hello(self):
pass
class Sub(Base):
def test(self):
pass
await Sub().hello()
await Sub().test()
assert Sub.test.__name__ == "test"
```
#### File: index.py/tests/test_conf.py
```python
import pytest
from indexpy.conf import serve_config, ConfigError, UpperDict
def test_config():
assert isinstance(serve_config.DEBUG, bool)
assert isinstance(serve_config.PORT, int)
def test_upper_dict():
ud = UpperDict({"a": 1, "s": {"a": 2}})
assert ud["A"] == 1
assert ud["S"]["A"] == 2
def test_edit():
with pytest.raises(ConfigError):
del serve_config.DEBUG
with pytest.raises(ConfigError):
del serve_config["DEBUG"]
with pytest.raises(ConfigError):
serve_config.DEBUG = True
``` |
{
"source": "2892211452/JK-DM",
"score": 2
} |
#### File: JK-DM/cmdb/views.py
```python
from django.shortcuts import render
from django.shortcuts import HttpResponse
from django.http import JsonResponse
from django import forms
from cmdb import models
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def upload(request):
if request.method=="POST":
file_obj = request.FILES.get("file")
test = request.FILES.get('type')
print(test)
print(file_obj)
with open(file_obj.name,'wb') as f:
for line in file_obj:
f.write(line)
return HttpResponse("ok")
return HttpResponse("error")
# Create your views here.
def bili(request):
url = request.GET.get('url',default='110')
ans = '更改后'+url
return HttpResponse(ans)
def main(request):
# num = num+1
# print(num)
return render(request, "main.html", )
def login(request):
if request.method == "POST":
username = request.POST.get("username", None)
password = request.POST.get("password", None)
test = "test"
        # write the submitted credentials to the database
models.UserInfo.objects.create(user=username, pwd=password, test=test)
        # read all users back from the database
user_list = models.UserInfo.objects.all()
return render(request, "login.html", {"data": user_list})
def test(request):
return HttpResponse("hello kjgjhjh world")
```
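A client-side sketch of the upload endpoint. Since the view reads both 'file' and 'type' from `request.FILES`, both must be sent as multipart file parts; the URL path is hypothetical and depends on the project's urls.py:
```python
import requests

resp = requests.post(
    "http://127.0.0.1:8000/upload",            # hypothetical route to upload()
    files={"file": open("demo.txt", "rb"),
           "type": ("type.txt", b"txt")},      # sent as a file part on purpose
)
print(resp.text)  # "ok" on success, "error" otherwise
```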
#### File: JK-DM/yh/load.py
```python
import numpy
import sys
sys.path.append("/home/lwl/Study/code/Python/django/mysite-master/yh")
def save(list, path, ):
numpy.save('/home/lwl/Study/code/Python/django/mysite-master/yh/npy/' +path+".npy",list)
def load(list, path):  # NOTE: the incoming 'list' is discarded (and shadows the builtin)
    a = numpy.load('/home/lwl/Study/code/Python/django/mysite-master/yh/npy/' + path + ".npy")
    a = a.tolist()
    list = []
    for item in a:
        list.append(['http://www.imomoe.io' + item[0][0], item[1][0], item[2][0]])
    return list
# list=[]
# list =load(list, '热血')
# print(list)
```
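A round-trip sketch of the two helpers. The hard-coded npy directory above must exist on disk, and each stored item has the nested [[url], [title], [image]] shape that `load()` unpacks:
```python
items = [[['/view/1.html'], ['Naruto'], ['/img/1.jpg']]]   # placeholder scrape result
save(items, 'demo')
print(load([], 'demo'))
# -> [['http://www.imomoe.io/view/1.html', 'Naruto', '/img/1.jpg']]
```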
#### File: JK-DM/yh/pamp4.py
```python
import requests
def get_redirect_url():
    # the URL before redirection
    url = "http://quan.qq.com/video/1098_7e9941a34296e2372f4b565c280ebe1c"
    # request headers; a browser User-Agent is set here
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
    # request the page
    response = requests.get(url, headers=headers)
    print(response.status_code)  # print the response status code
    print(response.url)  # print the URL after redirection
    # return the final (redirected) URL
    return response.url
if __name__ == '__main__':
redirect_url = get_redirect_url()
``` |
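`requests` follows redirects by default and keeps the intermediate hops on `response.history`, so the full chain can be inspected too (placeholder URL):
```python
import requests

resp = requests.get("http://example.com/short-link", allow_redirects=True)
for hop in resp.history:                 # each intermediate 3xx response
    print(hop.status_code, hop.url)
print("final:", resp.status_code, resp.url)
```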
{
"source": "2892211452/myPackage",
"score": 4
} |
#### File: lwlPackage/PathPro/path.py
```python
import os
# directory containing this file
FileDir = os.path.abspath(os.path.dirname(__file__))
# collect the file names under every directory below `path`
def getPathFile(path):
    ans = []
    for i, j, k in os.walk(path):  # the three values are: current path, sub-directory names, file names
        ans.append([i, k])
        # print(i, j, k)
    return ans
if __name__ == "__main__":
    print('*** current directory ***')
    print(os.getcwd())
    print(os.path.abspath(os.path.dirname(__file__)))
    print('*** parent directory ***')
    print(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
    print(os.path.abspath(os.path.dirname(os.getcwd())))
    print(os.path.abspath(os.path.join(os.getcwd(), "..")))
    print('*** grandparent directory ***')
    print(os.path.abspath(os.path.join(os.getcwd(), "../..")))
``` |
{
"source": "2892211452/SXCsuOntOf",
"score": 3
} |
#### File: python/flask/flaskApi.py
```python
from flask import Flask
import json
from flask_cors import CORS  # cross-origin requests
from epicSpider import *
import pickle
# raise the recursion limit so deeply nested data can be pickled
import sys
sys.setrecursionlimit(10000)
app = Flask(__name__)
from path import *
# enable CORS
CORS(app)
@app.route('/getEpicFreeGame')
def getEpicFreeGame():
games = {
'content':getEpic(),
'status':200
}
games = json.dumps(games)
return games
# Example browser-side XMLHttpRequest for this endpoint:
# var data = null;
# var xhr = new XMLHttpRequest();
# xhr.withCredentials = true;
# xhr.addEventListener("readystatechange", function () {
# if (this.readyState === 4) {
# console.log(this.responseText);
# }
# });
# xhr.open("GET", "http://127.0.0.1:5000/getEpicFreeGame");
# xhr.setRequestHeader("cache-control", "no-cache");
# xhr.setRequestHeader("postman-token", "<PASSWORD>");
# xhr.send(data);
@app.route('/getNews')
def getNews():
output = open(FileDir+'/../newspider/data.pkl', 'rb')
data=pickle.load(output)
output.close()
news = {
'content':data,
'status':200
}
print(news)
news = json.dumps(news)
return news
# var data = null;
# var xhr = new XMLHttpRequest();
# xhr.withCredentials = true;
# xhr.addEventListener("readystatechange", function () {
# if (this.readyState === 4) {
# console.log(this.responseText);
# }
# });
# xhr.open("GET", "http://127.0.0.1:5000/getNews");
# xhr.setRequestHeader("cache-control", "no-cache");
# xhr.setRequestHeader("postman-token", "<PASSWORD>");
# xhr.send(data);
if __name__ == '__main__':
app.debug = True
app.run()
``` |
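With the server running locally, both endpoints return a JSON payload with `content` and `status` fields; a quick Python client equivalent to the commented XMLHttpRequest snippets:
```python
import requests

for endpoint in ("getEpicFreeGame", "getNews"):
    data = requests.get("http://127.0.0.1:5000/" + endpoint).json()
    print(endpoint, "->", data["status"], type(data["content"]))
```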
{
"source": "289371298/covid-xprize",
"score": 3
} |
#### File: covid_xprize/validation/predictor_validation.py
```python
import argparse
import itertools
from typing import List
import logging
import numpy as np
import pandas as pd
logging.basicConfig(
format='%(asctime)s %(name)-20s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S'
)
LOGGER = logging.getLogger('predictor_validation')
PREDICTED_DAILY_NEW_CASES = "PredictedDailyNewCases"
COLUMNS = {"CountryName",
"RegionName",
"Date",
PREDICTED_DAILY_NEW_CASES}
def validate_submission(start_date: str,
end_date: str,
ip_file: str,
submission_file: str) -> List[str]:
"""
Checks a prediction submission file is valid.
Args:
        start_date: the submission start date as a string, format YYYY-MM-DD
        end_date: the submission end date as a string, format YYYY-MM-DD
ip_file: path to a file-like object
submission_file: path to a file-like object
Returns: a list of string messages if errors were detected, an empty list otherwise
"""
pred_df = pd.read_csv(submission_file,
parse_dates=['Date'],
encoding="ISO-8859-1",
dtype={"RegionName": str},
error_bad_lines=True)
ip_df = pd.read_csv(ip_file,
parse_dates=['Date'],
encoding="ISO-8859-1",
dtype={"RegionName": str},
error_bad_lines=True)
all_errors = []
# Check we got the expected columns
all_errors += _check_columns(COLUMNS, pred_df)
if not all_errors:
# Columns are good, check we got prediction for each requested country / region
all_errors += _check_geos(ip_df, pred_df)
# Check the values in PredictedDailyNewCases
all_errors += _check_prediction_values(pred_df)
# Check the prediction dates are correct
all_errors += _check_days(start_date, end_date, pred_df)
return all_errors
def _check_columns(expected_columns, pred_df):
errors = []
# Make sure each column is present
missing_columns = expected_columns - set(pred_df.columns)
if missing_columns:
errors.append(f"Missing columns: {missing_columns}")
return errors
# Make sure column PredictedDailyNewCases contains numbers
column_type = pred_df[PREDICTED_DAILY_NEW_CASES].dtype
if not np.issubdtype(column_type, np.number):
errors.append(f"Column {PREDICTED_DAILY_NEW_CASES} contains non numerical values: {column_type}")
return errors
return errors
def _check_prediction_values(df):
# Make sure the column containing the predictions is there
errors = []
if PREDICTED_DAILY_NEW_CASES in df.columns:
if df[PREDICTED_DAILY_NEW_CASES].isnull().values.any():
errors.append(f"Column {PREDICTED_DAILY_NEW_CASES} contains NaN values")
if any(df[PREDICTED_DAILY_NEW_CASES] < 0):
errors.append(f"Column {PREDICTED_DAILY_NEW_CASES} contains negative values")
return errors
def _check_geos(ip_df, pred_df):
errors = []
_add_geoid_column(ip_df)
_add_geoid_column(pred_df)
requested_geo_ids = set(ip_df.GeoID.unique())
actual_geo_ids = set(pred_df.GeoID.unique())
# Check if any missing
# Additional geos are OK, but predictions should at least include requested ones
missing_geos = requested_geo_ids - actual_geo_ids
if missing_geos:
errors.append(f"Missing countries / regions: {missing_geos}")
return errors
def _add_geoid_column(df):
# Add GeoID column that combines CountryName and RegionName for easier manipulation of data
# np.where usage: if A then B else C
df["GeoID"] = np.where(df["RegionName"].isnull(),
df["CountryName"],
df["CountryName"] + ' / ' + df["RegionName"])
def _check_days(start_date, end_date, df):
errors = []
_add_geoid_column(df)
# Sort by geo and date
df.sort_values(by=["GeoID", "Date"], inplace=True)
# Convert the dates
start_date = pd.to_datetime(start_date, format='%Y-%m-%d')
end_date = pd.to_datetime(end_date, format='%Y-%m-%d')
num_days = (end_date - start_date).days + 1
expected_dates = [start_date + pd.offsets.Day(i) for i in range(num_days)]
# Get the geo names
geo_ids = list(df.GeoID.unique())
for geo_id in geo_ids:
pred_dates = df[df.GeoID == geo_id].Date
for expected_date, pred_date in itertools.zip_longest(expected_dates, pred_dates, fillvalue=None):
if not expected_date == pred_date:
errors.append(f"{geo_id}: Expected prediction for date "
f"{expected_date.strftime('%Y-%m-%d') if expected_date is not None else None}"
f" but got {pred_date.strftime('%Y-%m-%d') if pred_date is not None else None}")
return errors
def do_main():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--start_date",
dest="start_date",
type=str,
required=False,
default="2020-12-22",
help="Start date from which to apply the scenario"
"Format YYYY-MM-DD. For example 2020-12-22")
parser.add_argument("-e", "--end_date",
dest="end_date",
type=str,
required=False,
default="2021-06-19",
help="Last date of the scenario"
"Format YYYY-MM-DD. For example 2021-06-19")
parser.add_argument("-ip", "--interventions_plan",
dest="ip_file",
type=str,
required=True,
help="The path to an intervention plan .csv file")
parser.add_argument("-f", "--submission_file",
dest="submission_file",
type=str,
required=True,
help="Path to the filename containing the submission (predictions) to be validated.")
args = parser.parse_args()
submission_file = args.submission_file
start_date = args.start_date
end_date = args.end_date
ip_file = args.ip_file
LOGGER.info(f"Validating submissions file {submission_file} "
f"start date {start_date} end date {end_date} intervention plan {ip_file}")
errors = validate_submission(start_date, end_date, ip_file, submission_file)
if not errors:
LOGGER.info(f'{submission_file} submission passes validation')
else:
LOGGER.warning(f'Submission {submission_file} has errors: ')
LOGGER.warning('\n'.join(errors))
LOGGER.info(f"Done!")
if __name__ == '__main__':
do_main()
``` |
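Typical programmatic use, bypassing the CLI (both file paths are placeholders):
```python
errors = validate_submission(start_date="2020-12-22",
                             end_date="2021-06-19",
                             ip_file="scenario_ip.csv",           # placeholder path
                             submission_file="predictions.csv")   # placeholder path
if errors:
    print("\n".join(errors))
else:
    print("submission passes validation")
```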
{
"source": "28harishkumar/django-diary",
"score": 2
} |
#### File: django-diary/diary/views.py
```python
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse , Http404
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.generic import View
from diary.models import Diary, Entry
from diary.forms import DiaryForm, EntryForm, DiaryEditForm
class DiaryList(View):
@method_decorator(login_required)
def get(self, request):
diaries = Diary.objects.filter(user = request.user)
return render_to_response('diary/home.html',{
'diaries': diaries},
context_instance=RequestContext(request))
class NewDiary(View):
@method_decorator(login_required)
def get(self, request):
return render_to_response('diary/new.html',
context_instance = RequestContext(request))
@method_decorator(login_required)
def post(self,request):
name = request.POST.get('name',None)
if name == None:
raise Http404()
        data = request.POST.copy()
        # diary names must be unique; reject duplicates
        if Diary.objects.filter(name=name).exists():
            raise Http404()
data['user'] = request.user.id
form = DiaryForm(data)
if(form.is_valid()):
form.save()
diary_id = Diary.objects.get(name=data['name']).id
return HttpResponseRedirect(reverse('diary_home',kwargs={'diary_id':diary_id}))
else:
raise Http404()
class EditDiary(View):
@method_decorator(login_required)
def get(self, request, diary_id):
diary = Diary.objects.get(pk = diary_id, user = request.user.id)
return render_to_response('diary/new.html',
{'diary': diary},
context_instance = RequestContext(request))
@method_decorator(login_required)
def post(self, request, diary_id):
name = request.POST.get('name',None)
if name == None:
raise Http404()
        data = request.POST.copy()
        try:
            diary = Diary.objects.get(pk=diary_id)
        except Diary.DoesNotExist:
            raise Http404()
        if name != diary.name:
            # the new name must not collide with another diary
            if Diary.objects.filter(name=name).exclude(pk=diary.id).exists():
                raise Http404()
form = DiaryEditForm(data, instance = diary)
if(form.is_valid()):
form.save()
return HttpResponseRedirect(reverse('diary_home',kwargs={'diary_id':diary.id}))
else:
raise Http404()
class DeleteDiary(View):
@method_decorator(login_required)
def post(self, request, diary_id):
try:
diary = Diary.objects.get(pk = diary_id, user = request.user.id)
except:
raise Http404()
try:
diary.delete()
return HttpResponseRedirect('/')
except:
raise Http404()
class DiaryHome(View):
@method_decorator(login_required)
def get(self, request, diary_id):
diary = Diary.objects.get(pk = diary_id)
if diary.user != request.user:
raise Http404()
entries = Entry.objects.filter(diary = diary_id).order_by('-timestamp')
return render_to_response('diary/entrylist.html',{
'diary': diary,
'entries': entries},
context_instance=RequestContext(request))
class EntryView(View):
@method_decorator(login_required)
def get(self, request, diary_id, entry_id):
diary = Diary.objects.get(pk = diary_id)
if(diary.user != request.user):
raise Http404()
entry = Entry.objects.get(pk = entry_id, diary = diary.id)
return render_to_response('diary/entry.html',{
'diary': diary,
'entry': entry
},
context_instance = RequestContext(request))
@method_decorator(login_required)
def post(self, request, diary_id, entry_id):
diary = Diary.objects.get(pk = diary_id)
if(diary.user != request.user):
raise Http404()
entry = Entry.objects.get(pk = entry_id)
form = EntryForm(request.POST, instance = entry)
if(form.is_valid()):
form.save()
return HttpResponseRedirect(reverse('diary_entry',kwargs={'diary_id':diary_id, 'entry_id': entry_id}))
else:
raise Http404()
class NewEntry(View):
@method_decorator(login_required)
def get(self, request, diary_id):
diary = Diary.objects.get(pk = diary_id)
if(diary.user != request.user):
raise Http404()
return render_to_response('diary/entry.html',{
'diary': diary,
},
context_instance = RequestContext(request))
@method_decorator(login_required)
def post(self,request, diary_id):
diary = Diary.objects.get(pk = diary_id)
if(diary.user != request.user):
raise Http404()
data = request.POST.copy()
data['diary'] = diary.id
form = EntryForm(data)
if(form.is_valid()):
form.save()
entry_id = Entry.objects.get(title=data['title']).id
return HttpResponseRedirect(reverse('diary_entry',kwargs={'diary_id':diary_id, 'entry_id': entry_id}))
else:
raise Http404()
class DeleteEntry(View):
@method_decorator(login_required)
def post(self, request, entry_id):
try:
entry = Entry.objects.get(pk = entry_id)
except:
raise Http404()
diary = entry.diary
if diary.user.id != request.user.id:
raise Http404()
try:
entry.delete()
return HttpResponseRedirect(reverse('diary_home',kwargs={ 'diary_id' : diary.id }))
except:
return HttpResponse('fail')
class SearchEntry(View):
@method_decorator(login_required)
def get(self, request):
search = request.GET.get('s',None)
if search == None:
return HttpResponseRedirect('/')
diaries = request.user.diary_set.all()
entries = Entry.objects.filter(title__icontains = search, diary__in = diaries).order_by('-timestamp')
return render_to_response('diary/searchlist.html',{
'entries': entries},
context_instance=RequestContext(request))
``` |
{
"source": "28harishkumar/Social-website-django",
"score": 2
} |
#### File: Social-website-django/post/forms.py
```python
from django.forms import ModelForm
from django import forms
from post.models import Post
class PostCreateForm(ModelForm):
image_types = ('jpg','png','jpeg')
video_types = ('mp4','flv','avi')
audio_types = ('mp3',)
    file_types = ('pdf', 'csv', 'txt')
blocked_types = ('exe','sh')
class Meta:
model = Post
fields = ['user','status','attachment','attachment_type','root_post']
exclude = ['like','favorite']
    def clean_attachment_type(self):
        # use .get(): a field that failed its own validation is absent from cleaned_data
        attachment = self.cleaned_data.get('attachment')
        attachment_type = self.cleaned_data.get('attachment_type')
if attachment:
ext = attachment.name.split('.')[-1]
if(ext in self.image_types):
attachment_type = 'image'
elif(ext in self.video_types):
attachment_type = 'video'
elif(ext in self.audio_types):
attachment_type = 'audio'
elif(ext in self.file_types):
attachment_type = 'file'
else:
raise forms.ValidationError('Invalid type attachment')
return attachment_type
    def clean(self):
        super(PostCreateForm, self).clean()
        attachment = self.cleaned_data.get('attachment')
        status = self.cleaned_data.get('status', '')
        root_post = self.cleaned_data.get('root_post')
        if attachment is None and status.strip() == '' and root_post is None:
            raise forms.ValidationError('status can not be empty')
```
#### File: Social-website-django/user/views.py
```python
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse , Http404
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.views.generic import View
from user.models import User
from post.models import Post
from comment.models import Comment
from message.models import Message
from django.contrib.auth import logout, authenticate, login, update_session_auth_hash
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm, \
AdminPasswordChangeForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm
class Home(View):
@method_decorator(login_required)
def get(self,request):
posts = Post.objects.order_by('-created_on')
return render_to_response('home.html',
{'posts':posts},
context_instance=RequestContext(request))
class Login(View):
def reset_session_errors(self, request):
try:
del request.session['errors']
except KeyError:
pass
def get(self,request):
next = request.GET.get('next','/')
if request.user.is_authenticated():
return HttpResponseRedirect(next)
form = AuthenticationForm()
return render_to_response('registration/login.html',
{'form':form,'next': next},
context_instance=RequestContext(request))
def post(self,request):
self.reset_session_errors(request)
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
next = request.POST.get('next','/')
return HttpResponseRedirect(next)
else:
request.session['errors'] = ['your account is not activated']
return HttpResponseRedirect('/')
else:
request.session['errors'] = ['Invalid username and password combination',]
return HttpResponseRedirect('/')
class Logout(View):
def get(self,request):
if request.user.is_authenticated():
logout(request)
return HttpResponseRedirect('/')
class PasswordChange(View):
@method_decorator(login_required)
def get(self,request):
form = PasswordChangeForm(request.user)
return render_to_response('registration/password_change_form.html',
{'form':form},
context_instance=RequestContext(request))
@method_decorator(login_required)
def post(self,request):
form = PasswordChangeForm(user=request.user, data=request.POST)
if form.is_valid():
form.save()
update_session_auth_hash(request, form.user)
            return HttpResponseRedirect('/')
else:
return render_to_response('registration/password_change_form.html',
{'form':form},
context_instance=RequestContext(request))
class PasswordReset(View):
def get(self,request):
if request.user.is_authenticated():
return HttpResponseRedirect('/')
form = PasswordResetForm()
return render_to_response('registration/password_reset_form.html',
{'form':form},
context_instance=RequestContext(request))
def post(self,request):
if request.user.is_authenticated():
return HttpResponseRedirect('/')
form = PasswordResetForm(data=request.POST)
if form.is_valid():
form.save()
            return HttpResponseRedirect('/')
return render_to_response('registration/password_reset_form.html',
{'form':form},
context_instance=RequestContext(request))
class SetPassword(View):
def get(self,request):
if request.user.is_authenticated():
return HttpResponseRedirect('/')
form = SetPasswordForm()
return render_to_response('registration/set_password_form.html',
{'form':form},
context_instance=RequestContext(request))
def post(self,request):
if request.user.is_authenticated():
return HttpResponseRedirect('/')
form = SetPasswordForm(data=request.POST)
if form.is_valid():
form.save()
            return HttpResponseRedirect('/')
return render_to_response('registration/set_password_form.html',
{'form':form},
context_instance=RequestContext(request))
class Timeline(View):
#@method_decorator(login_required)
def get(self,request, user):
posts = Post.objects.filter(user= user)
user = get_object_or_404(User,pk = user)
return render_to_response('user/timeline.html',
{'posts':posts,'user':user},
context_instance=RequestContext(request))
class Profile(View):
def get(self,request, user):
posts = Post.objects.filter(user= user)
user = get_object_or_404(User,pk = user)
return render_to_response('user/profile.html',
{'posts':posts,'user':user},
context_instance=RequestContext(request))
class TimelineStatus(View):
pass
class TimelineImage(View):
pass
class TimelineVideo(View):
pass
class Activity(View):
pass
class Like(View):
pass
class Favorite(View):
pass
class FavoriteStatus(View):
pass
class FavoriteVideo(View):
pass
class FavoriteImage(View):
pass
class FavoriteAudio(View):
pass
class Settings(View):
pass
``` |
{
"source": "28kayak/poubelle-chat",
"score": 4
} |
#### File: poubelle-chat/PreProcess/Trash.py
```python
class Trash:
def __init__(self):
"""
        This class serializes a Japanese trash item.
        name = trash name
        category = kind of trash, e.g. flammables, non-flammables, etc.
        description = sentence explaining how to throw away the trash item
"""
self.name = ""
self.category = ""
self.description = ""
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
def get_category(self):
return self.category
def set_category(self, category):
self.category = category
def get_method(self):
return self.description
def set_method(self, description):
self.description = description
    @staticmethod
    def generate_new_trash(name, category, description):
        # __init__ takes no arguments, so construct first, then populate
        trash = Trash()
        trash.set_name(name)
        trash.set_category(category)
        trash.set_method(description)
        return trash
``` |
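Usage of the factory, with made-up example values:
```python
bottle = Trash.generate_new_trash(
    "PET bottle",                                   # name
    "recyclable",                                   # category
    "Rinse it, remove the cap, and put it out on plastics day.")
print(bottle.get_name(), "-", bottle.get_category())
print(bottle.get_method())
```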
{
"source": "28left/psumathnotebooks",
"score": 3
} |
#### File: psumathnotebooks/cyllene/f_define.py
```python
import sympy as sp
import random
from fractions import Fraction
# MathString parsing library
import cyllene.a_mathstring as ms
# random function generator
import cyllene.f_random
# Reserve some (real-valued) symbols in Sympy
a, b, c, d, p, q, r, s, t, w, x, y, z = sp.symbols(
'a b c d p q r s t w x y z', real=True)
MYLOCALS = {'a': a, 'b': b, 'c': c, 'd': d, 'p': p, 'q': q, 'r': r,
's': s, 't': t, 'w': w, 'x': x, 'y': y, 'z': z}
FUNCTION_LIST = ['const', 'linear', 'quadratic', 'cubic', 'squareroot',
'cubicroot', 'rational', 'exp', 'tri', 'log', 'comp',
'random']
def define_expression(expr,
eval_mode=False,
return_issues=True):
"""
sympify an input and return a sympy expression
together with a list of issues (optional, possibly empty)
parameters:
eval_mode (Boolean): should sympify try to evaluate expression
return_issues (Boolean): should list of issues during syntax check
be returned
Valid arguments
- a string
- a constant
- a sympy expression
The string can be a math string or one of the following expression types:
'const', 'linear', 'quadratic', 'cubic', 'squareroot',
'cubicroot', 'rational', 'exp', 'tri', 'log', 'comp', 'monomial'
One can also pass 'random' to pick an expression randomly.
"""
if expr in FUNCTION_LIST:
# First check whether string argument is a keyword
if expr == 'random':
expr = random.choice([
'const', 'linear', 'quadratic',
'cubic', 'squareroot',
'cubicroot', 'rational',
'comp', 'exp', 'tri', 'log'])
if expr == 'comp':
comp = [random.choice([
'const', 'linear', 'quadratic',
'cubic', 'squareroot',
'cubicroot', 'rational', 'exp', 'tri', 'log'])
for i in range(2)]
new_expr = sp.sympify(
f_random.set_function_random('comp', comp[0], comp[1]))
else:
new_expr = sp.sympify(f_random.set_function_random(expr))
expr_ok = True
issues = []
elif isinstance(expr, sp.Basic):
# if expr is Sympy type, skip syntax check
expr_ok = True
new_expr = expr
issues = []
elif isinstance(expr, (int, float)):
# if expr is numerical, directly sympify
expr_ok = True
new_expr = sp.sympify(expr, evaluate=eval_mode)
issues = []
elif isinstance(expr, str):
# try:
# # if input can be turned into number, do this to avoid
# # weird sympy bug
# if '1/' in expr:
# # force sympy to ev
# check = [Fraction(expr), True, []]
# elif '.' in expr:
# # has decimal point, try float conversion
# check = [float(expr), True, []]
# else:
# # check integer
# check = [int(expr), True, []]
# except ValueError:
# # check syntax of expr;
# # returns triple:
# # ['sanitized' string, compilable flag (boolean), issues list]
check = ms.check_syntax(expr)
if check[1]:
try:
new_expr = sp.sympify(
check[0], locals=MYLOCALS, evaluate=eval_mode)
if new_expr.is_real:
# if input is number, evaluate
new_expr = sp.sympify(
check[0], locals=MYLOCALS, evaluate=True)
expr_ok = True
issues = []
        except Exception:
expr_ok = False
issues = ["invalid syntax"]
else:
# check_syntax discovered issues
expr_ok = False
issues = check[2]
else:
# argument expr is not of the right type
expr_ok = False
issues = ['unknown format']
if expr_ok:
return new_expr, []
else:
return None, issues
def define_function(expr, mode='numerical'):
    """
    sympify an expression and return a function evaluating it,
    using SymPy's lambdify in 'numerical' mode and a symbolic
    subs()-based lambda otherwise.
    parameters:
        mode: 'numerical' for a lambdified callable, anything else
              for symbolic substitution
    """
[func, issues] = define_expression(expr, eval_mode=True)
if func:
        if mode == 'numerical':
            if len(func.free_symbols) > 0:
                # if there are free symbols in func, use the first as the function variable
return sp.lambdify([func.free_symbols.pop()], func)
# return lambda u: func.evalf(subs={x, u}, n=5)
else:
# otherwise any symbol will do
return sp.lambdify([x], func)
# return lambda u: func.evalf(subs={x, u}, n=5)
else:
            if len(func.free_symbols) > 0:
                # if there are free symbols in func, use the first as the function variable
# return sp.lambdify(func.free_symbols.pop(), func)
return lambda u: func.subs(func.free_symbols.pop(), u)
else:
# otherwise any symbol will do
# return sp.lambdify(x, func)
return lambda u: func.subs(x, u)
else:
return None
```
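How the two entry points combine in practice; return values follow the code above ('random' inputs will of course vary per call):
```python
expr, issues = define_expression("x**2 + 3*x")
if not issues:
    f = define_function(expr)                  # numerical mode -> lambdify
    print(f(2))                                # 10
    g = define_function(expr, mode="symbolic")
    print(g(2))                                # SymPy Integer 10, via subs()
```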
#### File: psumathnotebooks/cyllene/m_input_tweaks.py
```python
import sys
from IPython import get_ipython
ipython = get_ipython()
# Allow using '^' for exponentiation
def replace_exponentiation_symbol(lines):
new_lines = []
for line in lines:
new_line = ''
occ_exp = [i for i, letter in enumerate(line) if letter == '^']
position = 0
for i in occ_exp:
if line.count('\'',0,i) %2 == 0 and line.count('\"',0,i) %2 == 0:
new_line += line[position:i+1].replace('^','**')
else:
new_line += line[position:i+1]
position = i+1
new_line += line[position:]
new_lines.append(new_line)
return new_lines
# Forbid entries where only the top line of an answer block is meaningfully filled
# This piece of code gets run on answer blocks and other magics alike, so we have to check explicitly that this is truly an answer block
def check_cell_submitted(lines):
if "answer" in lines[0] and len(lines) < 2:
lines.append(" ")
return lines
# Suppress long traceback messages and just output error
# def exception_handler(exception_type, exception, traceback):
# print("Your input led to an error. You can find more information about the error below.")
# print("%s: %s" % (exception_type.__name__, exception), file=sys.stderr)
def hide_traceback(exc_tuple=None, filename=None, tb_offset=None,
exception_only=False, running_compiled_code=False):
etype, value, tb = sys.exc_info()
value.__cause__ = None # suppress chained exceptions
print("Your input led to an error. You can find more information about the error below.")
return ipython._showtraceback(etype, value, ipython.InteractiveTB.get_exception_only(etype, value))
# Register tweaks in ipython kernel
ipython.input_transformers_cleanup.extend((replace_exponentiation_symbol, check_cell_submitted))
ipython.showtraceback = hide_traceback
```
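The quote-counting guard means only unquoted carets are rewritten, e.g.:
```python
lines = ["y = x^2 + 2^3", "s = 'a^b' + \"c^d\""]
print(replace_exponentiation_symbol(lines))
# -> ['y = x**2 + 2**3', 's = \'a^b\' + "c^d"']
```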
#### File: psumathnotebooks/cyllene/m_magics.py
```python
from os import path
from IPython.core.magic import (register_line_magic, register_cell_magic,
register_line_cell_magic)
from IPython import get_ipython
from cyllene.p_problem import ProbStack
ip = get_ipython()
@register_line_magic
def initialize(line):
if line=='':
# if no argument is given, try to find .py file with
# same name as notebook
init_file = 'init.py'
else:
init_file = line + '.py'
try:
# run the init file to load problems etc.
ip.magic('run '+init_file)
print("Initialization successful!")
except:
# print(line)
print("Error loading initialization file.")
@register_line_magic
def Problem(line):
try:
problem = ProbStack.stack[line]
problem.state_problem()
# add_answer_cell(problem)
except:
# print(line)
print("Could not add problem to problem stack.")
@register_line_magic
def problem(line):
try:
problem = ProbStack.stack[line]
problem.state_problem()
# add_answer_cell(problem)
except:
# print(line)
print("Could not add problem to problem stack.")
@register_cell_magic
def answer(line, cell):
# Given this is an answer block, check the top line is written in a valid way
try:
problem = ProbStack.stack[line[8:]]
except:
print("Oops! Something in the top line won't let us find the right problem.")
return None
# PREWORK FOR CELL:
# if for some reason cell is not defined, stop the process
if(cell is None):
print("Your answer seems to be empty. Please try again.")
return None
# eliminate any line from cell that is "essentially empty", i.e., only whitepace
cell = "\n".join([ll.rstrip() for ll in cell.splitlines() if ll.strip()])
# strip the empty space from the beginning of each line
cell = "\n".join([ll.lstrip() for ll in cell.splitlines()])
# check if cell is essentially empty
if(cell == ""):
print("Your answer is empty. Please try again!")
return None
# PARSING ANSWER DEPENDS ON NUM_INPUTS
answer = []
try:
n = problem.num_inputs
except:
print("problem.num_inputs has not yet been defined. Please check problem's encoding")
return None
if(n < 1):
print("This problem was coded with too few num_inputs.")
return None
elif(n == 1):
# there are no "(i): " prefix strings in this case, so can just use the first meaningful line in code as our answer
answer.append(cell.split('\n')[0])
else: # num_inputs > 1
# By default, let answer[i] = ""
for i in range(n):
answer.append("")
# TWO CASES for parsing answer blocks for problems with multiple inputs
# Distinguish between the two cases by the truth value of prefix_found
prefix_found = False
prefix_string = ""
prefix_index = 0
while(prefix_index < n and not prefix_found):
prefix_string = "(" + str(prefix_index + 1) + ")"
for cell_line in cell.splitlines():
if cell_line.startswith(prefix_string):
prefix_found = True
break
prefix_index += 1
if(prefix_found):
# CASE 1: there are "(i): " prefices from (1),...,(n)
# iterate through each line in code to update answer[i] if line begins with "(i):""
# ignore any lines with an (i) for i > n
for cell_line in cell.splitlines():
# could be improved
for i in range(n):
if cell_line.startswith("(" + str(i + 1) + "):"):
answer[i] = cell_line[len("(" + str(i + 1) + "):"):].strip()
break
if cell_line.startswith("(" + str(i + 1) + ")"):
answer[i] = cell_line[len("(" + str(i + 1) + ")"):].strip()
break
else:
# CASE 2: submission assumed to be written in order without the prefices (1),...,(n)
i = 0
for cell_line in cell.splitlines():
if(i >= n):
break
answer[i] = cell_line.strip()
i += 1
problem.check_answer(answer)
``` |
{
"source": "28mm/Fovea",
"score": 3
} |
#### File: examples/clustering/cluster.py
```python
import numpy as np
import pandas as pd
import os
import argparse
from sklearn import feature_extraction
from sklearn.feature_extraction import DictVectorizer
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn import metrics
#import mpld3
def labeler(thing):
return thing
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--clusters', type=int, default=5, required=False)
parser.add_argument('--confidence', type=float, default=0.0, required=False)
parser.add_argument('directory', nargs=1)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--kmeans', action='store_true')
group.add_argument('--dbscan', action='store_true')
args = parser.parse_args()
confidence = args.confidence
n_clusters = args.clusters
directory = args.directory[0]
documents = {}
for filename in os.listdir(directory):
if not filename.endswith('.labels'):
continue
with open(directory + '/' + filename, 'r') as f:
documents[filename] = []
for line in f.readlines():
l_components = line.split('\t')
conf = float(l_components[0])
label = l_components[1][:-1]
if conf > confidence:
documents[filename].append(label)
v = DictVectorizer()
dox = [ { l : 1 for l in documents[d] } for d in documents ]
    # str.rstrip strips a character set, not a suffix; slice the extension off instead
    doc_names = [ d[:-len('.labels')] for d in documents ]
X = v.fit_transform(dox)
features = v.get_feature_names()
if args.kmeans:
km = KMeans(n_clusters=n_clusters)
km.fit(X)
# Sort cluster centers by proximity to centroid
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
closest_labels_dict = { i : "" for i in range(n_clusters) }
for i in range(n_clusters):
for ind in order_centroids[i, :6]: #replace 6 with n words per cluster
closest_labels_dict[i] += features[ind] + ", "
closest_labels_dict[i] = closest_labels_dict[i].rstrip(', ')
clusters = km.labels_.tolist()
clusters_dict = { i : [] for i in range(n_clusters) }
for c in range(len(clusters)):
clusters_dict[clusters[c]].append(doc_names[c])
print('<html>')
print('<body>')
print('<style>')
print('img { height: 75px; }')
print('h2 { font-family: sans-serif; } ')
print('.box { max-width: 700px; }')
print('</style>')
print('<div class="box">')
for k in clusters_dict:
print('<h2>' + str(k) + ": " + closest_labels_dict[k] + '</h2>')
for img in clusters_dict[k]:
print('<img src="file://' + directory + '/' + img + '">')
print('</div>')
print('</body>')
print('</html>')
    elif args.dbscan:
        raise NotImplementedError('DBSCAN clustering is not implemented yet')
if __name__ == '__main__' : main()
``` |
{
"source": "28mm/Splicr",
"score": 3
} |
#### File: 28mm/Splicr/Splicr.py
```python
from flask import Flask
from flask import render_template, url_for, redirect
from flask_uuid import FlaskUUID
from functools import partial
import json
import urllib
import sys
import os
# Youtube API
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
GA_TRACKING_ID = os.environ['GA_TRACKING_ID']
DEVELOPER_KEY = os.environ['YOUTUBE_KEY']
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
render = partial(render_template, GA_TRACKING_ID=GA_TRACKING_ID)
app = Flask(__name__)
FlaskUUID(app)
@app.route('/')
def index():
return render('index.html')
@app.route('/search/album')
@app.route('/search/album/<terms>')
def album_search(terms=None):
if terms is None:
return redirect(url_for('index'))
terms = urllib.parse.unquote(terms)
albums = MusicBrainz.album_search(terms)
return render('album-search.html', terms=terms, albums=albums)
@app.route('/search/artist')
@app.route('/search/artist/<terms>')
def artist_search(terms=None):
if terms is None:
return redirect(url_for('index'))
terms = urllib.parse.unquote(terms)
artists = MusicBrainz.artist_search(terms)
return render('artist-search.html', terms=terms, artists=artists)
@app.route('/album')
@app.route('/album/<uuid:uuid>')
def album(uuid=None):
if uuid is None:
return redirect(url_for('index'))
album = Album.get(uuid)
return render('album.html', album=album,
ytid=yt_lucky(album.artist + ' ' + album.tracks[0].title))
@app.route('/artist')
@app.route('/artist/<uuid:uuid>')
def artist(uuid=None):
if uuid is None:
return redirect(url_for('index'))
artist = Artist.get(uuid)
return render('artist.html', artist=artist)
@app.route('/track')
@app.route('/track/<uuid:uuid>')
def track(uuid=None):
if uuid is None:
return redirect(url_for('index'))
track = Track.get(uuid)
return render('track.html', track=track,
ytid=yt_lucky(track.artist + ' ' + track.title))
@app.route('/ytid')
@app.route('/ytid/<uuid:uuid>')
def ytid(uuid=None):
if uuid is None:
return redirect(url_for('index'))
track = Track.get(uuid)
return yt_lucky(track.artist + ' ' + track.title)
#
# Musicbrainz fns and class definitions
class Artist:
'''MusicBrainz Artists and Associated Albums'''
def __init__(self, uuid):
self.uuid = uuid
self.asin = ''
self.name = ''
self.albums = []
@staticmethod
def get(uuid):
MB_FMT = 'http://musicbrainz.org/ws/2/artist/%s?inc=releases&fmt=json&type=album|ep'
response = urllib.request.urlopen( MB_FMT % uuid )
response_json = json.loads(response.read().decode('utf-8')) # FIXME encoding?
artist = Artist(uuid)
artist.name = response_json['name']
for release in response_json['releases']:
artist.albums.append({ 'uuid' : release['id'],
'title' : release['title']} )
return artist
class Album:
'''Musicbrainz Album (release)'''
def __init__(self, uuid):
self.uuid = uuid
self.asin = ''
self.title = ''
self.artist = '' # problematic (think: compilations)
self.tracks = []
# These relate to musicbrainz search.
self.score = ''
self.terms = ''
@staticmethod
def get(uuid):
MB_FMT = 'http://musicbrainz.org/ws/2/release/%s?inc=recordings&fmt=json'
response = urllib.request.urlopen( MB_FMT % uuid )
response_json = json.loads(response.read().decode('utf-8')) # FIXME encoding?
album = Album(uuid)
album.title = response_json['title']
album.asin = response_json['asin']
album.artist = ''
for medium in response_json['media']:
for track in medium['tracks']:
album.tracks.append(Track(track['recording']['id'],
title=track['title']))
return album
class Track:
'''Musicbrainz Track (recording)'''
def __init__(self, uuid, asin='', artist='', title=''):
self.uuid = uuid
self.asin = asin
self.title = title
self.artist = artist
@staticmethod
def get(uuid):
MB_FMT = 'http://musicbrainz.org/ws/2/recording/%s?inc=artist-credits&fmt=json'
print(MB_FMT % uuid, file=sys.stderr)
response = urllib.request.urlopen( MB_FMT % uuid )
response_json = json.loads(response.read().decode('utf-8')) # FIXME encoding?
track = Track(uuid)
track.title = response_json['title']
credits = response_json['artist-credit']
for credit in credits:
if len(track.artist) == 0:
track.artist = credit['artist']['name'] # FIXME just one for now.
return track
class MusicBrainz:
@staticmethod
def album_search(terms):
'''Returns a list of dicts representing mbrainz releases.
score <- search relevance
title <- release title
uuid <- mbrainz uuid'''
terms = urllib.parse.quote(terms)
MB_FMT = 'http://musicbrainz.org/ws/2/release/?query=%s&fmt=json&type=album|ep'
response = urllib.request.urlopen( MB_FMT % terms )
response_json = json.loads(response.read().decode('utf-8')) # FIXME encoding?
albums = []
for release in response_json['releases']:
skip = False
album = {}
album['score'] = release['score']
album['title'] = release['title']
album['uuid'] = release['id']
album['artist'] = release['artist-credit'][0]['artist']['name']
album['artist_uuid'] = release['artist-credit'][0]['artist']['id']
# skip albums we already have, for more presentable search results.
for a in albums:
                if a['artist'] == album['artist'] and a['title'] == album['title']:
skip = True
if not skip:
albums.append(album)
return albums
@staticmethod
def artist_search(terms):
'''Returns a list of dicts representing mbrainz releases.
score <- search relevance
name <- artist name
uuid <- mbrainz uuid'''
terms = urllib.parse.quote(terms)
MB_FMT = 'http://musicbrainz.org/ws/2/artist/?query=%s&fmt=json'
response = urllib.request.urlopen( MB_FMT % terms )
response_json = json.loads(response.read().decode('utf-8')) # FIXME encoding?
artists = []
for a in response_json['artists']:
skip = False
artist = {}
artist['score'] = a['score']
artist['name'] = a['name']
artist['uuid'] = a['id']
# skip albums we already have, for more presentable search results.
#for a in albums:
# if a['artist'] == album['artist'] and a['title'] == album['artist']:
# skip = True
#if not skip:
# albums.append(album)
artists.append(artist)
return artists
#
# Youtube Fns
def yt_lucky(terms=None):
'''Returns the youtube video id (eg: "VQbqeLTMre8") of the
top search result for terms'''
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
response = youtube.search().list(
q=terms,
part="id,snippet",
maxResults=20
).execute()
    for result in response.get("items", []):
        if result["id"]["kind"] == "youtube#video":
            return result["id"]["videoId"]
    return None  # no video result found
if __name__ == '__main__': app.run()
``` |
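A quick smoke test of the MusicBrainz helpers; this hits the live web service, so network access is required and scores/ids will vary:
```python
for hit in MusicBrainz.artist_search("Radiohead")[:3]:
    print(hit["score"], hit["name"], hit["uuid"])
```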
{
"source": "28raph/butt-3",
"score": 3
} |
#### File: butt-3/lib/hunt.py
```python
from lib.parser import conf
rarities = ['Common', 'Uncommon', 'Alolan', 'Super Rare', 'Rare', 'Full-odds', 'Shiny', 'Legendary']
cheap_legies=["Articuno", "Mew*", "Jirachi",
"Moltres", "Raikou", "Entei",
"Suicune", "Ho-oh", "Regirock",
"Regice", "Registeel", "Latias",
"Latios", "Deoxys", "Uxie",
"Mesprit", "Azelf", "Heatran",
"Regigigas", "Cresselia", "Cobalion",
"Terrakion", "Virizion", "Tornadus",
"Thundurus", "Landorus", "Xerneas",
"Yveltal", "Celebi", "Zygarde"]
def hunt(footer, description):
for rarity in rarities:
if rarity in footer:
if rarity == "Legendary":
if any(poke in description for poke in cheap_legies):
ball = conf.rarities.Cheap_legendary
else:
ball = conf.rarities.Legendary
elif rarity == "Full-odds":
ball = conf.rarities.Full_odds
else:
ball = conf.rarities[str(rarity)]
break
return ball
```
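`hunt()` maps the rarity found in the embed footer to the ball configured for it, with the cheap-legendary split applied on top; a sketch assuming `conf` was loaded from the user's config:
```python
ball = hunt("Rarity: Legendary", "You encountered a wild Mew*!")
# -> conf.rarities.Cheap_legendary, because "Mew*" is in cheap_legies
```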
#### File: 28raph/butt-3/main.py
```python
import platform
uname = platform.uname()
print(f"System: {uname.system}")
import pyfiglet
ascii_banner = pyfiglet.figlet_format("PokeGrinder")
print(ascii_banner)
print("Importing Modules...")
import sqlite3
import asyncio
import requests
import datetime
from lib.hunt import hunt
from lib.parser import conf
from playsound import playsound
from discord.ext import commands
from lib.autobuy import get_balls
from lib.captcha import anticaptcha
from discord.ext.commands import CommandNotFound
print("Initialising, this may take some time depending on how many servers you are in.")
caught = 0
fish_caught = 0
encounters = 0
start_time = 0
solved = 0
hatched = 0
notification = r'lib/assets/notification.mp3'
bot = commands.Bot(command_prefix=';', self_bot=True, help_command=None)
def footer(msg):
embeds = msg.embeds
for embed in embeds:
footer=str(embed.footer)
return footer
def description(msg):
embeds = msg.embeds
for embed in embeds:
description=str(embed.description)
return description
async def notify():
if conf.general.captcha_notifier == 'enable':
repeat = 0
while repeat <= 3:
playsound(notification)
repeat += 1
async def timer(after):
await asyncio.sleep(conf.cooldowns.huntcooldown)
await after.channel.send(";p")
async def fish_timer(after):
await asyncio.sleep(conf.cooldowns.fishcooldown)
await after.channel.send(";f")
async def log():
current_time=datetime.datetime.now().replace(microsecond=0)
time_elapsed=current_time - start_time
print(f"Time Elapsed : {time_elapsed} | Encounters : {encounters} | Catches : {caught} | Fish Catches : {fish_caught} | Captchas Solved : {solved} | Eggs Hatched : {hatched}")
def get_rarity(pokemon):
conn = conn = sqlite3.connect(r"fishdatabase.db")
cur = conn.cursor()
cur.execute("SELECT rarity FROM fish WHERE pokemon=?", (pokemon,))
rarity = cur.fetchall()
return rarity
@bot.event
async def on_ready():
global start_time
user = await bot.fetch_user(conf.general.userid)
channel = bot.get_channel(conf.general.channelid)
fishchannel = bot.get_channel(conf.general.fishchannelid)
if conf.general.hunting == "enable":
await channel.send(";p")
if conf.general.fishing == "enable":
await fishchannel.send(";f")
print(f"Started grinding as {user}.")
start_time = datetime.datetime.now().replace(microsecond=0)
@bot.listen('on_message')
async def on_message(message):
global solved
if message.author.id == <PASSWORD>:
if message.channel.id == conf.general.channelid:
command = ";p"
elif message.channel.id == conf.general.fishchannelid:
command = ";f"
else:
return
if "continue hunting!" in message.content:
await asyncio.sleep(2.5)
await message.channel.send(command)
elif "You have **10** attempts to answer the captcha." in message.content:
if len(message.attachments) != 0:
if conf.general.captcha_solver == "enable":
try:
answer = await anticaptcha(message.attachments[0])
print(f"Solved the captcha, expected answer is {answer}.")
await message.channel.send(answer)
except:
print("Unable to solve Captcha!")
asyncio.create_task(notify())
else:
try:
result = await bot.wait_for('message', check = lambda m: m.author == message.author and m.channel == message.channel, timeout=10)
except:
pass
else:
if 'continue hunting!' in result.content:
print("The answer was correct!")
solved += 1
else:
print("The answer was inccorect!")
asyncio.create_task(notify())
else:
asyncio.create_task(notify())
elif 'ready to hatch!' in message.content:
if conf.general.eggs != 'enable':
return
await asyncio.sleep(3.5)
await message.channel.send(';egg hatch')
@bot.listen('on_message_edit')
async def on_message_edit(before, after):
global caught, fish_caught
if after.author.id == <PASSWORD>:
if after.channel.id == conf.general.channelid:
asyncio.create_task(timer(after))
if "caught" in description(after):
caught += 1
asyncio.create_task(log())
if conf.general.autobuy == 'enable':
balls = await get_balls(footer(after))
if balls != None:
await asyncio.sleep(3.5)
await after.channel.send(balls)
elif after.channel.id == conf.general.fishchannelid:
if "You caught a" in description(after) and "fished out a" in description(before):
fish_caught += 1
asyncio.create_task(log())
else:
return
@bot.command()
async def p(ctx):
if conf.general.hunting != "enable":
return
global encounters
if ctx.channel.id == conf.general.channelid:
try:
poke = await bot.wait_for('message', check = lambda m: m.author.id == <PASSWORD> and m.channel == ctx.channel, timeout=5)
except asyncio.TimeoutError:
await ctx.send(";p")
else:
if poke.embeds != []:
if "found a wild" in description(poke):
encounters += 1
ball = hunt(footer(poke), description(poke))
await asyncio.sleep(conf.cooldowns.ballcooldown)
await ctx.send(ball)
try:
before, after = await bot.wait_for('message_edit', check = lambda b, m: b == poke, timeout=1.5)
except asyncio.TimeoutError:
await ctx.send(ball)
else:
if "please wait" in poke.content:
await asyncio.sleep(3)
await ctx.send(";p")
@bot.command()
async def f(ctx):
if conf.general.fishing != "enable":
return
if ctx.channel.id == conf.general.fishchannelid:
try:
cast = await bot.wait_for('message', check = lambda m: m.author.id == 6<PASSWORD> and m.channel == ctx.channel, timeout=5)
except asyncio.TimeoutError:
await ctx.send(";f")
else:
if cast.embeds != []:
if "cast out a" in description(cast):
try:
before, after = await bot.wait_for('message_edit', check = lambda b, m: b == cast, timeout=10)
except asyncio.TimeoutError:
pass
else:
if "type `PULL`" in description(after):
asyncio.create_task(fish_timer(after))
await ctx.send("pull")
try:
bef, aft = await bot.wait_for('message_edit', check = lambda b, m: b == after, timeout=5)
except:
pass
else:
desc = description(aft)
if "fished out a" in desc:
if "Golden" in desc:
await ctx.send(conf.fishrarities.Golden)
elif "Shiny" in desc:
await ctx.send(conf.fishrarities.Shiny)
else:
rarity = get_rarity(desc.split("**")[3])[0][0]
await ctx.send(conf.fishrarities[rarity])
else:
asyncio.create_task(fish_timer(after))
else:
if "please wait" in cast.content:
await asyncio.sleep(3)
await ctx.send(";f")
@bot.command()
async def egg(ctx, action):
if conf.general.eggs != 'enable':
return
global hatched
if ctx.channel.id == conf.general.channelid or ctx.channel.id == conf.general.fishchannelid:
if action == 'hatch':
try:
response = await bot.wait_for('message', check = lambda m: m.author.id == <PASSWORD> and m.channel == ctx.channel, timeout=5)
except asyncio.TimeoutError:
await asyncio.sleep(3)
await ctx.send(';egg hatch')
else:
if 'just hatched a' in response.content:
print("Hatched an egg!")
hatched += 1
await asyncio.sleep(8)
await ctx.send(';egg hold')
elif action == 'hold':
try:
response = await bot.wait_for('message', check = lambda m: m.author.id == <PASSWORD> and m.channel == ctx.channel, timeout=5)
except asyncio.TimeoutError:
await asyncio.sleep(3)
await ctx.send(';egg hold')
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, CommandNotFound):
return
raise error
bot.run(conf.general.token)
``` |
{
"source": "28Smiles/SAS-AIED2020",
"score": 2
} |
#### File: logs/xlm_1e-5_xnli_ende/app.py
```python
import torch
from transformers import *
from flask import Flask, escape, request
from flask_cors import CORS
GPU_INFERENCE = True
MODEL_NAME = 'xlm-mlm-tlm-xnli15-1024'
SEQUENCE_LENGTH = 512
NUM_LABELS = 3  # contradictory / incorrect / correct
TOKENIZER = XLMTokenizer.from_pretrained(MODEL_NAME)
CONFIG = XLMConfig.from_pretrained(MODEL_NAME, num_labels = NUM_LABELS)
MODEL = XLMForSequenceClassification(config = CONFIG)
MODEL.load_state_dict(torch.load('model.torch', map_location='cpu'))
MODEL.eval()
if GPU_INFERENCE:
MODEL = MODEL.cuda()
app = Flask(__name__)
CORS(app)
@app.route("/")
def evaluate():
r = request.args.get("reference")
a = request.args.get("answer")
l = request.args.get("lang")
print("Evaluate: ", lang, r, a)
idx = TOKENIZER.encode(r, a, True)
if len(idx) > SEQUENCE_LENGTH:
return "ERROR Sentence too long, please trunicate"
mask = [1] * len(idx) + [0] * (SEQUENCE_LENGTH - len(idx))
idx += [0] * (SEQUENCE_LENGTH - len(idx))
mask = torch.tensor([ mask ]).long()
idx = torch.tensor([ idx ]).long()
lang = torch.tensor([ lang = [ MODEL.config.lang2id['de'] ] * SEQUENCE_LENGTH ]).long()
if GPU_INFERENCE:
mask = mask.cuda()
idx = idx.cuda()
with torch.no_grad():
outputs = MODEL(
idx,
attention_mask = mask,
langs = lang
)
e = outputs[0][0].cpu().numpy()
return {
'contradictory': float(e[0]),
'incorrect': float(e[1]),
'correct': float(e[2])
}
```
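Querying the running service; the two input strings are placeholders, and the response carries the three class scores:
```python
import requests

scores = requests.get("http://127.0.0.1:5000/", params={
    "reference": "A current flows because the circuit is closed.",      # placeholder
    "answer": "Electricity moves since the switch connects the wires.", # placeholder
    "lang": "de",
}).json()
print(max(scores, key=scores.get), scores)
```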
#### File: SAS-AIED2020/util/best.py
```python
import os
import tensorflow as tf
import json
import torch
from util.hotload import load_model
def parse(file):
o = {}
for e in tf.train.summary_iterator(file):
for v in e.summary.value:
if not v.tag == 'loss':
if not e.step in o:
o[e.step] = {}
o[e.step][v.tag] = v.simple_value
return o
def sumup(values, parts):
s = 0
for part in parts:
s += values[part]
return s
def scores(values, parts):
o = {}
for k, v in values.items():
o[k] = sumup(v, parts)
return o
def max_key(values):
k, v = 0, 0
for key, value in values.items():
k, v = key, value
break
for key, value in values.items():
if value >= v:
k, v = key, value
return k, v
def log(experiment):
for file in os.listdir('logs/' + experiment):
if file.startswith('events.out.tfevents'):
return 'logs/' + experiment + '/' + file
def load_parameters(experiment):
with open('logs/{}/parameters.json'.format(experiment)) as json_file:
return json.load(json_file)
def load_best(experiment, parts):
log_data = parse(log(experiment))
best_step, _ = max_key(scores(log_data, parts))
parameters = load_parameters(experiment)
MODEL_CLASS = load_model(parameters['MODEL_PREFIX'])
TOKENIZER = MODEL_CLASS[0].from_pretrained(parameters['MODEL_NAME'])
CONFIG = MODEL_CLASS[1].from_pretrained(parameters['MODEL_NAME'], num_labels=3)
MODEL = MODEL_CLASS[2].from_pretrained(parameters['MODEL_NAME'], config=CONFIG)
MODEL.load_state_dict(torch.load('logs/{}/model_{}.torch'.format(experiment, best_step), map_location='cpu'))
return best_step, log_data[best_step], MODEL, TOKENIZER
```
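Putting it together: pick the checkpoint whose summed validation tags peak. The experiment folder and tag names below are placeholders that must match what the training run actually logged:
```python
step, metrics, model, tokenizer = load_best(
    "xlm_1e-5_xnli_ende",                        # experiment folder under logs/
    parts=["union_unseen_answers_de_f1"])        # placeholder tag name
print("best step:", step, "->", metrics)
model.eval()
```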
#### File: SAS-AIED2020/util/val_datasets.py
```python
from .dataset import load_semeval, tokenize, dataset
from torch.utils.data import DataLoader
def val_dataset(name, type, lang, tokenizer, sequence_length):
return DataLoader(dataset(tokenize(load_semeval(name, type, lang), tokenizer, sequence_length)), batch_size=32)
def val_datasets(tokenizer, sequence_length):
return {
'union_unseen_answers_en': val_dataset('union', 'unseen_answers', 'en', tokenizer, sequence_length),
'union_unseen_questions_en': val_dataset('union', 'unseen_questions', 'en', tokenizer, sequence_length),
'union_unseen_domains_en': val_dataset('union', 'unseen_domains', 'en', tokenizer, sequence_length),
'union_unseen_answers_de': val_dataset('union', 'unseen_answers', 'de', tokenizer, sequence_length),
'union_unseen_questions_de': val_dataset('union', 'unseen_questions', 'de', tokenizer, sequence_length),
'union_unseen_domains_de': val_dataset('union', 'unseen_domains', 'de', tokenizer, sequence_length),
'beetle_unseen_answers_en': val_dataset('beetle', 'unseen_answers', 'en', tokenizer, sequence_length),
'beetle_unseen_questions_en': val_dataset('beetle', 'unseen_questions', 'en', tokenizer, sequence_length),
'beetle_unseen_answers_de': val_dataset('beetle', 'unseen_answers', 'de', tokenizer, sequence_length),
'beetle_unseen_questions_de': val_dataset('beetle', 'unseen_questions', 'de', tokenizer, sequence_length),
'sciEntsBank_unseen_answers_en': val_dataset('sciEntsBank', 'unseen_answers', 'en', tokenizer, sequence_length),
'sciEntsBank_unseen_questions_en': val_dataset('sciEntsBank', 'unseen_questions', 'en', tokenizer,
sequence_length),
'sciEntsBank_unseen_domains_en': val_dataset('sciEntsBank', 'unseen_domains', 'en', tokenizer, sequence_length),
'sciEntsBank_unseen_answers_de': val_dataset('sciEntsBank', 'unseen_answers', 'de', tokenizer, sequence_length),
'sciEntsBank_unseen_questions_de': val_dataset('sciEntsBank', 'unseen_questions', 'de', tokenizer,
sequence_length),
'sciEntsBank_unseen_domains_de': val_dataset('sciEntsBank', 'unseen_domains', 'de', tokenizer, sequence_length),
}
``` |
{
"source": "291700351/build_lichee",
"score": 2
} |
#### File: tests/package/test_glxinfo.py
```python
import os
import infra.basetest
GLXINFO_TIMEOUT = 120
class TestGlxinfo(infra.basetest.BRTest):
config = \
"""
BR2_x86_core2=y
BR2_TOOLCHAIN_EXTERNAL=y
BR2_TOOLCHAIN_EXTERNAL_CUSTOM=y
BR2_TOOLCHAIN_EXTERNAL_DOWNLOAD=y
BR2_TOOLCHAIN_EXTERNAL_URL="http://toolchains.bootlin.com/downloads/releases/toolchains/x86-core2/tarballs/x86-core2--glibc--bleeding-edge-2018.11-1.tar.bz2"
BR2_TOOLCHAIN_EXTERNAL_GCC_8=y
BR2_TOOLCHAIN_EXTERNAL_HEADERS_4_14=y
# BR2_TOOLCHAIN_EXTERNAL_LOCALE is not set
BR2_TOOLCHAIN_EXTERNAL_CXX=y
BR2_TOOLCHAIN_EXTERNAL_HAS_SSP=y
BR2_TOOLCHAIN_EXTERNAL_HAS_THREADS_DEBUG=y
BR2_TOOLCHAIN_EXTERNAL_HAS_THREADS=y
BR2_TOOLCHAIN_EXTERNAL_HAS_THREADS_NPTL=y
BR2_TOOLCHAIN_EXTERNAL_CUSTOM_GLIBC=y
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_CUSTOM_VERSION=y
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="4.16.7"
BR2_LINUX_KERNEL_USE_CUSTOM_CONFIG=y
BR2_LINUX_KERNEL_CUSTOM_CONFIG_FILE="board/qemu/x86/linux.config"
BR2_PACKAGE_MESA3D_DEMOS=y
BR2_PACKAGE_MESA3D=y
BR2_PACKAGE_MESA3D_GALLIUM_DRIVER_SWRAST=y
BR2_PACKAGE_MESA3D_DRI_DRIVER_SWRAST=y
BR2_PACKAGE_XORG7=y
BR2_PACKAGE_XSERVER_XORG_SERVER=y
BR2_TARGET_GENERIC_GETTY_PORT="ttyS0"
BR2_TARGET_ROOTFS_EXT2=y
# BR2_TARGET_ROOTFS_TAR is not set
"""
def wait_for_xserver(self):
# xserver takes some time to start up
        # the test case fails here if for some reason xserver is not properly installed
_, _ = self.emulator.run('while [ ! -e /var/run/xorg.pid ]; do sleep 1; done', 120)
def login(self):
img = os.path.join(self.builddir, "images", "rootfs.ext2")
kern = os.path.join(self.builddir, "images", "bzImage")
        # glxinfo over-allocates memory; 512 MB was the minimum that seemed to work
self.emulator.boot(arch="i386",
kernel=kern,
kernel_cmdline=["root=/dev/vda console=ttyS0"],
options=["-M", "pc", "-m", "512", "-drive", "file={},if=virtio,format=raw".format(img)])
self.emulator.login()
def test_run(self):
self.login()
self.wait_for_xserver()
# The test case verifies that the xserver with GLX is working
cmd = "glxinfo -B -display :0"
output, exit_code = self.emulator.run(cmd, GLXINFO_TIMEOUT)
self.assertEqual(exit_code, 0)
for line in output:
self.assertNotIn("Error", line)
# Error case: "Error: couldn't find RGB GLX visual or fbconfig"
``` |
{
"source": "2921251087/CPMel-1",
"score": 2
} |
#### File: CPMel-1/CPMel(6)/nodeCore.py
```python
import maya.cmds as mc
import maya.api.OpenMaya as om
try:
    from collections.abc import Iterable  # Python 3 (newer Maya versions)
except ImportError:
    from collections import Iterable      # Python 2 (older Maya versions)
class node:
    def __init__(self, node_name):
        # track the node by its UUID so later renames don't break the handle
        self.__dict__['__UUID'] = mc.ls(node_name, uid=True)
def __setattr__(self,name,val):
        if isinstance(val, Iterable) and type(val) != str:
mc.setAttr('%s.%s'%(mc.ls(self.__dict__['__UUID'])[0],name),*val)
else:
mc.setAttr('%s.%s'%(mc.ls(self.__dict__['__UUID'])[0],name),val)
def __getattr__(self,name):
return mc.getAttr('%s.%s'%(mc.ls(self.__dict__['__UUID'])[0],name))
a = node('pSphere1')
``` |
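Because the handle is stored as a UUID, it keeps resolving after renames; attribute access maps straight onto setAttr/getAttr (this only runs inside a Maya session with a pSphere1 in the scene):
```python
s = node('pSphere1')
s.translateX = 5            # scalar -> setAttr pSphere1.translateX 5
print(s.translateX)         # getAttr -> 5.0
mc.rename('pSphere1', 'ball')
print(s.translateX)         # still resolves through the stored UUID
```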
{
"source": "2921251087/CPMel",
"score": 2
} |
#### File: CPMel/cmds/create_node.py
```python
u"""
:创建时间: 2021/2/8 23:05
:作者: 苍之幻灵
:我的主页: https://cpcgskill.com
:QQ: 2921251087
:爱发电: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
"""
import maya.cmds as cmds
from . import node
from .node import DagNode
class _CreateNode(object):
u"""
创建节点类
"""
@node.commandWrap
def __call__(self, *args, **kwargs):
u"""
:param args:
:param kwargs:
:return:
:rtype: DagNode
"""
return cmds.createNode(*args, **kwargs)
    def __getattribute__(self, item):
        try:
            return object.__getattribute__(self, item)
        except AttributeError:
            # unknown attribute: synthesize a creator for that node type and cache it
            def _creator(*args, **kwargs):
                return self(item, *args, **kwargs)
            setattr(self, item, _creator)
            return _creator
def AlembicNode(self, *args, **kwargs):
return self(u"AlembicNode", *args, **kwargs)
def BifMeshImportNode(self, *args, **kwargs):
return self(u"BifMeshImportNode", *args, **kwargs)
def ComputeGlobal(self, *args, **kwargs):
return self(u"ComputeGlobal", *args, **kwargs)
def ComputeLocal(self, *args, **kwargs):
return self(u"ComputeLocal", *args, **kwargs)
def RScontainer(self, *args, **kwargs):
return self(u"RScontainer", *args, **kwargs)
def ShaderfxGameHair(self, *args, **kwargs):
return self(u"ShaderfxGameHair", *args, **kwargs)
def ShaderfxShader(self, *args, **kwargs):
return self(u"ShaderfxShader", *args, **kwargs)
def SphereLocator(self, *args, **kwargs):
return self(u"SphereLocator", *args, **kwargs)
def StingrayPBS(self, *args, **kwargs):
return self(u"StingrayPBS", *args, **kwargs)
def Unfold3DOptimize(self, *args, **kwargs):
return self(u"Unfold3DOptimize", *args, **kwargs)
def Unfold3DUnfold(self, *args, **kwargs):
return self(u"Unfold3DUnfold", *args, **kwargs)
def absOverride(self, *args, **kwargs):
return self(u"absOverride", *args, **kwargs)
def absUniqueOverride(self, *args, **kwargs):
return self(u"absUniqueOverride", *args, **kwargs)
def addDoubleLinear(self, *args, **kwargs):
return self(u"addDoubleLinear", *args, **kwargs)
def addMatrix(self, *args, **kwargs):
return self(u"addMatrix", *args, **kwargs)
def adskMaterial(self, *args, **kwargs):
return self(u"adskMaterial", *args, **kwargs)
def alignCurve(self, *args, **kwargs):
return self(u"alignCurve", *args, **kwargs)
def alignManip(self, *args, **kwargs):
return self(u"alignManip", *args, **kwargs)
def alignSurface(self, *args, **kwargs):
return self(u"alignSurface", *args, **kwargs)
def ambientLight(self, *args, **kwargs):
return self(u"ambientLight", *args, **kwargs)
def angleBetween(self, *args, **kwargs):
return self(u"angleBetween", *args, **kwargs)
def angleDimension(self, *args, **kwargs):
return self(u"angleDimension", *args, **kwargs)
def animBlend(self, *args, **kwargs):
return self(u"animBlend", *args, **kwargs)
def animBlendInOut(self, *args, **kwargs):
return self(u"animBlendInOut", *args, **kwargs)
def animBlendNodeEnum(self, *args, **kwargs):
return self(u"animBlendNodeEnum", *args, **kwargs)
def animBlendNodeTime(self, *args, **kwargs):
return self(u"animBlendNodeTime", *args, **kwargs)
def animClip(self, *args, **kwargs):
return self(u"animClip", *args, **kwargs)
def animCurveTA(self, *args, **kwargs):
return self(u"animCurveTA", *args, **kwargs)
def animCurveTL(self, *args, **kwargs):
return self(u"animCurveTL", *args, **kwargs)
def animCurveTT(self, *args, **kwargs):
return self(u"animCurveTT", *args, **kwargs)
def animCurveTU(self, *args, **kwargs):
return self(u"animCurveTU", *args, **kwargs)
def animCurveUA(self, *args, **kwargs):
return self(u"animCurveUA", *args, **kwargs)
def animCurveUL(self, *args, **kwargs):
return self(u"animCurveUL", *args, **kwargs)
def animCurveUT(self, *args, **kwargs):
return self(u"animCurveUT", *args, **kwargs)
def animCurveUU(self, *args, **kwargs):
return self(u"animCurveUU", *args, **kwargs)
def animLayer(self, *args, **kwargs):
return self(u"animLayer", *args, **kwargs)
def anisotropic(self, *args, **kwargs):
return self(u"anisotropic", *args, **kwargs)
def annotationShape(self, *args, **kwargs):
return self(u"annotationShape", *args, **kwargs)
def aovChildCollection(self, *args, **kwargs):
return self(u"aovChildCollection", *args, **kwargs)
def aovCollection(self, *args, **kwargs):
return self(u"aovCollection", *args, **kwargs)
def applyAbsIntOverride(self, *args, **kwargs):
return self(u"applyAbsIntOverride", *args, **kwargs)
def applyAbsOverride(self, *args, **kwargs):
return self(u"applyAbsOverride", *args, **kwargs)
def applyOverride(self, *args, **kwargs):
return self(u"applyOverride", *args, **kwargs)
def applyRelIntOverride(self, *args, **kwargs):
return self(u"applyRelIntOverride", *args, **kwargs)
def applyRelOverride(self, *args, **kwargs):
return self(u"applyRelOverride", *args, **kwargs)
def arcLengthDimension(self, *args, **kwargs):
return self(u"arcLengthDimension", *args, **kwargs)
def areaLight(self, *args, **kwargs):
return self(u"areaLight", *args, **kwargs)
def arrayMapper(self, *args, **kwargs):
return self(u"arrayMapper", *args, **kwargs)
def arrowManip(self, *args, **kwargs):
return self(u"arrowManip", *args, **kwargs)
def arubaTessellate(self, *args, **kwargs):
return self(u"arubaTessellate", *args, **kwargs)
def assemblyDefinition(self, *args, **kwargs):
return self(u"assemblyDefinition", *args, **kwargs)
def assemblyReference(self, *args, **kwargs):
return self(u"assemblyReference", *args, **kwargs)
def attachCurve(self, *args, **kwargs):
return self(u"attachCurve", *args, **kwargs)
def attachSurface(self, *args, **kwargs):
return self(u"attachSurface", *args, **kwargs)
def attrHierarchyTest(self, *args, **kwargs):
return self(u"attrHierarchyTest", *args, **kwargs)
def audio(self, *args, **kwargs):
return self(u"audio", *args, **kwargs)
def avgCurves(self, *args, **kwargs):
return self(u"avgCurves", *args, **kwargs)
def avgCurvesManip(self, *args, **kwargs):
return self(u"avgCurvesManip", *args, **kwargs)
def avgSurfacePoints(self, *args, **kwargs):
return self(u"avgSurfacePoints", *args, **kwargs)
def axesActionManip(self, *args, **kwargs):
return self(u"axesActionManip", *args, **kwargs)
def axisAngleToQuat(self, *args, **kwargs):
return self(u"axisAngleToQuat", *args, **kwargs)
def ballProjManip(self, *args, **kwargs):
return self(u"ballProjManip", *args, **kwargs)
def barnDoorManip(self, *args, **kwargs):
return self(u"barnDoorManip", *args, **kwargs)
def baseLattice(self, *args, **kwargs):
return self(u"baseLattice", *args, **kwargs)
def basicSelector(self, *args, **kwargs):
return self(u"basicSelector", *args, **kwargs)
def bevel(self, *args, **kwargs):
return self(u"bevel", *args, **kwargs)
def bevelManip(self, *args, **kwargs):
return self(u"bevelManip", *args, **kwargs)
def bevelPlus(self, *args, **kwargs):
return self(u"bevelPlus", *args, **kwargs)
def bezierCurve(self, *args, **kwargs):
return self(u"bezierCurve", *args, **kwargs)
def bezierCurveToNurbs(self, *args, **kwargs):
return self(u"bezierCurveToNurbs", *args, **kwargs)
def bifShape(self, *args, **kwargs):
return self(u"bifShape", *args, **kwargs)
def blendColorSets(self, *args, **kwargs):
return self(u"blendColorSets", *args, **kwargs)
def blendColors(self, *args, **kwargs):
return self(u"blendColors", *args, **kwargs)
def blendDevice(self, *args, **kwargs):
return self(u"blendDevice", *args, **kwargs)
def blendManip(self, *args, **kwargs):
return self(u"blendManip", *args, **kwargs)
def blendMatrix(self, *args, **kwargs):
return self(u"blendMatrix", *args, **kwargs)
def blendShape(self, *args, **kwargs):
return self(u"blendShape", *args, **kwargs)
def blendTwoAttr(self, *args, **kwargs):
return self(u"blendTwoAttr", *args, **kwargs)
def blendWeighted(self, *args, **kwargs):
return self(u"blendWeighted", *args, **kwargs)
def blindDataTemplate(self, *args, **kwargs):
return self(u"blindDataTemplate", *args, **kwargs)
def blinn(self, *args, **kwargs):
return self(u"blinn", *args, **kwargs)
def boneLattice(self, *args, **kwargs):
return self(u"boneLattice", *args, **kwargs)
def boolean(self, *args, **kwargs):
return self(u"boolean", *args, **kwargs)
def boundary(self, *args, **kwargs):
return self(u"boundary", *args, **kwargs)
def brownian(self, *args, **kwargs):
return self(u"brownian", *args, **kwargs)
def brush(self, *args, **kwargs):
return self(u"brush", *args, **kwargs)
def bulge(self, *args, **kwargs):
return self(u"bulge", *args, **kwargs)
def bump2d(self, *args, **kwargs):
return self(u"bump2d", *args, **kwargs)
def bump3d(self, *args, **kwargs):
return self(u"bump3d", *args, **kwargs)
def buttonManip(self, *args, **kwargs):
return self(u"buttonManip", *args, **kwargs)
def cMuscleCreator(self, *args, **kwargs):
return self(u"cMuscleCreator", *args, **kwargs)
def cMuscleDebug(self, *args, **kwargs):
return self(u"cMuscleDebug", *args, **kwargs)
def cMuscleDirection(self, *args, **kwargs):
return self(u"cMuscleDirection", *args, **kwargs)
def cMuscleDisplace(self, *args, **kwargs):
return self(u"cMuscleDisplace", *args, **kwargs)
def cMuscleDisplay(self, *args, **kwargs):
return self(u"cMuscleDisplay", *args, **kwargs)
def cMuscleFalloff(self, *args, **kwargs):
return self(u"cMuscleFalloff", *args, **kwargs)
def cMuscleKeepOut(self, *args, **kwargs):
return self(u"cMuscleKeepOut", *args, **kwargs)
def cMuscleMultiCollide(self, *args, **kwargs):
return self(u"cMuscleMultiCollide", *args, **kwargs)
def cMuscleObject(self, *args, **kwargs):
return self(u"cMuscleObject", *args, **kwargs)
def cMuscleRelative(self, *args, **kwargs):
return self(u"cMuscleRelative", *args, **kwargs)
def cMuscleShader(self, *args, **kwargs):
return self(u"cMuscleShader", *args, **kwargs)
def cMuscleSmartCollide(self, *args, **kwargs):
return self(u"cMuscleSmartCollide", *args, **kwargs)
def cMuscleSpline(self, *args, **kwargs):
return self(u"cMuscleSpline", *args, **kwargs)
def cMuscleStretch(self, *args, **kwargs):
return self(u"cMuscleStretch", *args, **kwargs)
def cMuscleSurfAttach(self, *args, **kwargs):
return self(u"cMuscleSurfAttach", *args, **kwargs)
def cMuscleSystem(self, *args, **kwargs):
return self(u"cMuscleSystem", *args, **kwargs)
def cacheBlend(self, *args, **kwargs):
return self(u"cacheBlend", *args, **kwargs)
def cacheFile(self, *args, **kwargs):
return self(u"cacheFile", *args, **kwargs)
def caddyManip(self, *args, **kwargs):
return self(u"caddyManip", *args, **kwargs)
def caddyManipBase(self, *args, **kwargs):
return self(u"caddyManipBase", *args, **kwargs)
def camera(self, *args, **kwargs):
return self(u"camera", *args, **kwargs)
def cameraManip(self, *args, **kwargs):
return self(u"cameraManip", *args, **kwargs)
def cameraPlaneManip(self, *args, **kwargs):
return self(u"cameraPlaneManip", *args, **kwargs)
def cameraSet(self, *args, **kwargs):
return self(u"cameraSet", *args, **kwargs)
def cameraView(self, *args, **kwargs):
return self(u"cameraView", *args, **kwargs)
def centerManip(self, *args, **kwargs):
return self(u"centerManip", *args, **kwargs)
def channels(self, *args, **kwargs):
return self(u"channels", *args, **kwargs)
def character(self, *args, **kwargs):
return self(u"character", *args, **kwargs)
def characterMap(self, *args, **kwargs):
return self(u"characterMap", *args, **kwargs)
def characterOffset(self, *args, **kwargs):
return self(u"characterOffset", *args, **kwargs)
def checker(self, *args, **kwargs):
return self(u"checker", *args, **kwargs)
def childNode(self, *args, **kwargs):
return self(u"childNode", *args, **kwargs)
def choice(self, *args, **kwargs):
return self(u"choice", *args, **kwargs)
def chooser(self, *args, **kwargs):
return self(u"chooser", *args, **kwargs)
def circleManip(self, *args, **kwargs):
return self(u"circleManip", *args, **kwargs)
def circleSweepManip(self, *args, **kwargs):
return self(u"circleSweepManip", *args, **kwargs)
def clamp(self, *args, **kwargs):
return self(u"clamp", *args, **kwargs)
def clipGhostShape(self, *args, **kwargs):
return self(u"clipGhostShape", *args, **kwargs)
def clipLibrary(self, *args, **kwargs):
return self(u"clipLibrary", *args, **kwargs)
def clipScheduler(self, *args, **kwargs):
return self(u"clipScheduler", *args, **kwargs)
def clipToGhostData(self, *args, **kwargs):
return self(u"clipToGhostData", *args, **kwargs)
def closeCurve(self, *args, **kwargs):
return self(u"closeCurve", *args, **kwargs)
def closeSurface(self, *args, **kwargs):
return self(u"closeSurface", *args, **kwargs)
def closestPointOnMesh(self, *args, **kwargs):
return self(u"closestPointOnMesh", *args, **kwargs)
def cloth(self, *args, **kwargs):
return self(u"cloth", *args, **kwargs)
def cloud(self, *args, **kwargs):
return self(u"cloud", *args, **kwargs)
def cluster(self, *args, **kwargs):
return self(u"cluster", *args, **kwargs)
def clusterFlexorShape(self, *args, **kwargs):
return self(u"clusterFlexorShape", *args, **kwargs)
def clusterHandle(self, *args, **kwargs):
return self(u"clusterHandle", *args, **kwargs)
def coiManip(self, *args, **kwargs):
return self(u"coiManip", *args, **kwargs)
def collection(self, *args, **kwargs):
return self(u"collection", *args, **kwargs)
def collisionModel(self, *args, **kwargs):
return self(u"collisionModel", *args, **kwargs)
def colorComposite(self, *args, **kwargs):
return self(u"colorComposite", *args, **kwargs)
def colorCondition(self, *args, **kwargs):
return self(u"colorCondition", *args, **kwargs)
def colorConstant(self, *args, **kwargs):
return self(u"colorConstant", *args, **kwargs)
def colorCorrect(self, *args, **kwargs):
return self(u"colorCorrect", *args, **kwargs)
def colorLogic(self, *args, **kwargs):
return self(u"colorLogic", *args, **kwargs)
def colorMask(self, *args, **kwargs):
return self(u"colorMask", *args, **kwargs)
def colorMath(self, *args, **kwargs):
return self(u"colorMath", *args, **kwargs)
def colorProfile(self, *args, **kwargs):
return self(u"colorProfile", *args, **kwargs)
def combinationShape(self, *args, **kwargs):
return self(u"combinationShape", *args, **kwargs)
def componentManip(self, *args, **kwargs):
return self(u"componentManip", *args, **kwargs)
def composeMatrix(self, *args, **kwargs):
return self(u"composeMatrix", *args, **kwargs)
def concentricProjManip(self, *args, **kwargs):
return self(u"concentricProjManip", *args, **kwargs)
def condition(self, *args, **kwargs):
return self(u"condition", *args, **kwargs)
def connectionOverride(self, *args, **kwargs):
return self(u"connectionOverride", *args, **kwargs)
def container(self, *args, **kwargs):
return self(u"container", *args, **kwargs)
def containerBase(self, *args, **kwargs):
return self(u"containerBase", *args, **kwargs)
def contourProjManip(self, *args, **kwargs):
return self(u"contourProjManip", *args, **kwargs)
def contrast(self, *args, **kwargs):
return self(u"contrast", *args, **kwargs)
def controller(self, *args, **kwargs):
return self(u"controller", *args, **kwargs)
def copyColorSet(self, *args, **kwargs):
return self(u"copyColorSet", *args, **kwargs)
def copyUVSet(self, *args, **kwargs):
return self(u"copyUVSet", *args, **kwargs)
def cpManip(self, *args, **kwargs):
return self(u"cpManip", *args, **kwargs)
def crater(self, *args, **kwargs):
return self(u"crater", *args, **kwargs)
def creaseSet(self, *args, **kwargs):
return self(u"creaseSet", *args, **kwargs)
def createBPManip(self, *args, **kwargs):
return self(u"createBPManip", *args, **kwargs)
def createCVManip(self, *args, **kwargs):
return self(u"createCVManip", *args, **kwargs)
def createColorSet(self, *args, **kwargs):
return self(u"createColorSet", *args, **kwargs)
def createEPManip(self, *args, **kwargs):
return self(u"createEPManip", *args, **kwargs)
def createPtexUV(self, *args, **kwargs):
return self(u"createPtexUV", *args, **kwargs)
def createUVSet(self, *args, **kwargs):
return self(u"createUVSet", *args, **kwargs)
def cryptomatte(self, *args, **kwargs):
return self(u"cryptomatte", *args, **kwargs)
def cubeManip(self, *args, **kwargs):
return self(u"cubeManip", *args, **kwargs)
def cubicProjManip(self, *args, **kwargs):
return self(u"cubicProjManip", *args, **kwargs)
def curveEdManip(self, *args, **kwargs):
return self(u"curveEdManip", *args, **kwargs)
def curveFromMeshCoM(self, *args, **kwargs):
return self(u"curveFromMeshCoM", *args, **kwargs)
def curveFromMeshEdge(self, *args, **kwargs):
return self(u"curveFromMeshEdge", *args, **kwargs)
def curveFromSubdivEdge(self, *args, **kwargs):
return self(u"curveFromSubdivEdge", *args, **kwargs)
def curveFromSubdivFace(self, *args, **kwargs):
return self(u"curveFromSubdivFace", *args, **kwargs)
def curveFromSurfaceBnd(self, *args, **kwargs):
return self(u"curveFromSurfaceBnd", *args, **kwargs)
def curveFromSurfaceCoS(self, *args, **kwargs):
return self(u"curveFromSurfaceCoS", *args, **kwargs)
def curveFromSurfaceIso(self, *args, **kwargs):
return self(u"curveFromSurfaceIso", *args, **kwargs)
def curveInfo(self, *args, **kwargs):
return self(u"curveInfo", *args, **kwargs)
def curveIntersect(self, *args, **kwargs):
return self(u"curveIntersect", *args, **kwargs)
def curveSegmentManip(self, *args, **kwargs):
return self(u"curveSegmentManip", *args, **kwargs)
def curveVarGroup(self, *args, **kwargs):
return self(u"curveVarGroup", *args, **kwargs)
def curveWarp(self, *args, **kwargs):
return self(u"curveWarp", *args, **kwargs)
def dagContainer(self, *args, **kwargs):
return self(u"dagContainer", *args, **kwargs)
def dagPose(self, *args, **kwargs):
return self(u"dagPose", *args, **kwargs)
def dataBlockTest(self, *args, **kwargs):
return self(u"dataBlockTest", *args, **kwargs)
def decomposeMatrix(self, *args, **kwargs):
return self(u"decomposeMatrix", *args, **kwargs)
def defaultLightList(self, *args, **kwargs):
return self(u"defaultLightList", *args, **kwargs)
def defaultShaderList(self, *args, **kwargs):
return self(u"defaultShaderList", *args, **kwargs)
def defaultTextureList(self, *args, **kwargs):
return self(u"defaultTextureList", *args, **kwargs)
def deformBend(self, *args, **kwargs):
return self(u"deformBend", *args, **kwargs)
def deformBendManip(self, *args, **kwargs):
return self(u"deformBendManip", *args, **kwargs)
def deformFlare(self, *args, **kwargs):
return self(u"deformFlare", *args, **kwargs)
def deformFlareManip(self, *args, **kwargs):
return self(u"deformFlareManip", *args, **kwargs)
def deformSine(self, *args, **kwargs):
return self(u"deformSine", *args, **kwargs)
def deformSineManip(self, *args, **kwargs):
return self(u"deformSineManip", *args, **kwargs)
def deformSquash(self, *args, **kwargs):
return self(u"deformSquash", *args, **kwargs)
def deformSquashManip(self, *args, **kwargs):
return self(u"deformSquashManip", *args, **kwargs)
def deformTwist(self, *args, **kwargs):
return self(u"deformTwist", *args, **kwargs)
def deformTwistManip(self, *args, **kwargs):
return self(u"deformTwistManip", *args, **kwargs)
def deformWave(self, *args, **kwargs):
return self(u"deformWave", *args, **kwargs)
def deformWaveManip(self, *args, **kwargs):
return self(u"deformWaveManip", *args, **kwargs)
def deleteColorSet(self, *args, **kwargs):
return self(u"deleteColorSet", *args, **kwargs)
def deleteComponent(self, *args, **kwargs):
return self(u"deleteComponent", *args, **kwargs)
def deleteUVSet(self, *args, **kwargs):
return self(u"deleteUVSet", *args, **kwargs)
def deltaMush(self, *args, **kwargs):
return self(u"deltaMush", *args, **kwargs)
def detachCurve(self, *args, **kwargs):
return self(u"detachCurve", *args, **kwargs)
def detachSurface(self, *args, **kwargs):
return self(u"detachSurface", *args, **kwargs)
def directedDisc(self, *args, **kwargs):
return self(u"directedDisc", *args, **kwargs)
def directionManip(self, *args, **kwargs):
return self(u"directionManip", *args, **kwargs)
def directionalLight(self, *args, **kwargs):
return self(u"directionalLight", *args, **kwargs)
def discManip(self, *args, **kwargs):
return self(u"discManip", *args, **kwargs)
def diskCache(self, *args, **kwargs):
return self(u"diskCache", *args, **kwargs)
def displacementShader(self, *args, **kwargs):
return self(u"displacementShader", *args, **kwargs)
def displayLayer(self, *args, **kwargs):
return self(u"displayLayer", *args, **kwargs)
def displayLayerManager(self, *args, **kwargs):
return self(u"displayLayerManager", *args, **kwargs)
def displayPoints(self, *args, **kwargs):
return self(u"displayPoints", *args, **kwargs)
def distanceBetween(self, *args, **kwargs):
return self(u"distanceBetween", *args, **kwargs)
def distanceDimShape(self, *args, **kwargs):
return self(u"distanceDimShape", *args, **kwargs)
def distanceManip(self, *args, **kwargs):
return self(u"distanceManip", *args, **kwargs)
def dofManip(self, *args, **kwargs):
return self(u"dofManip", *args, **kwargs)
def doubleShadingSwitch(self, *args, **kwargs):
return self(u"doubleShadingSwitch", *args, **kwargs)
def dpBirailSrf(self, *args, **kwargs):
return self(u"dpBirailSrf", *args, **kwargs)
def dragField(self, *args, **kwargs):
return self(u"dragField", *args, **kwargs)
def dropoffLocator(self, *args, **kwargs):
return self(u"dropoffLocator", *args, **kwargs)
def dropoffManip(self, *args, **kwargs):
return self(u"dropoffManip", *args, **kwargs)
def dynAttenuationManip(self, *args, **kwargs):
return self(u"dynAttenuationManip", *args, **kwargs)
def dynController(self, *args, **kwargs):
return self(u"dynController", *args, **kwargs)
def dynGlobals(self, *args, **kwargs):
return self(u"dynGlobals", *args, **kwargs)
def dynHolder(self, *args, **kwargs):
return self(u"dynHolder", *args, **kwargs)
def dynSpreadManip(self, *args, **kwargs):
return self(u"dynSpreadManip", *args, **kwargs)
def dynamicConstraint(self, *args, **kwargs):
return self(u"dynamicConstraint", *args, **kwargs)
def editMetadata(self, *args, **kwargs):
return self(u"editMetadata", *args, **kwargs)
def editsManager(self, *args, **kwargs):
return self(u"editsManager", *args, **kwargs)
def emitterManip(self, *args, **kwargs):
return self(u"emitterManip", *args, **kwargs)
def enableManip(self, *args, **kwargs):
return self(u"enableManip", *args, **kwargs)
def envBall(self, *args, **kwargs):
return self(u"envBall", *args, **kwargs)
def envChrome(self, *args, **kwargs):
return self(u"envChrome", *args, **kwargs)
def envCube(self, *args, **kwargs):
return self(u"envCube", *args, **kwargs)
def envFacade(self, *args, **kwargs):
return self(u"envFacade", *args, **kwargs)
def envFog(self, *args, **kwargs):
return self(u"envFog", *args, **kwargs)
def envSky(self, *args, **kwargs):
return self(u"envSky", *args, **kwargs)
def envSphere(self, *args, **kwargs):
return self(u"envSphere", *args, **kwargs)
def environmentFog(self, *args, **kwargs):
return self(u"environmentFog", *args, **kwargs)
def eulerToQuat(self, *args, **kwargs):
return self(u"eulerToQuat", *args, **kwargs)
def explodeNurbsShell(self, *args, **kwargs):
return self(u"explodeNurbsShell", *args, **kwargs)
def expression(self, *args, **kwargs):
return self(u"expression", *args, **kwargs)
def extendCurve(self, *args, **kwargs):
return self(u"extendCurve", *args, **kwargs)
def extendSurface(self, *args, **kwargs):
return self(u"extendSurface", *args, **kwargs)
def extrude(self, *args, **kwargs):
return self(u"extrude", *args, **kwargs)
def extrudeManip(self, *args, **kwargs):
return self(u"extrudeManip", *args, **kwargs)
def facade(self, *args, **kwargs):
return self(u"facade", *args, **kwargs)
def ffBlendSrf(self, *args, **kwargs):
return self(u"ffBlendSrf", *args, **kwargs)
def ffBlendSrfObsolete(self, *args, **kwargs):
return self(u"ffBlendSrfObsolete", *args, **kwargs)
def ffFilletSrf(self, *args, **kwargs):
return self(u"ffFilletSrf", *args, **kwargs)
def fieldManip(self, *args, **kwargs):
return self(u"fieldManip", *args, **kwargs)
def fieldsManip(self, *args, **kwargs):
return self(u"fieldsManip", *args, **kwargs)
def file(self, *args, **kwargs):
return self(u"file", *args, **kwargs)
def filletCurve(self, *args, **kwargs):
return self(u"filletCurve", *args, **kwargs)
def fitBspline(self, *args, **kwargs):
return self(u"fitBspline", *args, **kwargs)
def flexorShape(self, *args, **kwargs):
return self(u"flexorShape", *args, **kwargs)
def floatComposite(self, *args, **kwargs):
return self(u"floatComposite", *args, **kwargs)
def floatCondition(self, *args, **kwargs):
return self(u"floatCondition", *args, **kwargs)
def floatConstant(self, *args, **kwargs):
return self(u"floatConstant", *args, **kwargs)
def floatCorrect(self, *args, **kwargs):
return self(u"floatCorrect", *args, **kwargs)
def floatLogic(self, *args, **kwargs):
return self(u"floatLogic", *args, **kwargs)
def floatMask(self, *args, **kwargs):
return self(u"floatMask", *args, **kwargs)
def floatMath(self, *args, **kwargs):
return self(u"floatMath", *args, **kwargs)
def flow(self, *args, **kwargs):
return self(u"flow", *args, **kwargs)
def fluidEmitter(self, *args, **kwargs):
return self(u"fluidEmitter", *args, **kwargs)
def fluidShape(self, *args, **kwargs):
return self(u"fluidShape", *args, **kwargs)
def fluidSliceManip(self, *args, **kwargs):
return self(u"fluidSliceManip", *args, **kwargs)
def fluidTexture2D(self, *args, **kwargs):
return self(u"fluidTexture2D", *args, **kwargs)
def fluidTexture3D(self, *args, **kwargs):
return self(u"fluidTexture3D", *args, **kwargs)
def follicle(self, *args, **kwargs):
return self(u"follicle", *args, **kwargs)
def forceUpdateManip(self, *args, **kwargs):
return self(u"forceUpdateManip", *args, **kwargs)
def fosterParent(self, *args, **kwargs):
return self(u"fosterParent", *args, **kwargs)
def fourByFourMatrix(self, *args, **kwargs):
return self(u"fourByFourMatrix", *args, **kwargs)
def fractal(self, *args, **kwargs):
return self(u"fractal", *args, **kwargs)
def frameCache(self, *args, **kwargs):
return self(u"frameCache", *args, **kwargs)
def freePointManip(self, *args, **kwargs):
return self(u"freePointManip", *args, **kwargs)
def freePointTriadManip(self, *args, **kwargs):
return self(u"freePointTriadManip", *args, **kwargs)
def gameFbxExporter(self, *args, **kwargs):
return self(u"gameFbxExporter", *args, **kwargs)
def gammaCorrect(self, *args, **kwargs):
return self(u"gammaCorrect", *args, **kwargs)
def geoConnectable(self, *args, **kwargs):
return self(u"geoConnectable", *args, **kwargs)
def geoConnector(self, *args, **kwargs):
return self(u"geoConnector", *args, **kwargs)
def geomBind(self, *args, **kwargs):
return self(u"geomBind", *args, **kwargs)
def geometryConstraint(self, *args, **kwargs):
return self(u"geometryConstraint", *args, **kwargs)
def geometryFilter(self, *args, **kwargs):
return self(u"geometryFilter", *args, **kwargs)
def geometryOnLineManip(self, *args, **kwargs):
return self(u"geometryOnLineManip", *args, **kwargs)
def geometryVarGroup(self, *args, **kwargs):
return self(u"geometryVarGroup", *args, **kwargs)
def globalCacheControl(self, *args, **kwargs):
return self(u"globalCacheControl", *args, **kwargs)
def globalStitch(self, *args, **kwargs):
return self(u"globalStitch", *args, **kwargs)
def gpuCache(self, *args, **kwargs):
return self(u"gpuCache", *args, **kwargs)
def granite(self, *args, **kwargs):
return self(u"granite", *args, **kwargs)
def gravityField(self, *args, **kwargs):
return self(u"gravityField", *args, **kwargs)
def greasePlane(self, *args, **kwargs):
return self(u"greasePlane", *args, **kwargs)
def grid(self, *args, **kwargs):
return self(u"grid", *args, **kwargs)
def group(self, *args, **kwargs):
return self(u"group", *args, **kwargs)
def groupId(self, *args, **kwargs):
return self(u"groupId", *args, **kwargs)
def groupParts(self, *args, **kwargs):
return self(u"groupParts", *args, **kwargs)
def guide(self, *args, **kwargs):
return self(u"guide", *args, **kwargs)
def hairConstraint(self, *args, **kwargs):
return self(u"hairConstraint", *args, **kwargs)
def hairPhysicalShader(self, *args, **kwargs):
return self(u"hairPhysicalShader", *args, **kwargs)
def hairSystem(self, *args, **kwargs):
return self(u"hairSystem", *args, **kwargs)
def hairTubeShader(self, *args, **kwargs):
return self(u"hairTubeShader", *args, **kwargs)
def hardenPoint(self, *args, **kwargs):
return self(u"hardenPoint", *args, **kwargs)
def heightField(self, *args, **kwargs):
return self(u"heightField", *args, **kwargs)
def hierarchyTestNode1(self, *args, **kwargs):
return self(u"hierarchyTestNode1", *args, **kwargs)
def hierarchyTestNode2(self, *args, **kwargs):
return self(u"hierarchyTestNode2", *args, **kwargs)
def hierarchyTestNode3(self, *args, **kwargs):
return self(u"hierarchyTestNode3", *args, **kwargs)
def hikEffector(self, *args, **kwargs):
return self(u"hikEffector", *args, **kwargs)
def hikFKJoint(self, *args, **kwargs):
return self(u"hikFKJoint", *args, **kwargs)
def hikGroundPlane(self, *args, **kwargs):
return self(u"hikGroundPlane", *args, **kwargs)
def hikHandle(self, *args, **kwargs):
return self(u"hikHandle", *args, **kwargs)
def hikIKEffector(self, *args, **kwargs):
return self(u"hikIKEffector", *args, **kwargs)
def hikSolver(self, *args, **kwargs):
return self(u"hikSolver", *args, **kwargs)
def historySwitch(self, *args, **kwargs):
return self(u"historySwitch", *args, **kwargs)
def holdMatrix(self, *args, **kwargs):
return self(u"holdMatrix", *args, **kwargs)
def hsvToRgb(self, *args, **kwargs):
return self(u"hsvToRgb", *args, **kwargs)
def hwReflectionMap(self, *args, **kwargs):
return self(u"hwReflectionMap", *args, **kwargs)
def hwRenderGlobals(self, *args, **kwargs):
return self(u"hwRenderGlobals", *args, **kwargs)
def hyperGraphInfo(self, *args, **kwargs):
return self(u"hyperGraphInfo", *args, **kwargs)
def hyperLayout(self, *args, **kwargs):
return self(u"hyperLayout", *args, **kwargs)
def hyperView(self, *args, **kwargs):
return self(u"hyperView", *args, **kwargs)
def igBrushManip(self, *args, **kwargs):
return self(u"igBrushManip", *args, **kwargs)
def igmDescription(self, *args, **kwargs):
return self(u"igmDescription", *args, **kwargs)
def ik2Bsolver(self, *args, **kwargs):
return self(u"ik2Bsolver", *args, **kwargs)
def ikEffector(self, *args, **kwargs):
return self(u"ikEffector", *args, **kwargs)
def ikHandle(self, *args, **kwargs):
return self(u"ikHandle", *args, **kwargs)
def ikMCsolver(self, *args, **kwargs):
return self(u"ikMCsolver", *args, **kwargs)
def ikPASolver(self, *args, **kwargs):
return self(u"ikPASolver", *args, **kwargs)
def ikRPManip(self, *args, **kwargs):
return self(u"ikRPManip", *args, **kwargs)
def ikRPsolver(self, *args, **kwargs):
return self(u"ikRPsolver", *args, **kwargs)
def ikSCsolver(self, *args, **kwargs):
return self(u"ikSCsolver", *args, **kwargs)
def ikSplineManip(self, *args, **kwargs):
return self(u"ikSplineManip", *args, **kwargs)
def ikSplineSolver(self, *args, **kwargs):
return self(u"ikSplineSolver", *args, **kwargs)
def ikSpringSolver(self, *args, **kwargs):
return self(u"ikSpringSolver", *args, **kwargs)
def ikSystem(self, *args, **kwargs):
return self(u"ikSystem", *args, **kwargs)
def imagePlane(self, *args, **kwargs):
return self(u"imagePlane", *args, **kwargs)
def implicitBox(self, *args, **kwargs):
return self(u"implicitBox", *args, **kwargs)
def implicitCone(self, *args, **kwargs):
return self(u"implicitCone", *args, **kwargs)
def implicitSphere(self, *args, **kwargs):
return self(u"implicitSphere", *args, **kwargs)
def indexManip(self, *args, **kwargs):
return self(u"indexManip", *args, **kwargs)
def insertKnotCurve(self, *args, **kwargs):
return self(u"insertKnotCurve", *args, **kwargs)
def insertKnotSurface(self, *args, **kwargs):
return self(u"insertKnotSurface", *args, **kwargs)
def instancer(self, *args, **kwargs):
return self(u"instancer", *args, **kwargs)
def intersectSurface(self, *args, **kwargs):
return self(u"intersectSurface", *args, **kwargs)
def inverseMatrix(self, *args, **kwargs):
return self(u"inverseMatrix", *args, **kwargs)
def isoparmManip(self, *args, **kwargs):
return self(u"isoparmManip", *args, **kwargs)
def jiggle(self, *args, **kwargs):
return self(u"jiggle", *args, **kwargs)
def joint(self, *args, **kwargs):
return self(u"joint", *args, **kwargs)
def jointCluster(self, *args, **kwargs):
return self(u"jointCluster", *args, **kwargs)
def jointClusterManip(self, *args, **kwargs):
return self(u"jointClusterManip", *args, **kwargs)
def jointFfd(self, *args, **kwargs):
return self(u"jointFfd", *args, **kwargs)
def jointLattice(self, *args, **kwargs):
return self(u"jointLattice", *args, **kwargs)
def jointTranslateManip(self, *args, **kwargs):
return self(u"jointTranslateManip", *args, **kwargs)
def keyframeRegionManip(self, *args, **kwargs):
return self(u"keyframeRegionManip", *args, **kwargs)
def keyingGroup(self, *args, **kwargs):
return self(u"keyingGroup", *args, **kwargs)
def lambert(self, *args, **kwargs):
return self(u"lambert", *args, **kwargs)
def lattice(self, *args, **kwargs):
return self(u"lattice", *args, **kwargs)
def layeredShader(self, *args, **kwargs):
return self(u"layeredShader", *args, **kwargs)
def layeredTexture(self, *args, **kwargs):
return self(u"layeredTexture", *args, **kwargs)
def leather(self, *args, **kwargs):
return self(u"leather", *args, **kwargs)
def lightEditor(self, *args, **kwargs):
return self(u"lightEditor", *args, **kwargs)
def lightFog(self, *args, **kwargs):
return self(u"lightFog", *args, **kwargs)
def lightGroup(self, *args, **kwargs):
return self(u"lightGroup", *args, **kwargs)
def lightInfo(self, *args, **kwargs):
return self(u"lightInfo", *args, **kwargs)
def lightItem(self, *args, **kwargs):
return self(u"lightItem", *args, **kwargs)
def lightItemBase(self, *args, **kwargs):
return self(u"lightItemBase", *args, **kwargs)
def lightLinker(self, *args, **kwargs):
return self(u"lightLinker", *args, **kwargs)
def lightList(self, *args, **kwargs):
return self(u"lightList", *args, **kwargs)
def lightManip(self, *args, **kwargs):
return self(u"lightManip", *args, **kwargs)
def lightsCollection(self, *args, **kwargs):
return self(u"lightsCollection", *args, **kwargs)
def limitManip(self, *args, **kwargs):
return self(u"limitManip", *args, **kwargs)
def lineManip(self, *args, **kwargs):
return self(u"lineManip", *args, **kwargs)
def lineModifier(self, *args, **kwargs):
return self(u"lineModifier", *args, **kwargs)
def listItem(self, *args, **kwargs):
return self(u"listItem", *args, **kwargs)
def locator(self, *args, **kwargs):
return self(u"locator", *args, **kwargs)
def lodGroup(self, *args, **kwargs):
return self(u"lodGroup", *args, **kwargs)
def lodThresholds(self, *args, **kwargs):
return self(u"lodThresholds", *args, **kwargs)
def loft(self, *args, **kwargs):
return self(u"loft", *args, **kwargs)
def lookAt(self, *args, **kwargs):
return self(u"lookAt", *args, **kwargs)
def luminance(self, *args, **kwargs):
return self(u"luminance", *args, **kwargs)
def makeGroup(self, *args, **kwargs):
return self(u"makeGroup", *args, **kwargs)
def makeNurbCircle(self, *args, **kwargs):
return self(u"makeNurbCircle", *args, **kwargs)
def makeNurbCone(self, *args, **kwargs):
return self(u"makeNurbCone", *args, **kwargs)
def makeNurbCube(self, *args, **kwargs):
return self(u"makeNurbCube", *args, **kwargs)
def makeNurbCylinder(self, *args, **kwargs):
return self(u"makeNurbCylinder", *args, **kwargs)
def makeNurbPlane(self, *args, **kwargs):
return self(u"makeNurbPlane", *args, **kwargs)
def makeNurbSphere(self, *args, **kwargs):
return self(u"makeNurbSphere", *args, **kwargs)
def makeNurbTorus(self, *args, **kwargs):
return self(u"makeNurbTorus", *args, **kwargs)
def makeNurbsSquare(self, *args, **kwargs):
return self(u"makeNurbsSquare", *args, **kwargs)
def makeTextCurves(self, *args, **kwargs):
return self(u"makeTextCurves", *args, **kwargs)
def mandelbrot(self, *args, **kwargs):
return self(u"mandelbrot", *args, **kwargs)
def mandelbrot3D(self, *args, **kwargs):
return self(u"mandelbrot3D", *args, **kwargs)
def manip2DContainer(self, *args, **kwargs):
return self(u"manip2DContainer", *args, **kwargs)
def manipContainer(self, *args, **kwargs):
return self(u"manipContainer", *args, **kwargs)
def marble(self, *args, **kwargs):
return self(u"marble", *args, **kwargs)
def markerManip(self, *args, **kwargs):
return self(u"markerManip", *args, **kwargs)
def materialFacade(self, *args, **kwargs):
return self(u"materialFacade", *args, **kwargs)
def materialInfo(self, *args, **kwargs):
return self(u"materialInfo", *args, **kwargs)
def materialOverride(self, *args, **kwargs):
return self(u"materialOverride", *args, **kwargs)
def membrane(self, *args, **kwargs):
return self(u"membrane", *args, **kwargs)
def mesh(self, *args, **kwargs):
return self(u"mesh", *args, **kwargs)
def meshVarGroup(self, *args, **kwargs):
return self(u"meshVarGroup", *args, **kwargs)
def motionPath(self, *args, **kwargs):
return self(u"motionPath", *args, **kwargs)
def motionPathManip(self, *args, **kwargs):
return self(u"motionPathManip", *args, **kwargs)
def motionTrail(self, *args, **kwargs):
return self(u"motionTrail", *args, **kwargs)
def motionTrailShape(self, *args, **kwargs):
return self(u"motionTrailShape", *args, **kwargs)
def mountain(self, *args, **kwargs):
return self(u"mountain", *args, **kwargs)
def moveVertexManip(self, *args, **kwargs):
return self(u"moveVertexManip", *args, **kwargs)
def movie(self, *args, **kwargs):
return self(u"movie", *args, **kwargs)
def mpBirailSrf(self, *args, **kwargs):
return self(u"mpBirailSrf", *args, **kwargs)
def multDoubleLinear(self, *args, **kwargs):
return self(u"multDoubleLinear", *args, **kwargs)
def multMatrix(self, *args, **kwargs):
return self(u"multMatrix", *args, **kwargs)
def multilisterLight(self, *args, **kwargs):
return self(u"multilisterLight", *args, **kwargs)
def multiplyDivide(self, *args, **kwargs):
return self(u"multiplyDivide", *args, **kwargs)
def mute(self, *args, **kwargs):
return self(u"mute", *args, **kwargs)
def nCloth(self, *args, **kwargs):
return self(u"nCloth", *args, **kwargs)
def nComponent(self, *args, **kwargs):
return self(u"nComponent", *args, **kwargs)
def nParticle(self, *args, **kwargs):
return self(u"nParticle", *args, **kwargs)
def nRigid(self, *args, **kwargs):
return self(u"nRigid", *args, **kwargs)
def nearestPointOnCurve(self, *args, **kwargs):
return self(u"nearestPointOnCurve", *args, **kwargs)
def network(self, *args, **kwargs):
return self(u"network", *args, **kwargs)
def newtonField(self, *args, **kwargs):
return self(u"newtonField", *args, **kwargs)
def newtonManip(self, *args, **kwargs):
return self(u"newtonManip", *args, **kwargs)
def nexManip(self, *args, **kwargs):
return self(u"nexManip", *args, **kwargs)
def nodeGraphEditorInfo(self, *args, **kwargs):
return self(u"nodeGraphEditorInfo", *args, **kwargs)
def noise(self, *args, **kwargs):
return self(u"noise", *args, **kwargs)
def nonLinear(self, *args, **kwargs):
return self(u"nonLinear", *args, **kwargs)
def normalConstraint(self, *args, **kwargs):
return self(u"normalConstraint", *args, **kwargs)
def nucleus(self, *args, **kwargs):
return self(u"nucleus", *args, **kwargs)
def nurbsCurve(self, *args, **kwargs):
return self(u"nurbsCurve", *args, **kwargs)
def nurbsCurveToBezier(self, *args, **kwargs):
return self(u"nurbsCurveToBezier", *args, **kwargs)
def nurbsSurface(self, *args, **kwargs):
return self(u"nurbsSurface", *args, **kwargs)
def nurbsTessellate(self, *args, **kwargs):
return self(u"nurbsTessellate", *args, **kwargs)
def nurbsToSubdiv(self, *args, **kwargs):
return self(u"nurbsToSubdiv", *args, **kwargs)
def nurbsToSubdivProc(self, *args, **kwargs):
return self(u"nurbsToSubdivProc", *args, **kwargs)
def objectAttrFilter(self, *args, **kwargs):
return self(u"objectAttrFilter", *args, **kwargs)
def objectBinFilter(self, *args, **kwargs):
return self(u"objectBinFilter", *args, **kwargs)
def objectFilter(self, *args, **kwargs):
return self(u"objectFilter", *args, **kwargs)
def objectGrpToComp(self, *args, **kwargs):
return self(u"objectGrpToComp", *args, **kwargs)
def objectMultiFilter(self, *args, **kwargs):
return self(u"objectMultiFilter", *args, **kwargs)
def objectNameFilter(self, *args, **kwargs):
return self(u"objectNameFilter", *args, **kwargs)
def objectRenderFilter(self, *args, **kwargs):
return self(u"objectRenderFilter", *args, **kwargs)
def objectScriptFilter(self, *args, **kwargs):
return self(u"objectScriptFilter", *args, **kwargs)
def objectSet(self, *args, **kwargs):
return self(u"objectSet", *args, **kwargs)
def objectTypeFilter(self, *args, **kwargs):
return self(u"objectTypeFilter", *args, **kwargs)
def ocean(self, *args, **kwargs):
return self(u"ocean", *args, **kwargs)
def oceanShader(self, *args, **kwargs):
return self(u"oceanShader", *args, **kwargs)
def offsetCos(self, *args, **kwargs):
return self(u"offsetCos", *args, **kwargs)
def offsetCosManip(self, *args, **kwargs):
return self(u"offsetCosManip", *args, **kwargs)
def offsetCurve(self, *args, **kwargs):
return self(u"offsetCurve", *args, **kwargs)
def offsetCurveManip(self, *args, **kwargs):
return self(u"offsetCurveManip", *args, **kwargs)
def offsetDeformer(self, *args, **kwargs):
return self(u"offsetDeformer", *args, **kwargs)
def offsetSurface(self, *args, **kwargs):
return self(u"offsetSurface", *args, **kwargs)
def offsetSurfaceManip(self, *args, **kwargs):
return self(u"offsetSurfaceManip", *args, **kwargs)
def oldBlindDataBase(self, *args, **kwargs):
return self(u"oldBlindDataBase", *args, **kwargs)
def oldNormalConstraint(self, *args, **kwargs):
return self(u"oldNormalConstraint", *args, **kwargs)
def opticalFX(self, *args, **kwargs):
return self(u"opticalFX", *args, **kwargs)
def orientConstraint(self, *args, **kwargs):
return self(u"orientConstraint", *args, **kwargs)
def orientationMarker(self, *args, **kwargs):
return self(u"orientationMarker", *args, **kwargs)
def override(self, *args, **kwargs):
return self(u"override", *args, **kwargs)
def pairBlend(self, *args, **kwargs):
return self(u"pairBlend", *args, **kwargs)
def paramDimension(self, *args, **kwargs):
return self(u"paramDimension", *args, **kwargs)
def parentConstraint(self, *args, **kwargs):
return self(u"parentConstraint", *args, **kwargs)
def particle(self, *args, **kwargs):
return self(u"particle", *args, **kwargs)
def particleAgeMapper(self, *args, **kwargs):
return self(u"particleAgeMapper", *args, **kwargs)
def particleCloud(self, *args, **kwargs):
return self(u"particleCloud", *args, **kwargs)
def particleColorMapper(self, *args, **kwargs):
return self(u"particleColorMapper", *args, **kwargs)
def particleSamplerInfo(self, *args, **kwargs):
return self(u"particleSamplerInfo", *args, **kwargs)
def partition(self, *args, **kwargs):
return self(u"partition", *args, **kwargs)
def passContributionMap(self, *args, **kwargs):
return self(u"passContributionMap", *args, **kwargs)
def passMatrix(self, *args, **kwargs):
return self(u"passMatrix", *args, **kwargs)
def pfxHair(self, *args, **kwargs):
return self(u"pfxHair", *args, **kwargs)
def pfxToon(self, *args, **kwargs):
return self(u"pfxToon", *args, **kwargs)
def phong(self, *args, **kwargs):
return self(u"phong", *args, **kwargs)
def phongE(self, *args, **kwargs):
return self(u"phongE", *args, **kwargs)
def pickMatrix(self, *args, **kwargs):
return self(u"pickMatrix", *args, **kwargs)
def pivot2dManip(self, *args, **kwargs):
return self(u"pivot2dManip", *args, **kwargs)
def pivotAndOrientManip(self, *args, **kwargs):
return self(u"pivotAndOrientManip", *args, **kwargs)
def place2dTexture(self, *args, **kwargs):
return self(u"place2dTexture", *args, **kwargs)
def place3dTexture(self, *args, **kwargs):
return self(u"place3dTexture", *args, **kwargs)
def placerTool(self, *args, **kwargs):
return self(u"placerTool", *args, **kwargs)
def planarProjManip(self, *args, **kwargs):
return self(u"planarProjManip", *args, **kwargs)
def planarTrimSurface(self, *args, **kwargs):
return self(u"planarTrimSurface", *args, **kwargs)
def plusMinusAverage(self, *args, **kwargs):
return self(u"plusMinusAverage", *args, **kwargs)
def pointConstraint(self, *args, **kwargs):
return self(u"pointConstraint", *args, **kwargs)
def pointEmitter(self, *args, **kwargs):
return self(u"pointEmitter", *args, **kwargs)
def pointLight(self, *args, **kwargs):
return self(u"pointLight", *args, **kwargs)
def pointMatrixMult(self, *args, **kwargs):
return self(u"pointMatrixMult", *args, **kwargs)
def pointOnCurveInfo(self, *args, **kwargs):
return self(u"pointOnCurveInfo", *args, **kwargs)
def pointOnCurveManip(self, *args, **kwargs):
return self(u"pointOnCurveManip", *args, **kwargs)
def pointOnLineManip(self, *args, **kwargs):
return self(u"pointOnLineManip", *args, **kwargs)
def pointOnSurfManip(self, *args, **kwargs):
return self(u"pointOnSurfManip", *args, **kwargs)
def pointOnSurfaceInfo(self, *args, **kwargs):
return self(u"pointOnSurfaceInfo", *args, **kwargs)
def pointOnSurfaceManip(self, *args, **kwargs):
return self(u"pointOnSurfaceManip", *args, **kwargs)
def polyAppend(self, *args, **kwargs):
return self(u"polyAppend", *args, **kwargs)
def polyAppendVertex(self, *args, **kwargs):
return self(u"polyAppendVertex", *args, **kwargs)
def polyAutoProj(self, *args, **kwargs):
return self(u"polyAutoProj", *args, **kwargs)
def polyAutoProjManip(self, *args, **kwargs):
return self(u"polyAutoProjManip", *args, **kwargs)
def polyAverageVertex(self, *args, **kwargs):
return self(u"polyAverageVertex", *args, **kwargs)
def polyBevel(self, *args, **kwargs):
return self(u"polyBevel", *args, **kwargs)
def polyBevel2(self, *args, **kwargs):
return self(u"polyBevel2", *args, **kwargs)
def polyBevel3(self, *args, **kwargs):
return self(u"polyBevel3", *args, **kwargs)
def polyBlindData(self, *args, **kwargs):
return self(u"polyBlindData", *args, **kwargs)
def polyBoolOp(self, *args, **kwargs):
return self(u"polyBoolOp", *args, **kwargs)
def polyBridgeEdge(self, *args, **kwargs):
return self(u"polyBridgeEdge", *args, **kwargs)
def polyCBoolOp(self, *args, **kwargs):
return self(u"polyCBoolOp", *args, **kwargs)
def polyCaddyManip(self, *args, **kwargs):
return self(u"polyCaddyManip", *args, **kwargs)
def polyChipOff(self, *args, **kwargs):
return self(u"polyChipOff", *args, **kwargs)
def polyCircularize(self, *args, **kwargs):
return self(u"polyCircularize", *args, **kwargs)
def polyClean(self, *args, **kwargs):
return self(u"polyClean", *args, **kwargs)
def polyCloseBorder(self, *args, **kwargs):
return self(u"polyCloseBorder", *args, **kwargs)
def polyCollapseEdge(self, *args, **kwargs):
return self(u"polyCollapseEdge", *args, **kwargs)
def polyCollapseF(self, *args, **kwargs):
return self(u"polyCollapseF", *args, **kwargs)
def polyColorDel(self, *args, **kwargs):
return self(u"polyColorDel", *args, **kwargs)
def polyColorMod(self, *args, **kwargs):
return self(u"polyColorMod", *args, **kwargs)
def polyColorPerVertex(self, *args, **kwargs):
return self(u"polyColorPerVertex", *args, **kwargs)
def polyCone(self, *args, **kwargs):
return self(u"polyCone", *args, **kwargs)
def polyContourProj(self, *args, **kwargs):
return self(u"polyContourProj", *args, **kwargs)
def polyCopyUV(self, *args, **kwargs):
return self(u"polyCopyUV", *args, **kwargs)
def polyCrease(self, *args, **kwargs):
return self(u"polyCrease", *args, **kwargs)
def polyCreaseEdge(self, *args, **kwargs):
return self(u"polyCreaseEdge", *args, **kwargs)
def polyCreateFace(self, *args, **kwargs):
return self(u"polyCreateFace", *args, **kwargs)
def polyCreateToolManip(self, *args, **kwargs):
return self(u"polyCreateToolManip", *args, **kwargs)
def polyCube(self, *args, **kwargs):
return self(u"polyCube", *args, **kwargs)
def polyCut(self, *args, **kwargs):
return self(u"polyCut", *args, **kwargs)
def polyCutManip(self, *args, **kwargs):
return self(u"polyCutManip", *args, **kwargs)
def polyCylProj(self, *args, **kwargs):
return self(u"polyCylProj", *args, **kwargs)
def polyCylinder(self, *args, **kwargs):
return self(u"polyCylinder", *args, **kwargs)
def polyDelEdge(self, *args, **kwargs):
return self(u"polyDelEdge", *args, **kwargs)
def polyDelFacet(self, *args, **kwargs):
return self(u"polyDelFacet", *args, **kwargs)
def polyDelVertex(self, *args, **kwargs):
return self(u"polyDelVertex", *args, **kwargs)
def polyDisc(self, *args, **kwargs):
return self(u"polyDisc", *args, **kwargs)
def polyDuplicateEdge(self, *args, **kwargs):
return self(u"polyDuplicateEdge", *args, **kwargs)
def polyEdgeToCurve(self, *args, **kwargs):
return self(u"polyEdgeToCurve", *args, **kwargs)
def polyEditEdgeFlow(self, *args, **kwargs):
return self(u"polyEditEdgeFlow", *args, **kwargs)
def polyExtrudeEdge(self, *args, **kwargs):
return self(u"polyExtrudeEdge", *args, **kwargs)
def polyExtrudeFace(self, *args, **kwargs):
return self(u"polyExtrudeFace", *args, **kwargs)
def polyExtrudeVertex(self, *args, **kwargs):
return self(u"polyExtrudeVertex", *args, **kwargs)
def polyFlipEdge(self, *args, **kwargs):
return self(u"polyFlipEdge", *args, **kwargs)
def polyFlipUV(self, *args, **kwargs):
return self(u"polyFlipUV", *args, **kwargs)
def polyGear(self, *args, **kwargs):
return self(u"polyGear", *args, **kwargs)
def polyHelix(self, *args, **kwargs):
return self(u"polyHelix", *args, **kwargs)
def polyHoleFace(self, *args, **kwargs):
return self(u"polyHoleFace", *args, **kwargs)
def polyLayoutUV(self, *args, **kwargs):
return self(u"polyLayoutUV", *args, **kwargs)
def polyMapCut(self, *args, **kwargs):
return self(u"polyMapCut", *args, **kwargs)
def polyMapDel(self, *args, **kwargs):
return self(u"polyMapDel", *args, **kwargs)
def polyMapSew(self, *args, **kwargs):
return self(u"polyMapSew", *args, **kwargs)
def polyMapSewMove(self, *args, **kwargs):
return self(u"polyMapSewMove", *args, **kwargs)
def polyMappingManip(self, *args, **kwargs):
return self(u"polyMappingManip", *args, **kwargs)
def polyMergeEdge(self, *args, **kwargs):
return self(u"polyMergeEdge", *args, **kwargs)
def polyMergeFace(self, *args, **kwargs):
return self(u"polyMergeFace", *args, **kwargs)
def polyMergeUV(self, *args, **kwargs):
return self(u"polyMergeUV", *args, **kwargs)
def polyMergeVert(self, *args, **kwargs):
return self(u"polyMergeVert", *args, **kwargs)
def polyMergeVertsManip(self, *args, **kwargs):
return self(u"polyMergeVertsManip", *args, **kwargs)
def polyMirror(self, *args, **kwargs):
return self(u"polyMirror", *args, **kwargs)
def polyModifierManip(self, *args, **kwargs):
return self(u"polyModifierManip", *args, **kwargs)
def polyMoveEdge(self, *args, **kwargs):
return self(u"polyMoveEdge", *args, **kwargs)
def polyMoveFace(self, *args, **kwargs):
return self(u"polyMoveFace", *args, **kwargs)
def polyMoveFacetUV(self, *args, **kwargs):
return self(u"polyMoveFacetUV", *args, **kwargs)
def polyMoveUV(self, *args, **kwargs):
return self(u"polyMoveUV", *args, **kwargs)
def polyMoveUVManip(self, *args, **kwargs):
return self(u"polyMoveUVManip", *args, **kwargs)
def polyMoveVertex(self, *args, **kwargs):
return self(u"polyMoveVertex", *args, **kwargs)
def polyMoveVertexManip(self, *args, **kwargs):
return self(u"polyMoveVertexManip", *args, **kwargs)
def polyNormal(self, *args, **kwargs):
return self(u"polyNormal", *args, **kwargs)
def polyNormalPerVertex(self, *args, **kwargs):
return self(u"polyNormalPerVertex", *args, **kwargs)
def polyNormalizeUV(self, *args, **kwargs):
return self(u"polyNormalizeUV", *args, **kwargs)
def polyOptUvs(self, *args, **kwargs):
return self(u"polyOptUvs", *args, **kwargs)
def polyPassThru(self, *args, **kwargs):
return self(u"polyPassThru", *args, **kwargs)
def polyPinUV(self, *args, **kwargs):
return self(u"polyPinUV", *args, **kwargs)
def polyPipe(self, *args, **kwargs):
return self(u"polyPipe", *args, **kwargs)
def polyPlanarProj(self, *args, **kwargs):
return self(u"polyPlanarProj", *args, **kwargs)
def polyPlane(self, *args, **kwargs):
return self(u"polyPlane", *args, **kwargs)
def polyPlatonic(self, *args, **kwargs):
return self(u"polyPlatonic", *args, **kwargs)
def polyPlatonicSolid(self, *args, **kwargs):
return self(u"polyPlatonicSolid", *args, **kwargs)
def polyPoke(self, *args, **kwargs):
return self(u"polyPoke", *args, **kwargs)
def polyPokeManip(self, *args, **kwargs):
return self(u"polyPokeManip", *args, **kwargs)
def polyPrimitiveMisc(self, *args, **kwargs):
return self(u"polyPrimitiveMisc", *args, **kwargs)
def polyPrism(self, *args, **kwargs):
return self(u"polyPrism", *args, **kwargs)
def polyProj(self, *args, **kwargs):
return self(u"polyProj", *args, **kwargs)
def polyProjManip(self, *args, **kwargs):
return self(u"polyProjManip", *args, **kwargs)
def polyProjectCurve(self, *args, **kwargs):
return self(u"polyProjectCurve", *args, **kwargs)
def polyPyramid(self, *args, **kwargs):
return self(u"polyPyramid", *args, **kwargs)
def polyQuad(self, *args, **kwargs):
return self(u"polyQuad", *args, **kwargs)
def polyReduce(self, *args, **kwargs):
return self(u"polyReduce", *args, **kwargs)
def polyRemesh(self, *args, **kwargs):
return self(u"polyRemesh", *args, **kwargs)
def polyRetopo(self, *args, **kwargs):
return self(u"polyRetopo", *args, **kwargs)
def polySeparate(self, *args, **kwargs):
return self(u"polySeparate", *args, **kwargs)
def polySewEdge(self, *args, **kwargs):
return self(u"polySewEdge", *args, **kwargs)
def polySmooth(self, *args, **kwargs):
return self(u"polySmooth", *args, **kwargs)
def polySmoothFace(self, *args, **kwargs):
return self(u"polySmoothFace", *args, **kwargs)
def polySmoothProxy(self, *args, **kwargs):
return self(u"polySmoothProxy", *args, **kwargs)
def polySoftEdge(self, *args, **kwargs):
return self(u"polySoftEdge", *args, **kwargs)
def polySphProj(self, *args, **kwargs):
return self(u"polySphProj", *args, **kwargs)
def polySphere(self, *args, **kwargs):
return self(u"polySphere", *args, **kwargs)
def polySpinEdge(self, *args, **kwargs):
return self(u"polySpinEdge", *args, **kwargs)
def polySplit(self, *args, **kwargs):
return self(u"polySplit", *args, **kwargs)
def polySplitEdge(self, *args, **kwargs):
return self(u"polySplitEdge", *args, **kwargs)
def polySplitRing(self, *args, **kwargs):
return self(u"polySplitRing", *args, **kwargs)
def polySplitToolManip1(self, *args, **kwargs):
return self(u"polySplitToolManip1", *args, **kwargs)
def polySplitVert(self, *args, **kwargs):
return self(u"polySplitVert", *args, **kwargs)
def polySubdEdge(self, *args, **kwargs):
return self(u"polySubdEdge", *args, **kwargs)
def polySubdFace(self, *args, **kwargs):
return self(u"polySubdFace", *args, **kwargs)
def polySuperShape(self, *args, **kwargs):
return self(u"polySuperShape", *args, **kwargs)
def polyToSubdiv(self, *args, **kwargs):
return self(u"polyToSubdiv", *args, **kwargs)
def polyTorus(self, *args, **kwargs):
return self(u"polyTorus", *args, **kwargs)
def polyTransfer(self, *args, **kwargs):
return self(u"polyTransfer", *args, **kwargs)
def polyTriangulate(self, *args, **kwargs):
return self(u"polyTriangulate", *args, **kwargs)
def polyTweak(self, *args, **kwargs):
return self(u"polyTweak", *args, **kwargs)
def polyTweakUV(self, *args, **kwargs):
return self(u"polyTweakUV", *args, **kwargs)
def polyUVRectangle(self, *args, **kwargs):
return self(u"polyUVRectangle", *args, **kwargs)
def polyUnite(self, *args, **kwargs):
return self(u"polyUnite", *args, **kwargs)
def polyWedgeFace(self, *args, **kwargs):
return self(u"polyWedgeFace", *args, **kwargs)
def poseInterpolator(self, *args, **kwargs):
return self(u"poseInterpolator", *args, **kwargs)
def positionMarker(self, *args, **kwargs):
return self(u"positionMarker", *args, **kwargs)
def postProcessList(self, *args, **kwargs):
return self(u"postProcessList", *args, **kwargs)
def precompExport(self, *args, **kwargs):
return self(u"precompExport", *args, **kwargs)
def premultiply(self, *args, **kwargs):
return self(u"premultiply", *args, **kwargs)
def projectCurve(self, *args, **kwargs):
return self(u"projectCurve", *args, **kwargs)
def projectTangent(self, *args, **kwargs):
return self(u"projectTangent", *args, **kwargs)
def projectTangentManip(self, *args, **kwargs):
return self(u"projectTangentManip", *args, **kwargs)
def projection(self, *args, **kwargs):
return self(u"projection", *args, **kwargs)
def projectionManip(self, *args, **kwargs):
return self(u"projectionManip", *args, **kwargs)
def projectionUVManip(self, *args, **kwargs):
return self(u"projectionUVManip", *args, **kwargs)
def propModManip(self, *args, **kwargs):
return self(u"propModManip", *args, **kwargs)
def propMoveTriadManip(self, *args, **kwargs):
return self(u"propMoveTriadManip", *args, **kwargs)
def proximityPin(self, *args, **kwargs):
return self(u"proximityPin", *args, **kwargs)
def proximityWrap(self, *args, **kwargs):
return self(u"proximityWrap", *args, **kwargs)
def proxyManager(self, *args, **kwargs):
return self(u"proxyManager", *args, **kwargs)
def psdFileTex(self, *args, **kwargs):
return self(u"psdFileTex", *args, **kwargs)
def quadPtOnLineManip(self, *args, **kwargs):
return self(u"quadPtOnLineManip", *args, **kwargs)
def quadShadingSwitch(self, *args, **kwargs):
return self(u"quadShadingSwitch", *args, **kwargs)
def quatAdd(self, *args, **kwargs):
return self(u"quatAdd", *args, **kwargs)
def quatConjugate(self, *args, **kwargs):
return self(u"quatConjugate", *args, **kwargs)
def quatInvert(self, *args, **kwargs):
return self(u"quatInvert", *args, **kwargs)
def quatNegate(self, *args, **kwargs):
return self(u"quatNegate", *args, **kwargs)
def quatNormalize(self, *args, **kwargs):
return self(u"quatNormalize", *args, **kwargs)
def quatProd(self, *args, **kwargs):
return self(u"quatProd", *args, **kwargs)
def quatSlerp(self, *args, **kwargs):
return self(u"quatSlerp", *args, **kwargs)
def quatSub(self, *args, **kwargs):
return self(u"quatSub", *args, **kwargs)
def quatToAxisAngle(self, *args, **kwargs):
return self(u"quatToAxisAngle", *args, **kwargs)
def quatToEuler(self, *args, **kwargs):
return self(u"quatToEuler", *args, **kwargs)
def radialField(self, *args, **kwargs):
return self(u"radialField", *args, **kwargs)
def ramp(self, *args, **kwargs):
return self(u"ramp", *args, **kwargs)
def rampShader(self, *args, **kwargs):
return self(u"rampShader", *args, **kwargs)
def rbfSrf(self, *args, **kwargs):
return self(u"rbfSrf", *args, **kwargs)
def rbfSrfManip(self, *args, **kwargs):
return self(u"rbfSrfManip", *args, **kwargs)
def rebuildCurve(self, *args, **kwargs):
return self(u"rebuildCurve", *args, **kwargs)
def rebuildSurface(self, *args, **kwargs):
return self(u"rebuildSurface", *args, **kwargs)
def record(self, *args, **kwargs):
return self(u"record", *args, **kwargs)
def reference(self, *args, **kwargs):
return self(u"reference", *args, **kwargs)
def relOverride(self, *args, **kwargs):
return self(u"relOverride", *args, **kwargs)
def relUniqueOverride(self, *args, **kwargs):
return self(u"relUniqueOverride", *args, **kwargs)
def remapColor(self, *args, **kwargs):
return self(u"remapColor", *args, **kwargs)
def remapHsv(self, *args, **kwargs):
return self(u"remapHsv", *args, **kwargs)
def remapValue(self, *args, **kwargs):
return self(u"remapValue", *args, **kwargs)
def renderBox(self, *args, **kwargs):
return self(u"renderBox", *args, **kwargs)
def renderCone(self, *args, **kwargs):
return self(u"renderCone", *args, **kwargs)
def renderGlobals(self, *args, **kwargs):
return self(u"renderGlobals", *args, **kwargs)
def renderGlobalsList(self, *args, **kwargs):
return self(u"renderGlobalsList", *args, **kwargs)
def renderLayer(self, *args, **kwargs):
return self(u"renderLayer", *args, **kwargs)
def renderLayerManager(self, *args, **kwargs):
return self(u"renderLayerManager", *args, **kwargs)
def renderPass(self, *args, **kwargs):
return self(u"renderPass", *args, **kwargs)
def renderPassSet(self, *args, **kwargs):
return self(u"renderPassSet", *args, **kwargs)
def renderQuality(self, *args, **kwargs):
return self(u"renderQuality", *args, **kwargs)
def renderRect(self, *args, **kwargs):
return self(u"renderRect", *args, **kwargs)
def renderSetup(self, *args, **kwargs):
return self(u"renderSetup", *args, **kwargs)
def renderSetupLayer(self, *args, **kwargs):
return self(u"renderSetupLayer", *args, **kwargs)
def renderSphere(self, *args, **kwargs):
return self(u"renderSphere", *args, **kwargs)
def renderTarget(self, *args, **kwargs):
return self(u"renderTarget", *args, **kwargs)
def renderedImageSource(self, *args, **kwargs):
return self(u"renderedImageSource", *args, **kwargs)
def reorderUVSet(self, *args, **kwargs):
return self(u"reorderUVSet", *args, **kwargs)
def resolution(self, *args, **kwargs):
return self(u"resolution", *args, **kwargs)
def reverse(self, *args, **kwargs):
return self(u"reverse", *args, **kwargs)
def reverseCurve(self, *args, **kwargs):
return self(u"reverseCurve", *args, **kwargs)
def reverseCurveManip(self, *args, **kwargs):
return self(u"reverseCurveManip", *args, **kwargs)
def reverseSurface(self, *args, **kwargs):
return self(u"reverseSurface", *args, **kwargs)
def reverseSurfaceManip(self, *args, **kwargs):
return self(u"reverseSurfaceManip", *args, **kwargs)
def revolve(self, *args, **kwargs):
return self(u"revolve", *args, **kwargs)
def revolveManip(self, *args, **kwargs):
return self(u"revolveManip", *args, **kwargs)
def rgbToHsv(self, *args, **kwargs):
return self(u"rgbToHsv", *args, **kwargs)
def rigidBody(self, *args, **kwargs):
return self(u"rigidBody", *args, **kwargs)
def rigidConstraint(self, *args, **kwargs):
return self(u"rigidConstraint", *args, **kwargs)
def rigidSolver(self, *args, **kwargs):
return self(u"rigidSolver", *args, **kwargs)
def rock(self, *args, **kwargs):
return self(u"rock", *args, **kwargs)
def rotateHelper(self, *args, **kwargs):
return self(u"rotateHelper", *args, **kwargs)
def rotateLimitsManip(self, *args, **kwargs):
return self(u"rotateLimitsManip", *args, **kwargs)
def rotateManip(self, *args, **kwargs):
return self(u"rotateManip", *args, **kwargs)
def rotateUV2dManip(self, *args, **kwargs):
return self(u"rotateUV2dManip", *args, **kwargs)
def roundConstantRadius(self, *args, **kwargs):
return self(u"roundConstantRadius", *args, **kwargs)
def roundRadiusCrvManip(self, *args, **kwargs):
return self(u"roundRadiusCrvManip", *args, **kwargs)
def roundRadiusManip(self, *args, **kwargs):
return self(u"roundRadiusManip", *args, **kwargs)
def sampler(self, *args, **kwargs):
return self(u"sampler", *args, **kwargs)
def samplerInfo(self, *args, **kwargs):
return self(u"samplerInfo", *args, **kwargs)
def scaleConstraint(self, *args, **kwargs):
return self(u"scaleConstraint", *args, **kwargs)
def scaleLimitsManip(self, *args, **kwargs):
return self(u"scaleLimitsManip", *args, **kwargs)
def scaleManip(self, *args, **kwargs):
return self(u"scaleManip", *args, **kwargs)
def scaleUV2dManip(self, *args, **kwargs):
return self(u"scaleUV2dManip", *args, **kwargs)
def script(self, *args, **kwargs):
return self(u"script", *args, **kwargs)
def scriptManip(self, *args, **kwargs):
return self(u"scriptManip", *args, **kwargs)
def sculpt(self, *args, **kwargs):
return self(u"sculpt", *args, **kwargs)
def selector(self, *args, **kwargs):
return self(u"selector", *args, **kwargs)
def sequenceManager(self, *args, **kwargs):
return self(u"sequenceManager", *args, **kwargs)
def sequencer(self, *args, **kwargs):
return self(u"sequencer", *args, **kwargs)
def setRange(self, *args, **kwargs):
return self(u"setRange", *args, **kwargs)
def shaderGlow(self, *args, **kwargs):
return self(u"shaderGlow", *args, **kwargs)
def shaderOverride(self, *args, **kwargs):
return self(u"shaderOverride", *args, **kwargs)
def shadingEngine(self, *args, **kwargs):
return self(u"shadingEngine", *args, **kwargs)
def shadingMap(self, *args, **kwargs):
return self(u"shadingMap", *args, **kwargs)
def shapeEditorManager(self, *args, **kwargs):
return self(u"shapeEditorManager", *args, **kwargs)
def shellDeformer(self, *args, **kwargs):
return self(u"shellDeformer", *args, **kwargs)
def shellTessellate(self, *args, **kwargs):
return self(u"shellTessellate", *args, **kwargs)
def shot(self, *args, **kwargs):
return self(u"shot", *args, **kwargs)
def shrinkWrap(self, *args, **kwargs):
return self(u"shrinkWrap", *args, **kwargs)
def simpleSelector(self, *args, **kwargs):
return self(u"simpleSelector", *args, **kwargs)
def simpleTestNode(self, *args, **kwargs):
return self(u"simpleTestNode", *args, **kwargs)
def simpleVolumeShader(self, *args, **kwargs):
return self(u"simpleVolumeShader", *args, **kwargs)
def simplexNoise(self, *args, **kwargs):
return self(u"simplexNoise", *args, **kwargs)
def singleShadingSwitch(self, *args, **kwargs):
return self(u"singleShadingSwitch", *args, **kwargs)
def sketchPlane(self, *args, **kwargs):
return self(u"sketchPlane", *args, **kwargs)
def skinBinding(self, *args, **kwargs):
return self(u"skinBinding", *args, **kwargs)
def skinCluster(self, *args, **kwargs):
return self(u"skinCluster", *args, **kwargs)
def smoothCurve(self, *args, **kwargs):
return self(u"smoothCurve", *args, **kwargs)
def smoothTangentSrf(self, *args, **kwargs):
return self(u"smoothTangentSrf", *args, **kwargs)
def snapUV2dManip(self, *args, **kwargs):
return self(u"snapUV2dManip", *args, **kwargs)
def snapshot(self, *args, **kwargs):
return self(u"snapshot", *args, **kwargs)
def snapshotShape(self, *args, **kwargs):
return self(u"snapshotShape", *args, **kwargs)
def snow(self, *args, **kwargs):
return self(u"snow", *args, **kwargs)
def softMod(self, *args, **kwargs):
return self(u"softMod", *args, **kwargs)
def softModHandle(self, *args, **kwargs):
return self(u"softModHandle", *args, **kwargs)
def softModManip(self, *args, **kwargs):
return self(u"softModManip", *args, **kwargs)
def solidFractal(self, *args, **kwargs):
return self(u"solidFractal", *args, **kwargs)
def spBirailSrf(self, *args, **kwargs):
return self(u"spBirailSrf", *args, **kwargs)
def sphericalProjManip(self, *args, **kwargs):
return self(u"sphericalProjManip", *args, **kwargs)
def spotCylinderManip(self, *args, **kwargs):
return self(u"spotCylinderManip", *args, **kwargs)
def spotLight(self, *args, **kwargs):
return self(u"spotLight", *args, **kwargs)
def spotManip(self, *args, **kwargs):
return self(u"spotManip", *args, **kwargs)
def spring(self, *args, **kwargs):
return self(u"spring", *args, **kwargs)
def squareSrf(self, *args, **kwargs):
return self(u"squareSrf", *args, **kwargs)
def squareSrfManip(self, *args, **kwargs):
return self(u"squareSrfManip", *args, **kwargs)
def standardSurface(self, *args, **kwargs):
return self(u"standardSurface", *args, **kwargs)
def stencil(self, *args, **kwargs):
return self(u"stencil", *args, **kwargs)
def stereoRigCamera(self, *args, **kwargs):
return self(u"stereoRigCamera", *args, **kwargs)
def stitchAsNurbsShell(self, *args, **kwargs):
return self(u"stitchAsNurbsShell", *args, **kwargs)
def stitchSrf(self, *args, **kwargs):
return self(u"stitchSrf", *args, **kwargs)
def stitchSrfManip(self, *args, **kwargs):
return self(u"stitchSrfManip", *args, **kwargs)
def stroke(self, *args, **kwargs):
return self(u"stroke", *args, **kwargs)
def strokeGlobals(self, *args, **kwargs):
return self(u"strokeGlobals", *args, **kwargs)
def stucco(self, *args, **kwargs):
return self(u"stucco", *args, **kwargs)
def styleCurve(self, *args, **kwargs):
return self(u"styleCurve", *args, **kwargs)
def subCurve(self, *args, **kwargs):
return self(u"subCurve", *args, **kwargs)
def subSurface(self, *args, **kwargs):
return self(u"subSurface", *args, **kwargs)
def subdAddTopology(self, *args, **kwargs):
return self(u"subdAddTopology", *args, **kwargs)
def subdAutoProj(self, *args, **kwargs):
return self(u"subdAutoProj", *args, **kwargs)
def subdBlindData(self, *args, **kwargs):
return self(u"subdBlindData", *args, **kwargs)
def subdCleanTopology(self, *args, **kwargs):
return self(u"subdCleanTopology", *args, **kwargs)
def subdHierBlind(self, *args, **kwargs):
return self(u"subdHierBlind", *args, **kwargs)
def subdLayoutUV(self, *args, **kwargs):
return self(u"subdLayoutUV", *args, **kwargs)
def subdMapCut(self, *args, **kwargs):
return self(u"subdMapCut", *args, **kwargs)
def subdMapSewMove(self, *args, **kwargs):
return self(u"subdMapSewMove", *args, **kwargs)
def subdMappingManip(self, *args, **kwargs):
return self(u"subdMappingManip", *args, **kwargs)
def subdPlanarProj(self, *args, **kwargs):
return self(u"subdPlanarProj", *args, **kwargs)
def subdProjManip(self, *args, **kwargs):
return self(u"subdProjManip", *args, **kwargs)
def subdTweak(self, *args, **kwargs):
return self(u"subdTweak", *args, **kwargs)
def subdTweakUV(self, *args, **kwargs):
return self(u"subdTweakUV", *args, **kwargs)
def subdiv(self, *args, **kwargs):
return self(u"subdiv", *args, **kwargs)
def subdivCollapse(self, *args, **kwargs):
return self(u"subdivCollapse", *args, **kwargs)
def subdivComponentId(self, *args, **kwargs):
return self(u"subdivComponentId", *args, **kwargs)
def subdivReverseFaces(self, *args, **kwargs):
return self(u"subdivReverseFaces", *args, **kwargs)
def subdivToNurbs(self, *args, **kwargs):
return self(u"subdivToNurbs", *args, **kwargs)
def subdivToPoly(self, *args, **kwargs):
return self(u"subdivToPoly", *args, **kwargs)
def surfaceEdManip(self, *args, **kwargs):
return self(u"surfaceEdManip", *args, **kwargs)
def surfaceInfo(self, *args, **kwargs):
return self(u"surfaceInfo", *args, **kwargs)
def surfaceLuminance(self, *args, **kwargs):
return self(u"surfaceLuminance", *args, **kwargs)
def surfaceShader(self, *args, **kwargs):
return self(u"surfaceShader", *args, **kwargs)
def surfaceVarGroup(self, *args, **kwargs):
return self(u"surfaceVarGroup", *args, **kwargs)
def svgToPoly(self, *args, **kwargs):
return self(u"svgToPoly", *args, **kwargs)
def symmetryConstraint(self, *args, **kwargs):
return self(u"symmetryConstraint", *args, **kwargs)
def tangentConstraint(self, *args, **kwargs):
return self(u"tangentConstraint", *args, **kwargs)
def tension(self, *args, **kwargs):
return self(u"tension", *args, **kwargs)
def texLattice(self, *args, **kwargs):
return self(u"texLattice", *args, **kwargs)
def texMoveShellManip(self, *args, **kwargs):
return self(u"texMoveShellManip", *args, **kwargs)
def texSmoothManip(self, *args, **kwargs):
return self(u"texSmoothManip", *args, **kwargs)
def texSmudgeUVManip(self, *args, **kwargs):
return self(u"texSmudgeUVManip", *args, **kwargs)
def textButtonManip(self, *args, **kwargs):
return self(u"textButtonManip", *args, **kwargs)
def textManip2D(self, *args, **kwargs):
return self(u"textManip2D", *args, **kwargs)
def texture3dManip(self, *args, **kwargs):
return self(u"texture3dManip", *args, **kwargs)
def textureBakeSet(self, *args, **kwargs):
return self(u"textureBakeSet", *args, **kwargs)
def textureDeformer(self, *args, **kwargs):
return self(u"textureDeformer", *args, **kwargs)
def textureToGeom(self, *args, **kwargs):
return self(u"textureToGeom", *args, **kwargs)
def time(self, *args, **kwargs):
return self(u"time", *args, **kwargs)
def timeEditor(self, *args, **kwargs):
return self(u"timeEditor", *args, **kwargs)
def timeEditorClip(self, *args, **kwargs):
return self(u"timeEditorClip", *args, **kwargs)
def timeEditorClipBase(self, *args, **kwargs):
return self(u"timeEditorClipBase", *args, **kwargs)
def timeEditorTracks(self, *args, **kwargs):
return self(u"timeEditorTracks", *args, **kwargs)
def timeFunction(self, *args, **kwargs):
return self(u"timeFunction", *args, **kwargs)
def timeWarp(self, *args, **kwargs):
return self(u"timeWarp", *args, **kwargs)
def toggleManip(self, *args, **kwargs):
return self(u"toggleManip", *args, **kwargs)
def toggleOnLineManip(self, *args, **kwargs):
return self(u"toggleOnLineManip", *args, **kwargs)
def toolDrawManip(self, *args, **kwargs):
return self(u"toolDrawManip", *args, **kwargs)
def toolDrawManip2D(self, *args, **kwargs):
return self(u"toolDrawManip2D", *args, **kwargs)
def toonLineAttributes(self, *args, **kwargs):
return self(u"toonLineAttributes", *args, **kwargs)
def trackInfoManager(self, *args, **kwargs):
return self(u"trackInfoManager", *args, **kwargs)
def trans2dManip(self, *args, **kwargs):
return self(u"trans2dManip", *args, **kwargs)
def transUV2dManip(self, *args, **kwargs):
return self(u"transUV2dManip", *args, **kwargs)
def transferAttributes(self, *args, **kwargs):
return self(u"transferAttributes", *args, **kwargs)
def transform(self, *args, **kwargs):
return self(u"transform", *args, **kwargs)
def transformGeometry(self, *args, **kwargs):
return self(u"transformGeometry", *args, **kwargs)
def translateManip(self, *args, **kwargs):
return self(u"translateManip", *args, **kwargs)
def translateUVManip(self, *args, **kwargs):
return self(u"translateUVManip", *args, **kwargs)
def transposeMatrix(self, *args, **kwargs):
return self(u"transposeMatrix", *args, **kwargs)
def trim(self, *args, **kwargs):
return self(u"trim", *args, **kwargs)
def trimManip(self, *args, **kwargs):
return self(u"trimManip", *args, **kwargs)
def trimWithBoundaries(self, *args, **kwargs):
return self(u"trimWithBoundaries", *args, **kwargs)
def triplanarProjManip(self, *args, **kwargs):
return self(u"triplanarProjManip", *args, **kwargs)
def tripleShadingSwitch(self, *args, **kwargs):
return self(u"tripleShadingSwitch", *args, **kwargs)
def trsInsertManip(self, *args, **kwargs):
return self(u"trsInsertManip", *args, **kwargs)
def trsManip(self, *args, **kwargs):
return self(u"trsManip", *args, **kwargs)
def turbulenceField(self, *args, **kwargs):
return self(u"turbulenceField", *args, **kwargs)
def turbulenceManip(self, *args, **kwargs):
return self(u"turbulenceManip", *args, **kwargs)
def tweak(self, *args, **kwargs):
return self(u"tweak", *args, **kwargs)
def type(self, *args, **kwargs):
return self(u"type", *args, **kwargs)
def typeExtrude(self, *args, **kwargs):
return self(u"typeExtrude", *args, **kwargs)
def typeManip(self, *args, **kwargs):
return self(u"typeManip", *args, **kwargs)
def ufeProxyTransform(self, *args, **kwargs):
return self(u"ufeProxyTransform", *args, **kwargs)
def uniformField(self, *args, **kwargs):
return self(u"uniformField", *args, **kwargs)
def unitConversion(self, *args, **kwargs):
return self(u"unitConversion", *args, **kwargs)
def unknown(self, *args, **kwargs):
return self(u"unknown", *args, **kwargs)
def unknownDag(self, *args, **kwargs):
return self(u"unknownDag", *args, **kwargs)
def unknownTransform(self, *args, **kwargs):
return self(u"unknownTransform", *args, **kwargs)
def unpremultiply(self, *args, **kwargs):
return self(u"unpremultiply", *args, **kwargs)
def untrim(self, *args, **kwargs):
return self(u"untrim", *args, **kwargs)
def useBackground(self, *args, **kwargs):
return self(u"useBackground", *args, **kwargs)
def uv2dManip(self, *args, **kwargs):
return self(u"uv2dManip", *args, **kwargs)
def uvChooser(self, *args, **kwargs):
return self(u"uvChooser", *args, **kwargs)
def uvPin(self, *args, **kwargs):
return self(u"uvPin", *args, **kwargs)
def valueOverride(self, *args, **kwargs):
return self(u"valueOverride", *args, **kwargs)
def vectorAdjust(self, *args, **kwargs):
return self(u"vectorAdjust", *args, **kwargs)
def vectorExtrude(self, *args, **kwargs):
return self(u"vectorExtrude", *args, **kwargs)
def vectorProduct(self, *args, **kwargs):
return self(u"vectorProduct", *args, **kwargs)
def vectorRenderGlobals(self, *args, **kwargs):
return self(u"vectorRenderGlobals", *args, **kwargs)
def vertexBakeSet(self, *args, **kwargs):
return self(u"vertexBakeSet", *args, **kwargs)
def viewColorManager(self, *args, **kwargs):
return self(u"viewColorManager", *args, **kwargs)
def volumeAxisField(self, *args, **kwargs):
return self(u"volumeAxisField", *args, **kwargs)
def volumeBindManip(self, *args, **kwargs):
return self(u"volumeBindManip", *args, **kwargs)
def volumeFog(self, *args, **kwargs):
return self(u"volumeFog", *args, **kwargs)
def volumeLight(self, *args, **kwargs):
return self(u"volumeLight", *args, **kwargs)
def volumeNoise(self, *args, **kwargs):
return self(u"volumeNoise", *args, **kwargs)
def volumeShader(self, *args, **kwargs):
return self(u"volumeShader", *args, **kwargs)
def vortexField(self, *args, **kwargs):
return self(u"vortexField", *args, **kwargs)
def water(self, *args, **kwargs):
return self(u"water", *args, **kwargs)
def wire(self, *args, **kwargs):
return self(u"wire", *args, **kwargs)
def wood(self, *args, **kwargs):
return self(u"wood", *args, **kwargs)
def wrap(self, *args, **kwargs):
return self(u"wrap", *args, **kwargs)
def wtAddMatrix(self, *args, **kwargs):
return self(u"wtAddMatrix", *args, **kwargs)
def xformManip(self, *args, **kwargs):
return self(u"xformManip", *args, **kwargs)
createNode = _CreateNode()
```
#### File: CPMel/cmds/__createNode_script.py
```python
u"""
:创建时间: 2020/6/27 14:46
:作者: 苍之幻灵
:我的主页: https://cpcgskill.com
:QQ: 2921251087
:爱发电: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
"""
import maya.cmds as cmds
import re
mode = u'''\
def <<node>>(self, *args, **kwargs):
return self(u"<<node>>", *args, **kwargs)'''
strings = list()
nodes = cmds.allNodeTypes(ia=False)
nodes = [i for i in nodes if i.find(u"MASH") != 0]
nodes = [i for i in nodes if i.find(u"xgm") != 0]
nodes = [i for i in nodes if i.find(u"xgen") != 0]
nodes = [i for i in nodes if i.find(u"ai") != 0]
nodes = [i for i in nodes if i.find(u"AI") != 0]
nodes = [i for i in nodes if i.find(u"HIK") != 0]
nodes = [i for i in nodes if i.find(u"Boss") != 0]
nodes = [i for i in nodes if i.find(u"bifrost") != 0]
nodes = [i for i in nodes if len(i) < 20]
nodes = [i for i in nodes if len(i) > 3]
for i in nodes:
strings.append(re.sub("<<node>>", i, mode))
print(u"\n".join(strings))
```
#### File: cmds/melproc/base.py
```python
u"""
:创建时间: 2020/5/18 23:57
:作者: 苍之幻灵
:我的主页: https://cpcgskill.com
:QQ: 2921251087
:爱发电: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
"""
from collections import Iterable
from functools import partial
import maya.mel
import maya.cmds as mc
from toMel import toMel, toArray, toFloat, toInt, toString
eval = maya.mel.eval
def melProc(item, *args, **kwargs):
args_s = u" ".join(
[
toArray(i) if isinstance(i, Iterable) and (not isinstance(i, basestring)) else toMel(i)
for i in args
]
)
kwargs_s = u" ".join(
[
u"-%s %s" % (
k,
toArray(v) if isinstance(v, Iterable) and (
not isinstance(v, basestring)) else toMel(v)
)
for k, v in kwargs.items()
]
)
command_s = u"{0} {2} {1};".format(item, args_s, kwargs_s)
return eval(command_s)
class melbase(object):
def __getattribute__(self, item):
try:
return object.__getattribute__(self, item)
except AttributeError:
return partial(melProc, item)
```
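The `melbase` dispatcher above turns any attribute access into a MEL call: positional arguments are rendered as MEL values (iterables become MEL arrays), keyword arguments become `-flag value` pairs, and the `"{0} {2} {1}"` format places the flags before the positional arguments, as MEL expects. A minimal standalone sketch of the string being assembled (the quoting in `to_mel` is an assumption; the real conversion lives in `toMel.py`):
```python
# Hypothetical stand-in for toMel/melProc, to show the MEL string layout only.
def to_mel(v):
    return '"%s"' % v if isinstance(v, str) else str(v)

def build_mel(item, args, kwargs):
    args_s = " ".join(to_mel(a) for a in args)
    kwargs_s = " ".join("-%s %s" % (k, to_mel(v)) for k, v in kwargs.items())
    # flags come before arguments, mirroring melProc's "{0} {2} {1}" format
    return "{0} {2} {1};".format(item, args_s, kwargs_s)

print(build_mel("polySphere", [], {"r": 2}))   # polySphere -r 2 ;
```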
#### File: src/CPMel/__init__.py
```python
u"""
:创建时间: 2020/5/18 23:57
:作者: 苍之幻灵
:我的主页: https://www.cpcgskill.com
:QQ: 2921251087
:爱发电: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
* 获得路径模块
* PATH : CPMel所在路径
* MAYAPLUG : CPMel的Maya插件所在路径
* ISDEBUG : 是否处在Debug模式
* 快速入门:
* 导入:
>>> import CPMel.cmds as cc
>>> import CPMel.tool as ctl
* 命令:
* maya.cmds:
>>> import maya.cmds as cmds
>>> cmds.joint()
u"xxx"
* CPMel.cmds
>>> cc.joint()
joint(u"xxx")
* 命令参数转化规则:
* CPObject = str ,Double3 = (x,y,z), Matrix = (x,x,x,..*16)
* 更加方便的创建节点的方法:
>>> cc.createNode.transform()
transform(u"transform")
* mel方法访问:
>>> cc.mel.SmoothSkinWeights()
None
* 事件引擎:
>>> class printDg(cevent.Dg):
... def createNode(self, node):
... print(node)
... def removeNode(self, node):
... print(node)
>>> obj = printDg()
>>> cc.createNode.transform()
transform1 << 打印
transform(u'transform1')
* 工具:
>>> ctl.decode("你好世界")
u'你好世界'
>>> ctl.MayaObjectData(u"time1")
<CPMel.tool.MayaObjectData object at 0x0000000053CB32E8>
>>> ctl.undoBlock(xxx type = func)# Qt撤销的实现
xxx type = func
* 视频版教程: https://www.aboutcg.org/courseDetails/1031/introduce
* 2.5版本更新 :
* 使用了预编译脚本优化了文件体积
* 修复了一些BUG
* 2.6版本更新 :
* 解决了qt错误处理问题
* 错误与mayaplug可以运行多个了
* 实现了相对运行
* 区分debug版与release版
* 去除了static_cmds中无用的注释
* 通过文档注释进行类型指定优化了在pycharm中编写程序的补全效果
* 去除了mayaPlug模块下无用的程序
* 2.7版本更新 :
* 优化了导入实现
* 使用CLI
注意2.7的CLI还不完善将于!!!CPMel3版本稳定CLI功能
"""
from . import initializeMaya
import os
import sys
import maya.cmds
sys.cpmel_data = dict()
MAYAINDEX = int(maya.cmds.about(v=True))
ISDEBUG = True
try:
PATH = os.path.dirname(os.path.abspath(__file__))
if type(PATH) == str:
try:
PATH = PATH.decode("utf8")
except UnicodeDecodeError:
try:
PATH = PATH.decode("gbk")
except UnicodeDecodeError:
try:
PATH = PATH.decode("GB18030")
except UnicodeDecodeError:
try:
PATH = PATH.decode("GB2312")
except UnicodeDecodeError:
PATH = unicode(PATH)
PATH = PATH.encode("utf8").decode("utf8")
except:
PATH = os.path.dirname(os.path.abspath(__file__))
MAYAPLUG = u'%s\\mayaPlug' % PATH
from . import mayaPlug
from . import core
from . import api
from . import cmds
from . import event
from . import ui
from . import tool
# DELETE #
if ISDEBUG:
reload(mayaPlug)
reload(core)
reload(api)
reload(cmds)
reload(event)
reload(ui)
reload(tool)
# \DELETE #
cmds.upcommands()
maya.cmds.pluginInfo(cc=cmds.upcommands)
del maya
if hasattr(sys, "cpmel_data"):
del sys.cpmel_data
```
#### File: CPMel/tool/__init__.py
```python
u"""
:创建时间: 2020/5/27 23:26
:作者: 苍之幻灵
:我的主页: https://cpcgskill.com
:QQ: 2921251087
:爱发电: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
一个提供了Python开发中的便利功能的模块
"""
import re
import sys
import json
import functools
import maya.api.OpenMaya as OpenMaya
import maya.cmds as cmds
from .. import core as cmcore
def undoBlock(fn):
u"""
撤销块装饰器将所有maya命令的撤销打包到一个块里
:param fn:
:return:
"""
def _(*args, **kwargs):
cmds.undoInfo(ock=True)
try:
try:
return fn(*args, **kwargs)
except:
OpenMaya.MGlobal.displayError(cmcore.error.formatGuiException(*sys.exc_info()))
finally:
cmds.undoInfo(cck=True)
_.__name__ = fn.__name__
_.__doc__ = fn.__doc__
_.__module__ = fn.__module__
return _
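# Hedged usage sketch: decorate a tool entry point so that every Maya edit it
# makes undoes as one chunk (build_rig is a hypothetical user function):
#
#     @undoBlock
#     def build_rig():
#         cmds.joint()
#         cmds.joint()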
funcs = list()
def scriptStringFunc(fn, *args, **kwargs):
u"""
可以将Python函数包装为Mel脚本
用于Maya没有提供函数输入的脚本参数
:param fn:
:param args:
:param kwargs:
:return:
"""
fn = functools.partial(fn, *args, **kwargs)
funcs.append(fn)
return u'python "__import__(\\"ctypes\\").cast({}, __import__(\\"ctypes\\").py_object).value()";'.format(id(fn))
def scriptStringString(s=u""):
u"""
可以将Python 回调字符串包装为Mel脚本
用于Maya没有提供函数输入的脚本参数
:param fn:
:return:
:rtype:bool
"""
return u'python "%s";' % s.replace('"', '\\"').replace("\n", "\\n")
def decode(s=''):
u"""
字符串解码函数
:param s:
:return:
"""
if not isinstance(s, basestring):
try:
s = str(s)
except:
s = unicode(s)
if type(s) == str:
try:
return s.decode("UTF-8")
except UnicodeDecodeError:
try:
return s.decode("GB18030")
except UnicodeDecodeError:
try:
return s.decode("Shift-JIS")
except UnicodeDecodeError:
try:
return s.decode("EUC-KR")
except UnicodeDecodeError:
return unicode(s)
return s.encode("UTF-8").decode("UTF-8")
class Dict(object):
u"""
一个与MayaObjectData交互的为dict类
"""
def __init__(self, datacore, name, updatafuncs):
self.datacore = datacore
self.name = name
self.updatafuncs = updatafuncs
def this(self):
return self.datacore.value[self.name]
def __iter__(self):
return self.datacore.value[self.name].__iter__()
def __setitem__(self, key, value):
self.datacore.value[self.name].__setitem__(key, value)
self.updata()
def __getitem__(self, item):
self.datacore.read()
return self.datacore.value[self.name].__getitem__(item)
def __str__(self):
return self.datacore.value[self.name].__str__()
def __repr__(self):
return self.datacore.value[self.name].__repr__()
def __unicode__(self):
return self.datacore.value[self.name].__unicode__()
def updata(self):
for i in self.updatafuncs:
i(self.datacore.value[self.name])
self.datacore.write()
def clear(self, *args):
return self.datacore.value[self.name].clear(*args)
def copy(self, *args):
return self.datacore.value[self.name].copy(*args)
def fromkeys(self, *args):
return self.datacore.value[self.name].fromkeys(*args)
def get(self, *args):
return self.datacore.value[self.name].get(*args)
def has_key(self, *args):
return self.datacore.value[self.name].has_key(*args)
def items(self, *args):
return self.datacore.value[self.name].items(*args)
def iteritems(self, *args):
return self.datacore.value[self.name].iteritems(*args)
def iterkeys(self, *args):
return self.datacore.value[self.name].iterkeys(*args)
def itervalues(self, *args):
return self.datacore.value[self.name].itervalues(*args)
def keys(self, *args):
return self.datacore.value[self.name].keys(*args)
def pop(self, *args):
return self.datacore.value[self.name].pop(*args)
def popitem(self, *args):
return self.datacore.value[self.name].popitem(*args)
def setdefault(self, *args):
return self.datacore.value[self.name].setdefault(*args)
def update(self, *args):
return self.datacore.value[self.name].update(*args)
def values(self, *args):
return self.datacore.value[self.name].values(*args)
def viewitems(self, *args):
return self.datacore.value[self.name].viewitems(*args)
def viewkeys(self, *args):
return self.datacore.value[self.name].viewkeys(*args)
def viewvalues(self, *args):
return self.datacore.value[self.name].viewvalues(*args)
class MayaObjectData(object):
re_compile = re.compile("_")
def __init__(self, obj=u"", name=u"default"):
obj = str(obj)
if not cmds.objExists(obj):
raise cmcore.CPMelError(u"对象不存在")
name_hash = hash(name)
if name_hash < 0:
self.name = u"CPMEL_MayaObjectData__%d" % (name_hash * -1)
else:
self.name = u"CPMEL_MayaObjectData_%d" % (name_hash)
sel = OpenMaya.MSelectionList()
sel.add(obj)
obj = sel.getDependNode(0)
fn = OpenMaya.MFnDependencyNode(obj)
self.uuid = fn.uuid()
if not cmds.objExists(self.__name):
cmds.addAttr(self.__path, ln=self.name, dt="string")
self.value = dict()
self.write()
else:
self.read()
@property
def __path(self):
sel = OpenMaya.MSelectionList()
sel.add(self.uuid)
if sel.length() < 1:
raise cmcore.CPMelError(u"节点不存在")
return sel.getSelectionStrings()[0]
@property
def __name(self):
return u"%s.%s" % (self.__path, self.name)
def write(self):
json_string = decode(json.dumps(self.value))
cmds.setAttr(u"%s.%s" % (self.__path, self.name), json_string, type="string")
def read(self):
try:
json_string = cmds.getAttr(u"%s.%s" % (self.__path, self.name))
except RuntimeError:
raise cmcore.CPMelError(u"无法正确读取数据")
try:
self.value = json.loads(json_string)
except ValueError:
raise cmcore.CPMelError(u"无法正确解析数据")
def addValueBlock(self, name):
name = u"@VALUE->%s" % name
return MayaObjectData._VlaueBlock(self, name)
def addDict(self, name, d_v, updata_funcs):
name = u"@DICT->%s" % name
if name in self.value:
o = self.value[name]
else:
o = dict()
self.value[name] = o
for i in d_v:
if not i in o:
o[i] = d_v[i]
self.write()
return Dict(self, name, updata_funcs)
def getDict(self, name):
if name in self.value:
o = self.value[name]
else:
o = dict()
self.value[name] = o
return o
class _VlaueBlock(object):
def __init__(self, core_object, name=u""):
self.core_object = core_object
self.name = name
if not self.name in self.core_object.value:
self.core_object.value[self.name] = None
self.core_object.write()
def setV(self, v):
self.core_object.value[self.name] = v
self.core_object.write()
def getV(self):
self.core_object.read()
return self.core_object.value[self.name]
v = property(getV, setV)
class Block(object):
def upDataFunc(self):
pass
def metaclass(self, name="default"):
_block_name = name
def _metaclass(name, bases, attrs):
# get the update function from the class attributes
try:
upDataFunc = attrs["upDataFunc"]
attrs.pop("upDataFunc")
except KeyError:
try:
upDataFunc = attrs[u"upDataFunc"]
attrs.pop(u"upDataFunc")
except KeyError:
def upDataFunc(self):
pass
upDataFunc.is_do_new_updatafunc = False
def newUpDataFunc(this):
if not upDataFunc.is_do_new_updatafunc:
upDataFunc.is_do_new_updatafunc = True
upDataFunc(this)
upDataFunc.is_do_new_updatafunc = False
cls = type(name, bases, attrs)
block_name = u"@CLS->%s_%s_%s" % (cls.__module__, name, _block_name)
if not block_name in self.value:
this_value_block = dict()
self.value[block_name] = this_value_block
else:
this_value_block = self.value[block_name]
for i in dir(cls):
if not self.re_compile.match(i):
if not i in this_value_block:
this_value_block[i] = getattr(cls, i)
def __get(k, this):
self.read()
return self.value[block_name][k]
def __set(k, this, v):
try:
if v == self.value[block_name][k]:
return None
except KeyError:
pass
self.value[block_name][k] = v
self.write()
newUpDataFunc(this)
setattr(cls, i, property(functools.partial(__get, i), functools.partial(__set, i)))
self.write()
return cls()
return _metaclass
``` |
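`MayaObjectData` persists a JSON blob on a node's string attribute and addresses the node by UUID, so the data survives renames. A hedged usage sketch (runs inside Maya; `time1` exists in every scene, the attribute and key names here are illustrative):
```python
import CPMel.tool as ctl

data = ctl.MayaObjectData(u"time1", name=u"rig_settings")
slot = data.addValueBlock(u"version")  # one JSON-serialized slot on the node
slot.v = 3                             # setV: writes through to the attribute
print(slot.v)                          # getV: re-reads the attribute -> 3
```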
{
"source": "292916808/MolCloze",
"score": 2
} |
#### File: model/attention/multi_head.py
```python
import torch.nn as nn
from .single_head import Attention, EdgeGuidedAttention
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
self.d_k = d_model // h
self.h = h
self.linear_layers = nn.ModuleList(
[
nn.Linear(d_model, d_model)
for _ in range(3)
]
)
self.output_linear = nn.Linear(d_model, d_model)
self.attention = Attention()
# edge guided attention
# self.attention = EdgeGuidedAttention(d_k=self.d_k)
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
batch_size = query.size(0)
# 1) Do all the linear projections in batch from d_model ==> h * d_k
# query/key/value: (batch_size, attn_heads, max_atoms, d_k)
query, key, value = [
l(x).view(batch_size, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linear_layers, (query, key, value))
]
# 2) Apply attention on all the projected vectors in batch.
x, attn = self.attention(query, key, value, mask=mask, dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear layer.
x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h * self.d_k)
return self.output_linear(x)
```
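As a sanity check, the shape bookkeeping of steps 1-3 can be replayed with plain tensors; this sketch inlines a scaled dot-product in place of the `Attention` module:
```python
import torch

batch, heads, seq, d_model = 2, 4, 10, 32
d_k = d_model // heads

x = torch.randn(batch, seq, d_model)
# step 1: project and split into heads -> (batch, heads, seq, d_k)
q = x.view(batch, -1, heads, d_k).transpose(1, 2)
# step 2: scaled dot-product attention -> (batch, heads, seq, d_k)
scores = torch.softmax(q @ q.transpose(-2, -1) / d_k ** 0.5, dim=-1)
out = scores @ q
# step 3: concat the heads back -> (batch, seq, d_model)
out = out.transpose(1, 2).contiguous().view(batch, -1, heads * d_k)
print(out.shape)  # torch.Size([2, 10, 32])
```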
#### File: model/decoder/nn_bilinear.py
```python
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class NNBilinearDecoder(nn.Module):
def __init__(self, input_dim: int, dropout: float = 0.0):
super(NNBilinearDecoder, self).__init__()
self.dropout = nn.Dropout(dropout)
self.act = F.sigmoid
self.relation = Parameter(torch.FloatTensor(input_dim, input_dim))
ffn = [
nn.Linear(input_dim, input_dim),
nn.ReLU(),
]
self.ffn = nn.Sequential(*ffn)
self.reset_parameter()
def reset_parameter(self):
for param in self.parameters():
if param.dim() == 1:
nn.init.constant_(param, 0)
elif param.dim() == 0:
nn.init.constant_(param, 1.)
else:
nn.init.xavier_normal_(param)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""
:param inputs: (batch_size, seq_len, hidden_size)
:return: (batch_size * seq_len * seq_len, )
"""
inputs_row = inputs
inputs_col = inputs
inputs_row = self.ffn(self.dropout(inputs_row))
# transpose the last two dims so bmm yields pairwise (seq_len x seq_len) scores per batch
inputs_col = self.ffn(self.dropout(inputs_col)).transpose(1, 2)
# self.relation is 2-D, so matmul broadcasts it over the batch dimension
intermediate_product = torch.matmul(inputs_row, self.relation)
rec = torch.bmm(intermediate_product, inputs_col)
outputs = self.act(rec)
outputs = outputs.view(-1)
return outputs
```
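With the shape handling above, a quick smoke test shows one score per ordered pair of sequence positions:
```python
import torch
# NNBilinearDecoder is the class defined in this module
dec = NNBilinearDecoder(input_dim=8)
h = torch.randn(2, 5, 8)   # (batch, seq_len, hidden)
print(dec(h).shape)        # torch.Size([50]) == 2 * 5 * 5 pairwise scores
```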
#### File: model/embedding/segment.py
```python
import torch.nn as nn
class SegmentEmbedding(nn.Embedding):
def __init__(self, embed_size=512):
super(SegmentEmbedding, self).__init__(3, embed_size, padding_idx=0)
```
#### File: bert/model/transformer.py
```python
import torch
import torch.nn as nn
from .attention import MultiHeadedAttention
from .utils import SublayerConnection, PositionwiseFeedForward
from .utils import NaiveSublayerConnection, RawSublayerConnection, GraphNaiveSublayerConnection, GraphRawSublayerConnection
class TransformerBlock(nn.Module):
def __init__(self, hidden, attn_heads, feed_forward_hidden, dropout,
residual_type, without_ffn):
"""
:param hidden: hidden size of transformer
:param attn_heads: head sizes of multi-head attention
:param feed_forward_hidden: feed-forward hidden size, usually 4 * hidden
:param dropout: dropout rate
"""
super(TransformerBlock, self).__init__()
self.attention = MultiHeadedAttention(h=attn_heads, d_model=hidden)
self.feed_forward = PositionwiseFeedForward(d_model=hidden, d_ff=feed_forward_hidden, dropout=dropout)
if residual_type == 'naive':
self.input_sublayer = SublayerConnection(size=hidden, dropout=dropout)
elif residual_type == 'raw':
self.input_sublayer = RawSublayerConnection(size=hidden, dropout=dropout)
elif residual_type == 'graph_naive':
self.input_sublayer = GraphNaiveSublayerConnection(size=hidden, dropout=dropout)
elif residual_type == 'graph_raw':
self.input_sublayer = GraphRawSublayerConnection(size=hidden, dropout=dropout)
else:
self.input_sublayer = SublayerConnection(size=hidden, dropout=dropout)
self.output_sublayer = SublayerConnection(size=hidden, dropout=dropout) if not without_ffn else None
self.dropout = nn.Dropout(p=dropout)
def forward(self, x, mask, raw_x=None, adj=None):
x = self.input_sublayer(x, lambda x_: self.attention.forward(x_, x_, x_, mask=mask),
raw_x, adj)
if self.output_sublayer is not None:
x = self.output_sublayer(x, self.feed_forward,
raw_x, adj)
else:
x = self.dropout(torch.relu(x))
return self.dropout(x)
```
#### File: bert/training/optim_schedule.py
```python
import numpy as np
class Scheduleoptim(object):
def __init__(self, optimizer, d_model, n_warmup_steps, grad_clip=None):
self.optimizer = optimizer
self.n_warmup_steps = n_warmup_steps
self.n_current_steps = 0
self.init_lr = np.power(d_model, -0.5)
self.grad_clip = grad_clip
def step_and_update_lr(self):
# apply the warmup learning-rate schedule before stepping
self._update_learning_rate()
if self.grad_clip is not None:
self.clip_gradient(self.grad_clip)
self.optimizer.step()
def clip_gradient(self, grad_clip):
"""
Clips gradients computed during backpropagation to avoid gradient explosion.
:param grad_clip: clip value
"""
for group in self.optimizer.param_groups:
for param in group["params"]:
if param.grad is not None:
param.grad.data.clamp_(-grad_clip, grad_clip)
def zero_grad(self):
self.optimizer.zero_grad()
def _get_lr_scale(self):
return np.min(
[
np.power(self.n_current_steps, -0.5),
np.power(self.n_warmup_steps, -1.5) * self.n_current_steps
]
)
def _update_learning_rate(self):
self.n_current_steps += 1
lr = self.init_lr * self._get_lr_scale()
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
```
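`Scheduleoptim` is the standard Transformer ("Noam") warmup schedule, lr = d_model^-0.5 * min(step^-0.5, step * warmup^-1.5): a linear ramp up to `n_warmup_steps`, then inverse-square-root decay. A standalone sketch of the curve:
```python
import numpy as np

d_model, warmup = 768, 10000

def lr(step):
    return d_model ** -0.5 * min(np.power(step, -0.5), np.power(warmup, -1.5) * step)

for step in (1, 5000, 10000, 50000):
    print(step, lr(step))
# the two branches cross at step == warmup, where the rate peaks
```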
#### File: bert/training/trainer.py
```python
import sys
import tqdm
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from .optim_schedule import Scheduleoptim
from bert.model import MolBertLM, MolBertDis, LinearActivation
from bert.model.decoder import InnerProductDecoder, BilinearDecoder, NNBilinearDecoder
def sampling(input_ids, generator_logits, masked_lm_labels):
generator_id = torch.argmax(generator_logits, dim=-1).detach()
origin_input = input_ids.clone()
fake_input = torch.where(masked_lm_labels < 1, origin_input, generator_id)
corrupt_label = (masked_lm_labels != 0)
origin_input[corrupt_label] = masked_lm_labels[corrupt_label]
discriminator_label = torch.eq(origin_input, fake_input)
return generator_id, discriminator_label
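# In short: at masked positions (masked_lm_labels > 0) the generator's argmax
# token replaces the input; the true token is restored into origin_input for
# comparison, and discriminator_label is True wherever the fake sequence still
# matches the original -- the discriminator must find the generator's mistakes.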
def get_energy_loss(device, logits, targets, weights, vocab_size, batch_size, seq_len=140):
# targets = torch.unsqueeze(targets, dim=1)
# print(torch.zeros(batch_size, seq_len, vocab_size).size(), targets.unsqueeze(-1).size())
oh_labels = torch.zeros(batch_size, seq_len, vocab_size).to(device).scatter_(2, targets.unsqueeze(-1), 1)
# ones = torch.sparse.torch.eye(vocab_size).to(device)
# oh_labels = ones.index_select(0, targets)
log_probs = F.log_softmax(logits, dim=-1)
# print(log_probs.size(), oh_labels.size())
label_log_probs = -torch.sum(log_probs * oh_labels, dim=-1)
# print(weights.is_cuda, label_log_probs.is_cuda)
numerator = torch.sum(weights * label_log_probs)
denominator = torch.sum(weights) + 1e-6
loss = numerator/denominator
return loss
def get_token_energy_logits(device, LinearAct, inputs, table, weights, targets, vocab_size, batch_size):
energy_hidden = LinearAct(inputs)
# print(energy_hidden.size(), table.size())
logits = torch.matmul(energy_hidden, table.transpose(-2,-1))
energy_loss = get_energy_loss(device, logits, targets, weights, vocab_size, batch_size)
return logits, energy_loss
def get_cloze_outputs(device, candidate_mask, LinearAct, inputs, table, targets, vocab_size, batch_size):
weights = candidate_mask.type(torch.FloatTensor).to(device)
logits, energy_loss = get_token_energy_logits(device, LinearAct, inputs, table, weights, targets, vocab_size, batch_size)
return logits, energy_loss, weights
def get_discriminator_energy_loss(device, hidden, dis_output, criterion, input_ids, cloze_logits, weights, vocab_size, \
batch_size, discriminator_label, candidate_mask, mask_prob=0.15, seq_len=140):
# print(input_ids.size())
oh_labels = torch.zeros(batch_size, seq_len, vocab_size).to(device).scatter_(2, input_ids.unsqueeze(-1), 1)
# print(cloze_logits.size(), oh_labels.size())
log_q = torch.sum(F.log_softmax(cloze_logits, -1) * oh_labels.type(torch.FloatTensor).to(device), dim=-1).detach()
# print(logits.size(), log_q.size())
logits = torch.squeeze(hidden(dis_output), -1)
logits += log_q
logits += torch.log(mask_prob/(1-mask_prob)*torch.ones_like(logits))
unmask_labels = torch.mul(discriminator_label, candidate_mask).type(torch.FloatTensor).to(device)
losses = criterion(logits, unmask_labels) * weights
loss = torch.sum(losses)/(torch.sum(weights)+1e-6)
return loss
class MolBertTrainer(object):
def __init__(self, bert, vocab_size,
train_loader, test_loader, batch_size,
lr=1e-4, betas=(0.9, 0.999),
weight_decay=0.01, warmup_steps=10000,
with_cuda=True, cuda_devices=None,
with_span=False, with_mask=False, with_proximity=False,
log_freq=10, logger=None):
super(MolBertTrainer, self).__init__()
cuda_condition = torch.cuda.is_available() and with_cuda
self.device = torch.device("cuda:0" if cuda_condition else "cpu")
self.vocab_size = vocab_size
self.bert = bert
self.model = MolBertLM(bert, vocab_size).to(self.device)
self.discriminator = MolBertDis(bert).to(self.device)
self.get_energy_logits = LinearActivation(self.bert.hidden, self.bert.hidden).to(self.device)
self.dis_energy_preds = LinearActivation(self.bert.hidden, 1).to(self.device)
if with_cuda and torch.cuda.device_count() > 1:
logger.info('Using {} GPUs for Bert'.format(torch.cuda.device_count()))
self.model = nn.DataParallel(self.model, device_ids=cuda_devices)
self.discriminator = nn.DataParallel(self.discriminator, device_ids=cuda_devices)
self.train_loader = train_loader
self.test_loader = test_loader
self.optim = torch.optim.Adam(self.model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)
self.optim_schedule = Scheduleoptim(self.optim, self.bert.hidden, n_warmup_steps=warmup_steps, grad_clip=5.)
self.criterion = nn.NLLLoss(ignore_index=0)
self.dis_det_criterion = nn.BCEWithLogitsLoss()
self.dis_ene_criterion = nn.BCEWithLogitsLoss()
self.with_span = with_span
self.with_mask = with_mask
self.with_proximity = with_proximity
if with_proximity:
# self.decoder = BilinearDecoder(input_dim=bert.hidden)
self.decoder = InnerProductDecoder(input_dim=bert.hidden)
self.dec_criterion = nn.MSELoss()
self.log_freq = log_freq
self.logger = logger
self.batch_size = batch_size
logger.info('Total parameters: {}'.format(
sum([p.nelement() for p in self.model.parameters()])+sum([p.nelement() for p in self.discriminator.parameters()])
))
# def _tie_embeddings(self):
def train(self, epoch):
self.iteration(epoch, self.train_loader)
def test(self, epoch):
self.iteration(epoch, self.test_loader, train=False)
def iteration(self, epoch, loader, train=True):
logger = self.logger
str_code = 'train' if train else 'test'
data_iter = tqdm.tqdm(
enumerate(loader),
desc='{} : epoch {}'.format(str_code, epoch),
total=len(loader),
bar_format='{l_bar}{r_bar}'
)
avg_loss = 0.0
avg_dec_loss = 0.0
for i, data in data_iter:
# 0. batch_data will be sent into the device
data = {key: value.to(self.device) for key, value in data.items()}
bert_table = [i for i in range(self.vocab_size)]
bert_table = torch.LongTensor(bert_table).to(self.device)
bert_table = self.bert.embedding.token(bert_table)
# 1. forward masked_lm_model
# # for span masking
# mask_lm_output, bert_outputs = self.model.forward(data['bert_input'], data['segment_label'])
if self.with_span:
mask_lm_output, _, bert_output = self.model.forward(data['bert_input'], data['segment_label'],
data['masked_adj'])
elif self.with_mask:
mask_lm_output, _, bert_output = self.model.forward(data['bert_input'], data['segment_label'],
data['masked_adj'])
elif self.with_proximity:
mask_lm_output, _, bert_output = self.model.forward(data['bert_input'], data['segment_label'],
data['masked_adj'])
rec_adj = self.decoder(bert_output)
ori_adj = data['bert_adj'].view(-1)
dec_loss = self.dec_criterion(rec_adj, ori_adj)
else:
mask_lm_output, _, bert_output = self.model.forward(data['bert_input'], data['segment_label'])
# print(data['bert_input'].size()[0])
# 2-1. NLL(negative log likelihood) loss
# mask_lm_output = (mask_lm_output < 1e-6) * torch.ones_like(mask_lm_output) * 1e-6 + (mask_lm_output >= 1e-6) * mask_lm_output
mask_loss = self.criterion(mask_lm_output.transpose(1, 2), data['bert_label'])
# print(data['bert_table'][0])
# print(self.bert.embedding.token(data['bert_table'][0]).size())
cloze_logits, gen_ene_loss, weights = get_cloze_outputs(self.device, data['bert_mask'], self.get_energy_logits, bert_output, \
bert_table, data['bert_input'], self.vocab_size, data['bert_input'].size()[0])
# assumed fix: sampling() expects the generator's token logits (mask_lm_output), not the hidden states
discriminator_input, discriminator_label = sampling(data['bert_input'], mask_lm_output, data['bert_label'])
if self.with_mask:
dis_preds, dis_output = self.discriminator.forward(discriminator_input, data['segment_label'], data['masked_adj'])
else:
dis_preds, dis_output = self.discriminator.forward(discriminator_input, data['segment_label'])
# print(dis_output.size())
# print(data['bert_label'].size(), mask_lm_output.size(), dis_output.size())
dis_det_loss = self.dis_det_criterion(dis_preds.view(-1), discriminator_label.view(-1).float())
dis_ene_loss = get_discriminator_energy_loss(self.device, self.dis_energy_preds, dis_output, self.dis_ene_criterion, discriminator_input, cloze_logits, weights, \
self.vocab_size, self.batch_size, discriminator_label, data['bert_mask'])
# print(gen_ene_loss, dis_ene_loss)
loss = mask_loss + 20 * dis_det_loss + (gen_ene_loss + dis_ene_loss)
if np.isnan(loss.item()):
print(data['bert_input'])
print(mask_lm_output)
sys.exit(-1)
if self.with_proximity:
loss += dec_loss
avg_dec_loss += dec_loss
# 3. backward and optimization only in train
if train:
self.optim_schedule.zero_grad()
loss.backward()
self.optim_schedule.step_and_update_lr()
avg_loss += loss.item()
post_fix = {
'epoch': epoch,
'iter': i,
'avg_loss': avg_loss / (i + 1),
'loss': loss.item(),
'mask_loss': mask_loss.item(),
'gen_ene_loss': gen_ene_loss.item(),
'dis_det_loss': dis_det_loss.item(),
'dis_ene_loss': dis_ene_loss.item()
}
if self.with_proximity:
post_fix.update(
{'avg_dec_loss': avg_dec_loss.item() / (i + 1)}
)
if i % self.log_freq == 0:
data_iter.write(str(post_fix))
logger.info('{} : epoch {}, avg_loss = {:.4f}'.format(
str_code, epoch, avg_loss / len(data_iter)
))
def save(self, epoch, file_path='output/bert_trained.model'):
logger = self.logger
output_filepath = file_path + '.ep{}'.format(epoch)
torch.save(self.bert.cpu(), output_filepath)
self.bert.to(self.device)
logger.info('Epoch {:>3d} Model save on: {}'.format(
epoch, output_filepath
))
```
#### File: bert/utils/logger_utils.py
```python
import os
import logging
def create_logger(name: str, save_dir: str = None, quiet: bool = False) -> logging.Logger:
"""
Creates a logger with a stream handler and two file handlers.
The stream handler prints to the screen depending on the value of `quiet`.
One file handler (verbose.log) saves all logs, the other (quiet.log) only saves important info.
:param name: The name of the logger.
:param save_dir: The directory in which to save the logs.
:param quiet: Whether the stream handler should be quiet (i.e. print only important info).
:return: The logger.
"""
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.propagate = False
# Set logger depending on desired verbosity
ch = logging.StreamHandler()
if quiet:
ch.setLevel(logging.INFO)
else:
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
if save_dir is not None:
# os.makedirs(save_dir)
fh_v = logging.FileHandler(os.path.join(save_dir, 'verbose.log'))
fh_v.setLevel(logging.DEBUG)
fh_q = logging.FileHandler(os.path.join(save_dir, 'quiet.log'))
fh_q.setLevel(logging.INFO)
logger.addHandler(fh_v)
logger.addHandler(fh_q)
return logger
```
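A hedged usage sketch; note that `save_dir` must already exist, since the `os.makedirs` call inside `create_logger` is commented out:
```python
import os
from bert.utils.logger_utils import create_logger

os.makedirs('output', exist_ok=True)   # create_logger itself does not mkdir
logger = create_logger('pretrain', save_dir='output', quiet=True)
logger.debug('written to verbose.log only (stream handler is at INFO)')
logger.info('written to the screen and to both log files')
```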
#### File: bert/walks/deepwalk.py
```python
import random
class DeepWalker(object):
def __init__(self, G, num_workers=4):
self.G = G
self.num_workers = num_workers
def walk(self, walk_length, start_node):
G = self.G
walk = [start_node]
while len(walk) < walk_length:
cur = walk[-1]
neighbors = list(G.neighbors(cur))
if len(neighbors) > 0:
walk.append(random.choice(neighbors))
else:
break
return walk
```
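`DeepWalker.walk` is a uniform random walk that stops early at a node with no neighbors; its output lists are the "sentences" later fed to the vocab/BERT pipeline. A sketch with networkx (an assumption, but `G.neighbors(cur)` matches its API):
```python
import random
import networkx as nx
from bert.walks.deepwalk import DeepWalker

random.seed(0)
G = nx.path_graph(5)                      # 0 - 1 - 2 - 3 - 4
walker = DeepWalker(G)
print(walker.walk(walk_length=4, start_node=0))
# e.g. [0, 1, 2, 1] -- a uniform choice among neighbors at every step
```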
#### File: 292916808/MolCloze/pretrain_molcloze.py
```python
import os
from torch.utils.data import DataLoader
from argparse import ArgumentParser, Namespace
from bert.utils.logger_utils import create_logger
from bert.utils.data_utils import get_pubchem_zinc_path, get_available_data_types
from bert.dataset import WordVocab, MolBertDataset
from bert.model import MolBert
from bert.training import MolBertTrainer
pubchem_zinc_path = get_pubchem_zinc_path()
def add_args():
parser = ArgumentParser()
# pretrain dataset
parser.add_argument('--data_type', type=str, choices=get_available_data_types() + ['baai'], default='1m')
parser.add_argument('--suffix', type=str, choices=['.txt', '.smi'], default='.txt')
parser.add_argument('--min_freq', type=int, default=5)
parser.add_argument('--radius', type=int, default=2)
parser.add_argument('--train_dataset', type=str, default=None)
parser.add_argument('--test_dataset', type=str, default=None)
parser.add_argument('--vocab_path', type=str, default=None)
# parser.add_argument('--output_path', type=str, default=None)
parser.add_argument('--on_memory', type=bool, default=True)
parser.add_argument('--corpus_lines', type=int, default=None)
# bert architecture
parser.add_argument('--hidden', type=int, default=768)
parser.add_argument('--layers', type=int, default=4)
parser.add_argument('--attn_heads', type=int, default=12)
parser.add_argument('--seq_len', type=int, default=140)
parser.add_argument('--with_span', action='store_true', default=False)
parser.add_argument('--with_mask', action='store_true', default=False)
parser.add_argument('--with_proximity', action='store_true', default=False)
parser.add_argument('--residual_type', type=str, choices=['graph_raw', 'graph_naive', 'raw', 'naive'],
default='naive')
parser.add_argument('--without_ffn', action='store_true', default=False,
help='whether not to use ffn in transformer.')
# training
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--num_workers', type=int, default=5)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--adam_weight_decay', type=float, default=0.01)
parser.add_argument('--adam_beta1', type=float, default=0.9)
parser.add_argument('--adam_beta2', type=float, default=0.999)
# device
parser.add_argument('--gpu', type=str, default='2')
parser.add_argument('--cuda_devices', type=int, nargs='+', default=None)
# output
parser.add_argument('--log_freq', type=int, default=10)
args = parser.parse_args()
return args
def modify_args(args: Namespace):
args.with_adj = args.with_span or args.with_mask or args.with_proximity
if args.data_type in get_available_data_types():
if not args.with_adj:
if args.suffix == '.smi':
args.train_dataset = os.path.join(
pubchem_zinc_path, 'train_{}'.format(args.data_type), 'total.smi'
)
args.test_dataset = os.path.join(
pubchem_zinc_path, 'valid_{}'.format(args.data_type), 'total.smi'
)
args.vocab_path = os.path.join(
pubchem_zinc_path, 'train_{}'.format(args.data_type), 'bert_vocab_min{}.pkl'.format(args.min_freq)
)
elif args.suffix == '.txt':
args.train_dataset = os.path.join(
pubchem_zinc_path, 'train_{}'.format(args.data_type),
'sentences.txt' if args.radius == 1 else 'sentences_r{}.txt'.format(args.radius)
)
args.test_dataset = os.path.join(
pubchem_zinc_path, 'valid_{}'.format(args.data_type),
'sentences.txt' if args.radius == 1 else 'sentences_r{}.txt'.format(args.radius)
)
args.vocab_path = os.path.join(
pubchem_zinc_path, 'train_{}'.format(args.data_type),
'bert_vocab_min{}_txt.pkl'.format(args.min_freq) if args.radius == 1 else 'bert_vocab_min{}_r{}_txt.pkl'.format(args.min_freq, args.radius)
)
else:
if args.suffix == '.txt':
args.train_dataset = os.path.join(
pubchem_zinc_path, 'train_{}'.format(args.data_type),
'sentences.txt' if args.radius == 1 else 'sentences_r{}.txt'.format(args.radius)
)
args.train_adj_filepath = os.path.join(
pubchem_zinc_path, 'train_{}'.format(args.data_type),
'adjs.pkl' if args.radius == 1 else 'adjs_r{}.pkl'.format(args.radius)
)
args.test_dataset = os.path.join(
pubchem_zinc_path, 'valid_{}'.format(args.data_type),
'sentences.txt' if args.radius == 1 else 'sentences_r{}.txt'.format(args.radius)
)
args.test_adj_filepath = os.path.join(
pubchem_zinc_path, 'valid_{}'.format(args.data_type),
'adjs.pkl' if args.radius == 1 else 'adjs_r{}.pkl'.format(args.radius)
)
args.vocab_path = os.path.join(
pubchem_zinc_path, 'train_{}'.format(args.data_type),
'bert_vocab_min{}_txt.pkl'.format(args.min_freq) if args.radius == 1 else 'bert_vocab_min{}_r{}_txt.pkl'.format(args.min_freq, args.radius)
)
else:
raise ValueError('Suffix must be .txt when using adj.')
if not args.with_adj:
if args.suffix == '.smi':
args.output_path = 'output_bert_{}_min{}_H{}_L{}_A{}'.format(
args.data_type, args.min_freq,
args.hidden, args.layers, args.attn_heads)
elif args.suffix == '.txt':
args.output_path = 'output_bert_{}_min{}_H{}_L{}_A{}_txt'.format(
args.data_type, args.min_freq,
args.hidden, args.layers, args.attn_heads)
if args.radius != 1:
args.output_path = 'output_bert_{}_min{}_r{}_H{}_L{}_A{}_txt'.format(
args.data_type, args.min_freq, args.radius,
args.hidden, args.layers, args.attn_heads)
else:
raise ValueError('No such suffix named {}.'.format(args.suffix))
else:
if args.suffix == '.txt':
if args.with_span: adj_strategy = 'span'
elif args.with_mask: adj_strategy = 'mask'
elif args.with_proximity: adj_strategy = 'prox'
else: adj_strategy = 'none'
args.output_path = 'output_bert_{}_min{}_H{}_L{}_A{}_{}_txt'.format(
args.data_type, args.min_freq,
args.hidden, args.layers, args.attn_heads, adj_strategy
)
if args.radius != 1:
args.output_path = 'output_bert_{}_min{}_r{}_H{}_L{}_A{}_{}_txt'.format(
args.data_type, args.min_freq, args.radius,
args.hidden, args.layers, args.attn_heads, adj_strategy)
if args.residual_type not in ['none', 'graph_raw']:
args.output_path = 'output_bert_{}_min{}_H{}_L{}_A{}_{}_{}_txt'.format(
args.data_type, args.min_freq,
args.hidden, args.layers, args.attn_heads, adj_strategy, args.residual_type
)
if args.without_ffn:
args.output_path = args.output_path + '_{}'.format('wo_ffn')
else:
raise ValueError('Suffix must be .txt when using adj.')
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
args.with_cuda = True if args.gpu is not None else False
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
def parse_args():
args = add_args()
modify_args(args)
return args
def train():
logger.info('Loading vocab from {}'.format(args.vocab_path))
vocab = WordVocab.load_vocab(args.vocab_path)
logger.info('Vocab size: {}'.format(len(vocab)))
logger.info('Output path: {}'.format(args.output_path))
logger.info('Loading train dataset from {}'.format(args.train_dataset))
train_dataset = MolBertDataset(
args.train_dataset, vocab,
seq_len=args.seq_len,
corpus_lines=args.corpus_lines,
on_memory=args.on_memory,
with_span=args.with_span,
with_mask=args.with_mask,
with_proximity=args.with_proximity,
adj_path=args.train_adj_filepath if args.with_adj else None,
)
logger.info('Loading test dataset from {}'.format(args.test_dataset))
test_dataset = MolBertDataset(
args.test_dataset, vocab,
seq_len=args.seq_len,
corpus_lines=args.corpus_lines,
on_memory=args.on_memory,
with_span=args.with_span,
with_mask=args.with_mask,
with_proximity=args.with_proximity,
adj_path=args.test_adj_filepath if args.with_adj else None,
)
logger.info('Creating dataloader')
train_loader = DataLoader(
train_dataset, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=False, drop_last=True
)
test_loader = DataLoader(
test_dataset, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=False
)
logger.info('Building Bert model')
bert = MolBert(len(vocab), hidden=args.hidden,
n_layers=args.layers,
attn_heads=args.attn_heads,
residual_type=args.residual_type,
without_ffn=args.without_ffn,
)
logger.info('Creating Bert trainer')
trainer = MolBertTrainer(
bert, len(vocab), train_loader, test_loader, batch_size=args.batch_size,
lr=args.lr, betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
with_span=args.with_span,
with_mask=args.with_mask,
with_proximity=args.with_proximity,
with_cuda=args.with_cuda, cuda_devices=args.cuda_devices,
log_freq=args.log_freq, logger=logger
)
logger.info('Training start')
for epoch in range(args.epochs):
trainer.train(epoch)
trainer.save(epoch, os.path.join(args.output_path, 'model.pt'))
if test_loader is not None:
trainer.test(epoch)
args = parse_args()
logger = create_logger(__file__.split('.')[0], save_dir=args.output_path)
if __name__ == '__main__':
train()
``` |
{
"source": "2947721120/curly-hockeypuck",
"score": 2
} |
#### File: ortools/linear_solver/linear_solver_natural_api.py
```python
import types
# The classes below allow linear expressions to be expressed naturally with the
# usual arithmetic operators +-*/ and with constant numbers, which makes the
# python API very intuitive. See the top-level comment for examples.
class LinearExpr(object):
"""Holds linear expressions.
A linear expression is essentially an offset (floating-point value), and a
dictionary mapping MPVariable objects to their coefficient (which is also a
floating-point value).
"""
def Visit(self, coeffs):
"""Fills the coefficient dictionary, and returns the offset."""
return self.DoVisit(coeffs, 1.0)
def DoVisit(self, coeffs, multiplier):
"""Like Visit, but do that with a global floating-point multiplier."""
raise NotImplementedError
def solution_value(self): # pylint: disable=invalid-name
"""Value of this linear expr, using the solution_value of its vars."""
coeffs = {}
constant = self.Visit(coeffs)
return constant + sum(
var.solution_value() * coeff for var, coeff in sorted(coeffs.items()))
def __add__(self, expr):
if isinstance(expr, (int, long, float)):
return SumCst(self, expr)
else:
return Sum(self, expr)
def __radd__(self, cst):
if isinstance(cst, (int, long, float)):
return SumCst(self, cst)
else:
raise TypeError
def __sub__(self, expr):
if isinstance(expr, (int, long, float)):
return SumCst(self, -expr)
else:
return Sum(self, ProductCst(expr, -1))
def __rsub__(self, cst):
if isinstance(cst, (int, long, float)):
return SumCst(ProductCst(self, -1), cst)
else:
raise TypeError
def __mul__(self, cst):
if isinstance(cst, (int, long, float)):
return ProductCst(self, cst)
else:
raise TypeError
def __rmul__(self, cst):
if isinstance(cst, (int, long, float)):
return ProductCst(self, cst)
else:
raise TypeError
def __div__(self, cst):
if isinstance(cst, (int, long, float)):
if cst == 0.0:
raise ZeroDivisionError
else:
return ProductCst(self, 1.0 / cst)
else:
raise TypeError
def __truediv__(self, cst):
if isinstance(cst, (int, long, float)):
if cst == 0.0:
raise ZeroDivisionError
else:
return ProductCst(self, 1.0 / cst)
else:
raise TypeError
def __neg__(self):
return ProductCst(self, -1)
def __eq__(self, arg):
if isinstance(arg, (int, long, float)):
return LinearConstraint(self, arg, arg)
else:
return LinearConstraint(Sum(self, ProductCst(arg, -1)), 0.0, 0.0)
def __ge__(self, arg):
if isinstance(arg, (int, long, float)):
return LinearConstraint(self, arg, 1e308)
else:
return LinearConstraint(Sum(self, ProductCst(arg, -1)), 0.0, 1e308)
def __le__(self, arg):
if isinstance(arg, (int, long, float)):
return LinearConstraint(self, -1e308, arg)
else:
return LinearConstraint(Sum(self, ProductCst(arg, -1)), -1e308, 0.0)
class ProductCst(LinearExpr):
"""Represents the product of a LinearExpr by a constant."""
def __init__(self, expr, coef):
self.__expr = expr
self.__coef = coef
def __str__(self):
if self.__coef == -1:
return '-' + str(self.__expr)
else:
return '(' + str(self.__coef) + ' * ' + str(self.__expr) + ')'
def DoVisit(self, coeffs, multiplier):
current_multiplier = multiplier * self.__coef
if current_multiplier:
return self.__expr.DoVisit(coeffs, current_multiplier)
return 0.0
class Sum(LinearExpr):
"""Represents the sum of two LinearExpr."""
def __init__(self, left, right):
self.__left = left
self.__right = right
def __str__(self):
return '(' + str(self.__left) + ' + ' + str(self.__right) + ')'
def DoVisit(self, coeffs, multiplier):
constant = self.__left.DoVisit(coeffs, multiplier)
constant += self.__right.DoVisit(coeffs, multiplier)
return constant
class SumArray(LinearExpr):
"""Represents the sum of an array of objects (constants or LinearExpr)."""
def __init__(self, array):
if type(array) is types.GeneratorType:
self.__array = [x for x in array]
else:
self.__array = array
def __str__(self):
return 'Sum(' + str(self.__array) + ')'
def DoVisit(self, coeffs, multiplier):
constant = 0.0
for t in self.__array:
if isinstance(t, (int, long, float)):
constant += t * multiplier
else:
constant += t.DoVisit(coeffs, multiplier)
return constant
class SumCst(LinearExpr):
"""Represents the sum of a LinearExpr and a constant."""
def __init__(self, expr, cst):
self.__expr = expr
self.__cst = cst
def __str__(self):
return '(' + str(self.__expr) + ' + ' + str(self.__cst) + ')'
def DoVisit(self, coeffs, multiplier):
constant = self.__expr.DoVisit(coeffs, multiplier)
return constant + self.__cst * multiplier
class LinearConstraint(object):
"""Represents a linear constraint: LowerBound <= LinearExpr <= UpperBound."""
def __init__(self, expr, lb, ub):
self.__expr = expr
self.__lb = lb
self.__ub = ub
def __str__(self):
if self.__lb > -1e308 and self.__ub < 1e308:
if self.__lb == self.__ub:
return str(self.__expr) + ' == ' + str(self.__lb)
else:
return (str(self.__lb) + ' <= ' + str(self.__expr) +
' <= ' + str(self.__ub))
elif self.__lb > -1e308:
return str(self.__expr) + ' >= ' + str(self.__lb)
elif self.__ub < 1e308:
return str(self.__expr) + ' <= ' + str(self.__ub)
else:
return 'Trivial inequality (always true)'
def Extract(self, solver, name=''):
"""Performs the actual creation of the constraint object."""
coeffs = {}
constant = self.__expr.Visit(coeffs)
lb = -solver.infinity()
ub = solver.infinity()
if self.__lb > -1e308:
lb = self.__lb - constant
if self.__ub < 1e308:
ub = self.__ub - constant
constraint = solver.RowConstraint(lb, ub, name)
    for v, c in sorted(coeffs.items()):
constraint.SetCoefficient(v, float(c))
return constraint
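  # Hedged sketch of the overall flow: `solver.Add(x + 2 * y <= 5)` hands the
  # solver a LinearConstraint, and the wrapper calls Extract(solver), which
  # walks the expression tree once to collect {variable: coefficient}, shifts
  # the bounds by the constant offset, and fills a fresh row constraint.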
``` |
{
"source": "2947721120/or-tools",
"score": 3
} |
#### File: examples/python/costas_array.py
```python
import sys
from ortools.constraint_solver import pywrapcp
def main(n=6):
# Create the solver.
solver = pywrapcp.Solver("Costas array")
#
# data
#
print "n:", n
#
# declare variables
#
costas = [solver.IntVar(1, n, "costas[%i]" % i) for i in range(n)]
differences = {}
for i in range(n):
for j in range(n):
differences[(i, j)] = solver.IntVar(-n + 1, n - 1,
"differences[%i,%i]" % (i, j))
differences_flat = [differences[i, j] for i in range(n) for j in range(n)]
#
# constraints
#
# Fix the values in the lower triangle in the
# difference matrix to -n+1. This removes variants
  # of the difference matrix for the same Costas array.
for i in range(n):
for j in range(i + 1):
solver.Add(differences[i, j] == -n + 1)
# hakank: All the following constraints are from
# <NAME>'s original model.
#
solver.Add(solver.AllDifferent(costas))
# "How do the positions in the Costas array relate
# to the elements of the distance triangle."
for i in range(n):
for j in range(n):
if i < j:
solver.Add(differences[(i, j)] == costas[j] - costas[j - i - 1])
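  # Worked micro-example (hedged): with n = 3 and costas = [1, 3, 2], the first
  # row of the triangle is differences[0,1] = 3 - 1 = 2 and
  # differences[0,2] = 2 - 3 = -1.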
# "All entries in a particular row of the difference
  # triangle must be distinct."
for i in range(n - 2):
solver.Add(solver.AllDifferent([differences[i, j]
for j in range(n) if j > i]))
#
# "All the following are redundant - only here to speed up search."
#
# "We can never place a 'token' in the same row as any other."
for i in range(n):
for j in range(n):
if i < j:
solver.Add(differences[i, j] != 0)
for k in range(2, n):
for l in range(2, n):
if k < l:
solver.Add(differences[k - 2, l - 1] +
differences[k, l] ==
differences[k - 1, l - 1] +
differences[k - 1, l])
#
# search and result
#
db = solver.Phase(costas + differences_flat,
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE)
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
print "costas:", [costas[i].Value() for i in range(n)]
print "differences:"
for i in range(n):
for j in range(n):
v = differences[i, j].Value()
if v == -n + 1:
print " ",
else:
print "%2d" % v,
print
print
num_solutions += 1
solver.EndSearch()
print
print "num_solutions:", num_solutions
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
n = 6
if __name__ == "__main__":
if len(sys.argv) > 1:
n = int(sys.argv[1])
main(n)
``` |
{
"source": "2947721120/thumbor",
"score": 2
} |
#### File: thumbor/old_tests/test_app.py
```python
import sys
from os.path import join, abspath, dirname
sys.path.append(abspath(join(dirname(__file__), '..')))
from tornado.testing import AsyncHTTPTestCase
from thumbor.app import ThumborServiceApp
class ThumborServiceTest(AsyncHTTPTestCase):
def get_app(self):
return ThumborServiceApp()
def test_app_exists_and_is_instanceof_thumborserviceapp(self):
assert isinstance(self._app, ThumborServiceApp), 'App does not exist or is not instance of the ThumborServiceApp class'
```
#### File: thumbor/old_tests/test_meta_transform.py
```python
from os.path import abspath, dirname, join
import json
from tornado.testing import AsyncHTTPTestCase
from tornado.options import options
from thumbor.app import ThumborServiceApp
get_conf_path = lambda filename: abspath(join(dirname(__file__), 'fixtures', filename))
class MetaHandlerTestCase(AsyncHTTPTestCase):
def get_app(self):
app = ThumborServiceApp(get_conf_path('default.py'))
return app
def test_meta_returns_200(self):
options.META_CALLBACK_NAME = None
self.http_client.fetch(
self.get_url('/unsafe/meta/s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg'), self.stop
)
response = self.wait()
self.assertEqual(200, response.code)
def test_meta_returns_appjson_code(self):
options.META_CALLBACK_NAME = None
self.http_client.fetch(
self.get_url('/unsafe/meta/s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg'), self.stop
)
response = self.wait()
assert response.code == 200
content_type = response.headers['Content-Type']
self.assertEqual("application/json", content_type)
def test_meta_returns_proper_json_for_no_ops(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/%s' % image_url), self.stop)
response = self.wait()
text = response.body
operations = json.loads(text)
assert operations
assert operations['thumbor']
assert operations['thumbor']['source']['url'] == image_url
assert operations['thumbor']['source']['width'] == 620
assert operations['thumbor']['source']['height'] == 349
assert "operations" in operations['thumbor']
assert not operations['thumbor']['operations']
def test_meta_returns_proper_json_for_resize_and_crop(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/300x200/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
operations = thumbor_json['thumbor']['operations']
assert len(operations) == 2
assert operations[0]['type'] == 'crop'
assert operations[0]['top'] == 0
assert operations[0]['right'] == 572
assert operations[0]['bottom'] == 349
assert operations[0]['left'] == 48
assert operations[1]['type'] == 'resize'
assert operations[1]['width'] == 300
assert operations[1]['height'] == 200
def test_meta_returns_proper_json_for_resize_and_manual_crop(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/0x0:100x100/50x0/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
target = thumbor_json['thumbor']['target']
assert target['width'] == 50
assert target['height'] == 50, target['height']
def test_meta_returns_proper_target_for_resize_and_crop(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/300x200/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
target = thumbor_json['thumbor']['target']
assert target['width'] == 300
assert target['height'] == 200
def test_meta_returns_proper_target_for_crop(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/0x0:100x100/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
target = thumbor_json['thumbor']['target']
assert target['width'] == 100
assert target['height'] == 100
def test_meta_returns_proper_target_for_crop_and_resize(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/0x0:200x250/200x0/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
target = thumbor_json['thumbor']['target']
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/50x40:250x290/200x0/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
target_2 = thumbor_json['thumbor']['target']
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/250x80:450x330/200x0/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
target_3 = thumbor_json['thumbor']['target']
assert target['width'] == target_2['width']
assert target['height'] == target_2['height']
assert target['width'] == target_3['width']
assert target['height'] == target_3['height']
def test_meta_returns_proper_json_for_flip(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/-300x-200/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
operations = thumbor_json['thumbor']['operations']
assert len(operations) == 4
assert operations[2]['type'] == 'flip_horizontally'
assert operations[3]['type'] == 'flip_vertically'
class MetaHandlerJSONPTestCase(AsyncHTTPTestCase):
def get_app(self):
return ThumborServiceApp(get_conf_path('jsonp.py'))
def test_meta_returns_proper_json_for_no_ops_with_callback(self):
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/%s' % image_url), self.stop)
response = self.wait()
text = response.body
assert text.strip().startswith('callback({')
assert text.strip().endswith('});')
```
#### File: tests/detectors/test_glasses_detector.py
```python
from os.path import abspath
import mock
from preggy import expect
from tests.base import DetectorTestCase
from thumbor.detectors.glasses_detector import Detector as GlassesDetector
class GlassesDetectorTestCase(DetectorTestCase):
def test_detector_uses_proper_cascade(self):
cascade = './tests/fixtures/haarcascade_eye_tree_eyeglasses.xml'
ctx = mock.Mock(
config=mock.Mock(
GLASSES_DETECTOR_CASCADE_FILE=abspath(cascade),
)
)
detector = GlassesDetector(ctx, 1, [])
expect(detector).not_to_be_null()
def test_should_detect_glasses(self):
with open(abspath('./tests/fixtures/images/glasses.jpg')) as f:
self.engine.load(f.read(), None)
self.context.config.GLASSES_DETECTOR_CASCADE_FILE = abspath(
'./thumbor/detectors/glasses_detector/haarcascade_eye_tree_eyeglasses.xml',
)
if hasattr(GlassesDetector, 'cascade'):
del GlassesDetector.cascade
GlassesDetector(self.context, 0, []).detect(lambda: None)
detection_result = self.context.request.focal_points[0]
expect(detection_result.origin).to_equal('detection')
expect(detection_result.x).to_be_numeric()
expect(detection_result.y).to_be_numeric()
expect(detection_result.width).to_be_numeric()
expect(detection_result.height).to_be_numeric()
```
#### File: tests/handlers/test_healthcheck.py
```python
from preggy import expect
from tests.base import TestCase
class HealthcheckHandlerTestCase(TestCase):
def test_can_get_healthcheck(self):
response = self.fetch('/healthcheck')
expect(response.code).to_equal(200)
expect(response.body).to_equal("WORKING")
def test_can_head_healthcheck(self):
response = self.fetch('/healthcheck', method='HEAD')
expect(response.code).to_equal(200)
```
#### File: thumbor/filters/extract_focal.py
```python
import re
from thumbor.filters import BaseFilter, filter_method, PHASE_PRE_LOAD
from thumbor.url import Url
from thumbor.point import FocalPoint
MAX_LEVEL = 10
class Filter(BaseFilter):
phase = PHASE_PRE_LOAD
domain_regex = re.compile(r'^(https?://)?.*?/')
url_regex = re.compile(Url.regex())
def parse_url(self, url):
level = 0
while level < MAX_LEVEL:
url = self.domain_regex.sub('', url)
result = self.url_regex.match(url)
if not result:
return None
parts = result.groupdict()
image = parts.get('image', None)
if not (image and (parts.get('hash', None) or parts.get('unsafe', None))):
return None
top, right, left, bottom = parts.get('crop_top', None), parts.get('crop_right', None), \
parts.get('crop_left', None), parts.get('crop_bottom', None)
if top and right and left and bottom:
return (image, top, right, left, bottom)
url = image
level += 1
return None
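    # Hedged example: given a request whose image URL is itself a thumbor URL,
    # e.g. .../unsafe/10x10:200x200/inner-host/img.jpg, parse_url strips the
    # leading domain level by level (up to MAX_LEVEL) until it finds a crop,
    # then returns (inner image, top, right, left, bottom).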
@filter_method()
def extract_focal(self):
parts = self.parse_url(self.context.request.image_url)
if parts:
image, top, right, left, bottom = parts
top, right, left, bottom = int(top), int(right), int(left), int(bottom)
width = right - left
height = bottom - top
self.context.request.focal_points.append(
FocalPoint.from_square(left, top, width, height, origin="Original Extraction")
)
self.context.request.image_url = image
```
#### File: thumbor/optimizers/gifv.py
```python
import os
import subprocess
from thumbor.optimizers import BaseOptimizer
class Optimizer(BaseOptimizer):
def should_run(self, image_extension, buffer):
return 'gif' in image_extension and 'gifv' in self.context.request.filters
def optimize(self, buffer, input_file, output_file):
format, command_params = self.set_format()
ffmpeg_path = self.context.config.FFMPEG_PATH
command = '%s -y -f gif -i %s -an -movflags faststart -f %s -pix_fmt yuv420p %s -qmin 10 -qmax 42 -crf 23' \
' -maxrate 500k -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" %s -loglevel error'
command = command % (
ffmpeg_path,
input_file,
format,
command_params,
output_file,
)
with open(os.devnull) as null:
subprocess.call(command, shell=True, stdin=null)
self.context.request.format = format
def set_format(self):
if 'webm' in self.context.request.filters:
format = 'webm'
command_params = '-quality good -cpu-used 4'
else:
format = 'mp4'
command_params = '-profile:v baseline -level 4.0'
return format, command_params
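    # With the mp4 branch above, the rendered shell command looks roughly like
    # (hedged; built from the template string in optimize):
    #   ffmpeg -y -f gif -i in.gif -an -movflags faststart -f mp4 -pix_fmt yuv420p \
    #       -profile:v baseline -level 4.0 -qmin 10 -qmax 42 -crf 23 -maxrate 500k \
    #       -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" out.mp4 -loglevel error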
```
#### File: thumbor/vows/base64_hmac_sha1_url_signer_vows.py
```python
import hashlib
import base64
import hmac
from pyvows import Vows, expect
from thumbor.url_signers.base64_hmac_sha1 import UrlSigner
@Vows.batch
class Base64HmacSha1UrlSignerVows(Vows.Context):
def topic(self):
return UrlSigner(security_key="something")
def should_be_signer_instance(self, topic):
expect(topic).to_be_instance_of(UrlSigner)
def should_have_security_key(self, topic):
expect(topic.security_key).to_equal('something')
class Sign(Vows.Context):
def topic(self, signer):
url = '10x11:12x13/-300x-300/center/middle/smart/some/image.jpg'
expected = base64.urlsafe_b64encode(
hmac.new(
'something', unicode(url).encode('utf-8'), hashlib.sha1
).digest()
)
return (signer.signature(url), expected)
def should_equal_encrypted_string(self, test_data):
topic, expected = test_data
expect(topic).to_equal(expected)
```
#### File: thumbor/vows/blacklist_vows.py
```python
from pyvows import Vows, expect
from tornado_pyvows.context import TornadoHTTPContext
from thumbor.app import ThumborServiceApp
from thumbor.importer import Importer
from thumbor.config import Config
from thumbor.context import Context, ServerParameters
from os.path import abspath, join, dirname, exists
from shutil import rmtree
class BaseContext(TornadoHTTPContext):
def get_app(self):
file_storage_root_path = '/tmp/thumbor-vows/storage'
if exists(file_storage_root_path):
rmtree(file_storage_root_path)
cfg = Config()
cfg.USE_BLACKLIST = True
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = abspath(join(dirname(__file__), 'fixtures/'))
cfg.STORAGE = 'thumbor.storages.file_storage'
cfg.FILE_STORAGE_ROOT_PATH = file_storage_root_path
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'debug', None)
ctx = Context(server, cfg, importer)
application = ThumborServiceApp(ctx)
return application
@Vows.batch
class Blacklist(BaseContext):
class BaseBlacklist(TornadoHTTPContext):
def topic(self):
response = self.get('/blacklist')
return (response.code, response.body)
def should_return_blank(self, topic):
expect(topic[1]).to_equal("")
def should_return_200(self, topic):
expect(topic[0]).to_equal(200)
class AddingToBlacklist(TornadoHTTPContext):
def topic(self):
response = self.fetch('/blacklist?blocked.jpg', method='PUT', body='')
return (response.code, response.body)
def should_return_200(self, topic):
expect(topic[0]).to_equal(200)
class ReadingUpdatedBlacklist(TornadoHTTPContext):
def topic(self):
response = self.get('/blacklist')
return (response.code, response.body)
def should_return_200(self, topic):
expect(topic[0]).to_equal(200)
def should_contain_blacklisted_file(self, topic):
expect("blocked.jpg\n" in topic[1]).to_equal(True)
@Vows.batch
class BlacklistIntegration(BaseContext):
class NormalGetImage(TornadoHTTPContext):
def topic(self):
response = self.get('/unsafe/image.jpg')
return response.code
def should_return_200(self, topic):
expect(topic).to_equal(200)
class BlacklistedGetImage(TornadoHTTPContext):
def topic(self):
self.fetch('/blacklist?image.jpg', method='PUT', body='')
response = self.get('/unsafe/image.jpg')
return response.code
def should_return_bad_request(self, topic):
expect(topic).to_equal(400)
```
#### File: thumbor/vows/json_engine_vows.py
```python
import re
from json import loads
from pyvows import Vows, expect
ctx = Vows.Context
from thumbor.engines.json_engine import JSONEngine
from thumbor.point import FocalPoint
class MockImage:
def __init__(self, size, data=None):
self.size = size
self.data = data
class MockEngine:
def __init__(self, size):
self.context = None
self.image = MockImage(size)
self.frame_count = 1
def get_image_mode(self):
return 'RGB'
def get_image_data(self):
return self.image.data
def set_image_data(self, data):
self.image.data = data
def resize(self, width, height):
self.image.size = (width, height)
def crop(self, left, top, right, bottom):
self.image.size = (right - left, bottom - top)
def image_data_as_rgb(self, update_image=True):
return 'RGB', self.image.data
@property
def size(self):
return self.image.size
IMAGE_PATH = '/some/image/path.jpg'
IMAGE_SIZE = (300, 200)
@Vows.batch
class JsonEngineVows(ctx):
class CreateInstanceVows(ctx):
def topic(self):
engine = MockEngine(size=IMAGE_SIZE)
json = JSONEngine(engine=engine, path=IMAGE_PATH)
return json
def should_not_be_null_or_error(self, topic):
expect(topic).not_to_be_null()
expect(topic).not_to_be_an_error()
def should_have_proper_engine(self, topic):
expect(topic.engine).to_be_instance_of(MockEngine)
def should_have_proper_dimensions(self, topic):
expect(topic.width).to_equal(300)
expect(topic.height).to_equal(200)
def should_have_proper_path(self, topic):
expect(topic.path).to_equal(IMAGE_PATH)
def should_have_null_callback_name(self, topic):
expect(topic.callback_name).to_be_null()
def should_have_empty_operations(self, topic):
expect(topic.operations).to_be_empty()
def should_have_empty_focal_points(self, topic):
expect(topic.focal_points).to_be_empty()
def should_have_proper_image(self, topic):
expect(topic.image).to_be_instance_of(MockImage)
def should_return_size(self, topic):
expect(topic.size).to_equal((300, 200))
class GetImageMode(ctx):
def topic(self, engine):
return engine.get_image_mode()
def should_return_proper_image_mode(self, topic):
expect(topic).to_equal('RGB')
class GetImageDataAsRgb(ctx):
def topic(self, engine):
engine.set_image_data('SOME DATA')
return engine.image_data_as_rgb()
def should_return_proper_image_data(self, (mode, data)):
expect(mode).to_equal('RGB')
expect(data).to_equal('SOME DATA')
class GetImageData(ctx):
def topic(self, engine):
engine.set_image_data('SOME DATA')
return engine.get_image_data()
def should_return_proper_image_data(self, topic):
expect(topic).to_equal('SOME DATA')
class Read(ctx):
def topic(self, engine):
return loads(engine.read('jpg', 100))
def should_be_proper_json(self, topic):
expected = {
"thumbor": {
"operations": [],
"source": {
"url": "/some/image/path.jpg",
"width": 300,
"height": 200,
"frameCount": 1,
},
"target": {
"width": 300,
"height": 200
}
}
}
expect(topic).to_be_like(expected)
class ReadWithCallbackName(ctx):
def topic(self):
engine = MockEngine(size=IMAGE_SIZE)
json = JSONEngine(engine=engine, path=IMAGE_PATH, callback_name="callback")
jsonp = json.read('jpg', 100)
            match = re.match(r'^callback\((.+)\);', jsonp)
return match
def should_not_be_null(self, topic):
expect(topic).not_to_be_null()
class JsonCompare(ctx):
def topic(self, match):
json = match.groups()[0]
return loads(json)
def should_be_proper_json(self, topic):
expected = {
"thumbor": {
"operations": [],
"source": {
"url": "/some/image/path.jpg",
"width": 300,
"height": 200,
"frameCount": 1,
},
"target": {
"width": 300,
"height": 200
}
}
}
expect(topic).to_be_like(expected)
class ResizeVows(ctx):
def topic(self):
engine = MockEngine(size=IMAGE_SIZE)
json = JSONEngine(engine=engine, path=IMAGE_PATH)
json.resize(200, 300)
return loads(json.read('jpg', 100))
def should_be_proper_json(self, topic):
expected = {
"thumbor": {
"operations": [
{u'width': 200, u'type': u'resize', u'height': 300}
],
"source": {
"url": "/some/image/path.jpg",
"width": 300,
"height": 200,
"frameCount": 1,
},
"target": {
"width": 200,
"height": 300
}
}
}
expect(topic).to_be_like(expected)
class CropVows(ctx):
def topic(self):
engine = MockEngine(size=IMAGE_SIZE)
json = JSONEngine(engine=engine, path=IMAGE_PATH)
json.crop(100, 100, 200, 150)
return loads(json.read('jpg', 100))
def should_be_proper_json(self, topic):
expected = {
"thumbor": {
"operations": [
{u'top': 100, u'right': 200, u'type': u'crop', u'left': 100, u'bottom': 150}
],
"source": {
"url": "/some/image/path.jpg",
"width": 300,
"height": 200,
"frameCount": 1,
},
"target": {
"width": 100,
"height": 50
}
}
}
expect(topic).to_be_like(expected)
class FlipVows(ctx):
def topic(self):
engine = MockEngine(size=IMAGE_SIZE)
json = JSONEngine(engine=engine, path=IMAGE_PATH)
json.flip_vertically()
json.flip_horizontally()
return loads(json.read('jpg', 100))
def should_be_proper_json(self, topic):
expected = {
"thumbor": {
"operations": [
{u'type': u'flip_vertically'},
{u'type': u'flip_horizontally'}
],
"source": {
"url": "/some/image/path.jpg",
"width": 300,
"height": 200,
"frameCount": 1,
},
"target": {
"width": 300,
"height": 200
}
}
}
expect(topic).to_be_like(expected)
class FocalVows(ctx):
def topic(self):
engine = MockEngine(size=IMAGE_SIZE)
json = JSONEngine(engine=engine, path=IMAGE_PATH)
json.focus([
FocalPoint(100, 100),
FocalPoint(200, 200)
])
return loads(json.read('jpg', 100))
def should_be_proper_json(self, topic):
expected = {
"thumbor": {
"operations": [
],
"focal_points": [
{u'origin': u'alignment', u'height': 1, u'width': 1, u'y': 100, u'x': 100, u'z': 1.0},
{u'origin': u'alignment', u'height': 1, u'width': 1, u'y': 200, u'x': 200, u'z': 1.0}
],
"source": {
"url": "/some/image/path.jpg",
"width": 300,
"height": 200,
"frameCount": 1,
},
"target": {
"width": 300,
"height": 200
}
}
}
expect(topic).to_be_like(expected)
```
#### File: thumbor/vows/rotate_filter_vows.py
```python
from pyvows import Vows, expect
from thumbor.context import Context, RequestParameters
from thumbor.config import Config
from thumbor.importer import Importer
from thumbor.filters.rotate import Filter
import thumbor.filters
class RotateEngine:
def __init__(self):
self.rotate_val = None
def rotate(self, rotate_val):
self.rotate_val = rotate_val
def is_multiple(self):
return False
@Vows.batch
class RotateFilterVows(Vows.Context):
class DisallowsInvalidContext(Vows.Context):
def topic(self):
conf = Config()
imp = Importer(conf)
imp.filters = [Filter]
ctx = Context(None, conf, imp)
ctx.request = RequestParameters()
runner = ctx.filters_factory.create_instances(ctx, "rotate(91)")
filter_instances = runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
filter_instances[0].engine = RotateEngine()
filter_instances[0].run()
return filter_instances[0].engine.rotate_val
def should_be_none(self, rotate):
expect(rotate).to_be_null()
class RotateProperly(Vows.Context):
def topic(self):
conf = Config()
imp = Importer(conf)
imp.filters = [Filter]
ctx = Context(None, conf, imp)
ctx.request = RequestParameters()
runner = ctx.filters_factory.create_instances(ctx, "rotate(90)")
filter_instances = runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
filter_instances[0].engine = RotateEngine()
filter_instances[0].run()
return filter_instances[0].engine.rotate_val
def should_equal_90(self, rotate):
expect(rotate).to_equal(90)
class NormalizeRotate(Vows.Context):
def topic(self):
conf = Config()
imp = Importer(conf)
imp.filters = [Filter]
ctx = Context(None, conf, imp)
ctx.request = RequestParameters()
runner = ctx.filters_factory.create_instances(ctx, "rotate(540)")
filter_instances = runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
filter_instances[0].engine = RotateEngine()
filter_instances[0].run()
return filter_instances[0].engine.rotate_val
        def should_equal_180(self, rotate):
expect(rotate).to_equal(180)
``` |
{
"source": "2951121599/Django-ToDoList-db-version",
"score": 2
} |
#### File: to_do_list/todolist/views.py
```python
from django.shortcuts import render, redirect
from .models import Todo
# Create your views here.
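# Note on the Chinese identifiers below: dict keys, form-field names, and URL
# names must match the templates and urls.py, so they are kept as-is.
# Roughly: '待办事项' = "todo item", '清单' = "todo list", '警告' = "warning",
# '信息' = "message", '已修改事项' = "edited item", '待修改事项' = "item to edit",
# '完成状态' = "done status" ('已完成' done / '未完成' not done),
# 每一件事_id = per-item id, 'todolist:主页' = the "home" URL name.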
def home(request):
# request.POST
if request.method == 'POST':
if request.POST['待办事项'] == '':
            content = {'清单': Todo.objects.all(), '警告': 'Please enter some content!'}
return render(request, 'todolist/home.html', content)
else:
# lst.append({'待办事项':request.POST['待办事项'], '已完成': False})
a_row = Todo(thing=request.POST['待办事项'], done=False)
a_row.save()
            content = {'清单': Todo.objects.all(), '信息': 'Added successfully!'}
return render(request, 'todolist/home.html', content)
elif request.method == 'GET':
content = {'清单': Todo.objects.all()}
return render(request, 'todolist/home.html', content)
def about(request):
return render(request, 'todolist/about.html')
def edit(request, 每一件事_id):
if request.method == 'POST':
if request.POST['已修改事项'] == '':
            return render(request, 'todolist/edit.html', {'警告': 'Please enter some content!'})
else:
a = Todo.objects.get(id=每一件事_id)
a.thing = request.POST['已修改事项']
a.save()
return redirect('todolist:主页')
elif request.method == 'GET':
content = {'待修改事项': Todo.objects.get(id=每一件事_id).thing}
return render(request, 'todolist/edit.html', content)
def delete(request, 每一件事_id):
a = Todo.objects.get(id=每一件事_id)
a.delete()
return redirect('todolist:主页')
def cross(request, 每一件事_id):
if request.POST['完成状态'] == '已完成':
a = Todo.objects.get(id=每一件事_id)
a.done = True
a.save()
return redirect('todolist:主页')
elif request.POST['完成状态'] == '未完成':
a = Todo.objects.get(id=每一件事_id)
a.done = False
a.save()
return redirect('todolist:主页')
``` |
{
"source": "29527/OKExPyWebsocket",
"score": 2
} |
#### File: OKExPyWebsocket/restmode/app.py
```python
from pprint import pprint
import requests
import urllib3
import app_utils as u
from config import HTTP_URL
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def request_get(request_path):
    # Whichever REST endpoint is hit, the request path signed into the headers
    # must match the path used in the URL.
target_url = HTTP_URL + request_path
headers = u.header_util.get_rest_sign(request_path=request_path)
response = requests.get(target_url, headers=headers, verify=False)
return response.json()
if __name__ == "__main__":
res = request_get("/api/v5/account/bills")
pprint(res, width=20)
```
#### File: OKExPyWebsocket/wsmode/agent.py
```python
import time
from faker import Faker
from ws4py.client.threadedclient import WebSocketClient
import app_utils as u
from app_utils import WS_PRIVATE_URL, WS_PUBLIC_URL, gen_send_msg, gen_send_order_msg
from constant import Channel, Operation
from wsmode import stategies as s, utils as pu
from wsmode.handler import MessageCallbackHandler
fake = Faker("zh-CN")
class BaseAgent:
"""
提供环境上下文
"""
URL = ""
def __init__(
self,
strategy: s.IStrategy,
msg_cb_handler: MessageCallbackHandler,
ws: WebSocketClient = None,
):
self.ws = ws
self.msg_cb_handler = msg_cb_handler
self.strategy = strategy
self.strategy.set_agent(self)
self.strategy.register()
def handle(self, json_msg: dict):
msg = pu.parse_message(json_msg)
topic_msg = pu.parse(msg)
self.msg_cb_handler.dispatch(topic_msg)
def set_websocket(self, ws: WebSocketClient):
self.ws = ws
def after_ws_opened(self):
raise NotImplementedError
def subscribe_channel(self, channel: Channel, **kwargs):
args = {
"channel": channel,
}
args.update(kwargs)
self.ws.send(gen_send_msg(Operation.SUBSCRIBE, args))
class PrivateWSAgent(BaseAgent):
URL = WS_PRIVATE_URL
def send_order(self, op: Operation, **kwargs):
random_order_id = fake.pystr(min_chars=1, max_chars=32)
self.ws.send(gen_send_order_msg(random_order_id, op, kwargs))
return random_order_id
def after_ws_opened(self):
        # Step 1: log in.
headers = u.header_util.get_ws_header()
self.ws.send(u.gen_send_msg(Operation.LOGIN, headers))
time.sleep(1)
print("成功登入私有频道~")
self.strategy.open_subscribe()
class PublicWSAgent(BaseAgent):
URL = WS_PUBLIC_URL
def after_ws_opened(self):
print("成功登入公有频道~")
self.strategy.open_subscribe()
# self.ws.send(
# '{"op":"subscribe","args":[{"channel": "instruments","instType": "FUTURES"}]}'
# )
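# A hedged usage sketch (the client subclass and strategy are assumptions, not
# part of this module):
#   agent = PublicWSAgent(strategy=SomeStrategy(), msg_cb_handler=MessageCallbackHandler())
#   ws = SomeWS4PyClient(PublicWSAgent.URL)   # forwards received JSON to agent.handle
#   agent.set_websocket(ws)
#   ws.connect()   # ws4py fires opened(); the subclass then calls agent.after_ws_opened()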
``` |