max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
nihars.com/server.py | niharokz/website_archive | 0 | 12796351 |
#!/bin/python
#
# ███╗░░██╗██╗██╗░░██╗░█████╗░██████╗░░█████╗░██╗░░██╗███████╗
# ████╗░██║██║██║░░██║██╔══██╗██╔══██╗██╔══██╗██║░██╔╝╚════██║
# ██╔██╗██║██║███████║███████║██████╔╝██║░░██║█████═╝░░░███╔═╝
# ██║╚████║██║██╔══██║██╔══██║██╔══██╗██║░░██║██╔═██╗░██╔══╝░░
# ██║░╚███║██║██║░░██║██║░░██║██║░░██║╚█████╔╝██║░╚██╗███████╗
# ╚═╝░░╚══╝╚═╝╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝╚══════╝
#
# DRAFTED BY <NAME> ON 22-03-21. [https://nihars.com]
# SOURCE [server.py] LAST MODIFIED ON 27-03-21
import http.server
import socketserver
import os
PORT = 8000
web_dir = os.path.join(os.path.dirname(__file__), 'public')
os.chdir(web_dir)
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
# python -m http.server --directory public &
| 2.546875 | 3 |
catkin_ws/src/aruco_intercept/src/grab_object.py | filesmuggler/ur3e-ird435-rg2 | 1 | 12796352 | #!/usr/bin/env python
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
import tf
def all_close(goal, actual, tolerance):
"""
Convenience method for testing if a list of values are within a tolerance of their counterparts in another list
@param: goal A list of floats, a Pose or a PoseStamped
@param: actual A list of floats, a Pose or a PoseStamped
@param: tolerance A float
@returns: bool
"""
all_equal = True
if type(goal) is list:
for index in range(len(goal)):
if abs(actual[index] - goal[index]) > tolerance:
return False
elif type(goal) is geometry_msgs.msg.PoseStamped:
return all_close(goal.pose, actual.pose, tolerance)
elif type(goal) is geometry_msgs.msg.Pose:
return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)
return True
def callback(data,args):
print("hello there")
ur_robot = args[0]
ur_scene = args[1]
ur_move_group = args[2]
ur_planning_frame = args[3]
ur_eef_link = args[4]
ur_group_names = args[5]
move_group = ur_move_group
print("elo grab")
print(data)
data.position.x = data.position.x - 0.05
data.position.y = data.position.y - 0.03
data.position.z = 0.15
data.orientation.x = -0.0
data.orientation.y = 1.0
data.orientation.z = 0.0
data.orientation.w = -0.0
move_group.set_pose_target(data)
plan = move_group.go(wait=True)
move_group.stop()
move_group.clear_pose_targets()
current_pose = move_group.get_current_pose().pose
return all_close(data, current_pose, 0.01)
def main():
try:
print("Grab")
moveit_commander.roscpp_initialize(sys.argv)
robot = moveit_commander.RobotCommander()
scene = moveit_commander.PlanningSceneInterface()
group_name = "manipulator"
move_group = moveit_commander.MoveGroupCommander(group_name)
planning_frame = move_group.get_planning_frame()
eef_link = move_group.get_end_effector_link()
group_names = robot.get_group_names()
robot.get_current_state()
# Misc variables
ur_robot = robot
ur_scene = scene
ur_move_group = move_group
ur_planning_frame = planning_frame
ur_eef_link = eef_link
ur_group_names = group_names
rospy.init_node('move_ur_python_interface', anonymous=True)
rospy.Subscriber("/aruco_pose",geometry_msgs.msg.Pose,callback,(ur_robot,
ur_scene,
ur_move_group,
ur_planning_frame,
ur_eef_link,
ur_group_names))
rospy.spin()
except rospy.ROSInterruptException:
return
except KeyboardInterrupt:
return
if __name__ == '__main__':
main() | 2.75 | 3 |
LLDBagility/lldbagility.py | killvxk/LLDBagility | 1 | 12796353 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import functools
import re
import shlex
import threading
import time
import traceback
import kdpserver
import lldb
import lldbagilityutils
import stubvm
vm = None
def _exec_cmd(debugger, command, capture_output=False):
if capture_output:
cmdretobj = lldb.SBCommandReturnObject()
debugger.GetCommandInterpreter().HandleCommand(command, cmdretobj)
return cmdretobj
else:
debugger.HandleCommand(command)
return None
def _evaluate_expression(exe_ctx, expression):
res = exe_ctx.frame.EvaluateExpression(expression)
try:
vaddr = int(res.GetValue(), 0)
except (TypeError, ValueError):
return None
else:
return vaddr
def fdp_attach(debugger, command, exe_ctx, result, internal_dict):
"""
Connect to a macOS VM via FDP.
The VM must have already been started.
Existing breakpoints are deleted on attaching.
Re-execute this command every time the VM is rebooted.
"""
parser = argparse.ArgumentParser(prog="fdp-attach")
parser.add_argument("vm_name")
args = parser.parse_args(shlex.split(command))
_attach(debugger, exe_ctx, stubvm.FDPSTUB, args.vm_name)
def vmsn_attach(debugger, command, exe_ctx, result, internal_dict):
"""
Connect to a macOS VM via VMSN. Currently not maintained!
Existing breakpoints are deleted on attaching.
"""
parser = argparse.ArgumentParser(prog="vmsn-attach")
parser.add_argument("vm_name")
args = parser.parse_args(shlex.split(command))
_attach(debugger, exe_ctx, stubvm.VMSNSTUB, args.vm_name)
def _attach(debugger, exe_ctx, vm_stub, vm_name):
global vm
print(lldbagilityutils.LLDBAGILITY)
print("* Attaching to the VM")
try:
vm = stubvm.STUBVM(vm_stub, vm_name)
except Exception as exc:
print("* Could not attach! {}".format(str(exc)))
return
print("* Resuming the VM execution until reaching kernel code")
vm.complete_attach()
print("* Kernel load address: 0x{:016x}".format(vm.kernel_load_vaddr))
print("* Kernel slide: 0x{:x}".format(vm.kernel_slide))
print("* Kernel cr3: 0x{:x}".format(vm.kernel_cr3))
print("* Kernel version: {}".format(vm.kernel_version))
print("* VM breakpoints deleted")
# detach the previous process (if any)
exe_ctx.process.Detach()
# remove all LLDB breakpoints
exe_ctx.target.DeleteAllBreakpoints()
print("* LLDB breakpoints deleted")
# start the fake KDP server
kdpsv = kdpserver.KDPServer()
th = threading.Thread(target=kdpsv.debug, args=(vm,))
th.daemon = True
th.start()
# connect LLDB to the fake KDP server
kdpsv_addr, kdpsv_port = kdpsv.sv_sock.getsockname()
_exec_cmd(debugger, "kdp-remote '{}:{}'".format(kdpsv_addr, kdpsv_port))
# trigger a memory write to find out the address of the kdp struct
vm.store_kdp_at_next_write_virtual_memory()
if _exec_cmd(debugger, "memory write &kdp 41", capture_output=True).GetError():
print("* Unable to find the 'kdp' symbol. Did you specify the target to debug?")
vm.abort_store_kdp_at_next_write_virtual_memory()
def _attached(f):
@functools.wraps(f)
def _wrapper(*args, **kwargs):
global vm
if not vm:
print("* Not attached to a VM!")
return
return f(*args, **kwargs)
return _wrapper
@_attached
def fdp_save(debugger, command, exe_ctx, result, internal_dict):
"""
Save the current state of the attached macOS VM.
Breakpoints are not saved (but retained for the current session).
"""
# saving the state causes all breakpoints (soft and hard) to be unset, but
# we can preserve them at least for the current session
# we disable soft breakpoints before saving and then re-enable them once the state
# has been saved, so that LLDB sends again the KDP requests for setting them
exe_ctx.target.DisableAllBreakpoints()
# similarly, for hard breakpoints we save the state of the debug registers before saving,
# and restore it afterwards
dbgregs = vm.read_registers(("dr0", "dr1", "dr2", "dr3", "dr6", "dr7"))
# interrupt and save the VM state
process_was_stopped = exe_ctx.process.is_stopped
print("* Saving the VM state")
vm.interrupt_and_take_snapshot()
print("* State saved")
# restore soft breakpoints
exe_ctx.target.EnableAllBreakpoints()
# restore hard breakpoints
vm.write_registers(dbgregs)
if not process_was_stopped:
# display stop info
_exec_cmd(debugger, "process status")
@_attached
def fdp_restore(debugger, command, exe_ctx, result, internal_dict):
"""
Restore the attached macOS VM to the last saved state.
Breakpoints are deleted on restoring.
"""
# interrupt and restore the VM state
print("* Restoring the last saved VM state")
if vm.interrupt_and_restore_last_snapshot():
print("* State restored")
# do a full reattach (the kernel load address may differ)
fdp_attach(debugger, vm.name, exe_ctx, result, internal_dict)
else:
print("* No saved state found")
@_attached
def fdp_interrupt(debugger, command, exe_ctx, result, internal_dict):
"""
Interrupt (pause) the execution of the attached macOS VM.
"""
vm.interrupt()
@_attached
def fdp_hbreakpoint(debugger, command, exe_ctx, result, internal_dict):
"""
Set or unset hardware breakpoints.
Hardware breakpoints are implemented using the debug registers DR0, DR1, DR2 and DR3.
Consequently, a maximum of four hardware breakpoints can be active simultaneously.
"""
parser = argparse.ArgumentParser(prog="fdp-hbreakpoint")
subparsers = parser.add_subparsers(dest="action")
set_parser = subparsers.add_parser("set")
set_parser.add_argument(
"trigger",
choices={"e", "rw", "w"},
help="Type of memory access to trap on: execute, read/write, or write only.",
)
set_parser.add_argument(
"nreg",
type=lambda i: int(i, 0),
choices={0, 1, 2, 3},
help="Breakpoint slot to use (corresponding to registers ).",
)
set_parser.add_argument(
"expression", help="Breakpoint address or expression to be evaluated as such."
)
unset_parser = subparsers.add_parser("unset")
unset_parser.add_argument(
"nreg",
type=lambda i: int(i, 0),
choices={0, 1, 2, 3},
help="Breakpoint slot to free (corresponding to registers DR0, DR1, DR2 and DR3).",
)
args = parser.parse_args(shlex.split(command))
if args.action == "set":
vaddr = _evaluate_expression(exe_ctx, args.expression)
if vaddr:
vm.set_hard_breakpoint(args.trigger, args.nreg, vaddr)
print("* Hardware breakpoint set: address = 0x{:016x}".format(vaddr))
else:
print("* Invalid expression")
elif args.action == "unset":
vm.unset_hard_breakpoint(args.nreg)
print("* Hardware breakpoint unset")
else:
raise AssertionError
@_attached
def fdp_test(debugger, command, exe_ctx, result, internal_dict):
"""
Run some tests.
Warning: tests change the state of the machine and modify the last saved state!
"""
regs = {
"rax",
"rbx",
"rcx",
"rdx",
"rdi",
"rsi",
"rbp",
"rsp",
"r8",
"r9",
"r10",
"r11",
"r12",
"r13",
"r14",
"r15",
"rip",
"rflags",
"cs",
"fs",
"gs",
}
def _t1():
print("* Halt/resume/single step")
vm.halt()
assert vm.is_state_halted()
vm.resume()
assert not vm.is_state_halted()
vm.halt()
for _ in range(100):
vm.single_step()
assert vm.is_state_halted()
def _t2():
print("* Read/write registers")
vm.halt()
orig_values = vm.read_registers(regs)
new_values = {reg: 0x1337 for reg in regs}
for reg in regs:
vm.write_register(reg, new_values[reg])
# modifications to RFLAGS should be disabled
assert vm.read_register("rflags") == orig_values["rflags"]
del new_values["rflags"]
assert vm.read_registers(regs - {"rflags"}) == new_values
vm.write_registers(orig_values)
for reg in regs:
assert vm.read_register(reg) == orig_values[reg]
def _t3():
print("* Read/write virtual memory")
vm.halt()
data = vm.read_virtual_memory(vm.read_register("rsp"), 0x8)
new_data = b"ABCDEFGH"
vm.write_virtual_memory(vm.read_register("rsp"), new_data)
assert vm.read_virtual_memory(vm.read_register("rsp"), 0x8) == new_data
vm.write_virtual_memory(vm.read_register("rsp"), data)
assert vm.read_virtual_memory(vm.read_register("rsp"), 0x8) == data
def _t4():
print("* Save/restore")
vm.halt()
orig_values = vm.read_registers(regs)
orig_data = vm.read_virtual_memory(vm.read_register("rsp"), 0x100)
vm.interrupt_and_take_snapshot()
assert vm.is_state_halted()
vm.write_virtual_memory(vm.read_register("rsp"), b"A" * 0x100)
vm.single_step()
vm.resume()
time.sleep(0.100)
vm.interrupt_and_restore_last_snapshot()
assert vm.is_state_halted()
assert not vm.is_breakpoint_hit()
assert vm.read_registers(regs) == orig_values
assert vm.read_virtual_memory(vm.read_register("rsp"), 0x100) == orig_data
def _t5():
print("* Debug registers")
vm.halt()
vm.write_register("dr7", 0x0)
vm.set_hard_breakpoint("rw", 0x0, 0x1234)
assert vm.read_register("dr0") == 0x1234
assert vm.read_register("dr7") == 0b00000000000000110000000000000010
vm.set_hard_breakpoint("e", 0x0, 0x1234)
assert vm.read_register("dr7") == 0b00000000000000000000000000000010
vm.set_hard_breakpoint("w", 0x0, 0x1234)
assert vm.read_register("dr7") == 0b00000000000000010000000000000010
vm.set_hard_breakpoint("rw", 0x1, 0x1234)
assert vm.read_register("dr1") == 0x1234
assert vm.read_register("dr7") == 0b00000000001100010000000000001010
vm.set_hard_breakpoint("rw", 0x2, 0x1234)
assert vm.read_register("dr2") == 0x1234
assert vm.read_register("dr7") == 0b00000011001100010000000000101010
vm.set_hard_breakpoint("rw", 0x3, 0x1234)
assert vm.read_register("dr3") == 0x1234
assert vm.read_register("dr7") == 0b00110011001100010000000010101010
vm.unset_hard_breakpoint(0x0)
assert vm.read_register("dr7") == 0b00110011001100010000000010101000
vm.unset_hard_breakpoint(0x1)
assert vm.read_register("dr7") == 0b00110011001100010000000010100000
vm.unset_hard_breakpoint(0x2)
assert vm.read_register("dr7") == 0b00110011001100010000000010000000
vm.unset_hard_breakpoint(0x3)
assert vm.read_register("dr7") == 0b00110011001100010000000000000000
def _t6():
print("* Soft/hard exec breakpoint")
vm.halt()
# keep in mind that FDP soft and page breakpoints do not work just after a restore
# (see VMR3AddSoftBreakpoint())
vm.unset_all_breakpoints()
vm.single_step()
assert not vm.is_breakpoint_hit()
vm.interrupt_and_take_snapshot()
vm.single_step()
vm.single_step()
rip = vm.read_register("rip")
vm.interrupt_and_restore_last_snapshot()
vm.single_step()
bpid = vm.set_soft_exec_breakpoint(rip)
assert 0 <= bpid <= 254
assert not vm.is_breakpoint_hit()
vm.resume()
time.sleep(0.100)
vm.halt()
assert vm.is_breakpoint_hit()
vm.interrupt_and_restore_last_snapshot()
vm.single_step()
vm.set_hard_breakpoint("e", 0x0, rip)
assert not vm.is_breakpoint_hit()
vm.resume()
time.sleep(0.100)
vm.halt()
assert vm.is_breakpoint_hit()
if exe_ctx.process.is_running:
vm.interrupt()
vm.unset_all_breakpoints()
for _t in (_t1, _t2, _t3, _t4, _t5, _t6):
_t()
print("* All tests passed!")
def __lldb_init_module(debugger, internal_dict):
# FDP
debugger.HandleCommand("command script add -f lldbagility.fdp_attach fdp-attach")
debugger.HandleCommand("command script add -f lldbagility.fdp_save fdp-save")
debugger.HandleCommand("command script add -f lldbagility.fdp_restore fdp-restore")
debugger.HandleCommand(
"command script add -f lldbagility.fdp_interrupt fdp-interrupt"
)
debugger.HandleCommand(
"command script add -f lldbagility.fdp_hbreakpoint fdp-hbreakpoint"
)
debugger.HandleCommand("command script add -f lldbagility.fdp_test fdp-test")
debugger.HandleCommand("command alias fa fdp-attach")
debugger.HandleCommand("command alias fs fdp-save")
debugger.HandleCommand("command alias fr fdp-restore")
debugger.HandleCommand("command alias fi fdp-interrupt")
debugger.HandleCommand("command alias fh fdp-hbreakpoint")
# VMSN
debugger.HandleCommand("command script add -f lldbagility.vmsn_attach vmsn-attach")
debugger.HandleCommand("command alias va vmsn-attach")
| 2.203125 | 2 |
pytglib/api/functions/write_generated_file_part.py | iTeam-co/pytglib | 6 | 12796354 |
from ..utils import Object
class WriteGeneratedFilePart(Object):
"""
Writes a part of a generated file. This method is intended to be used only if the client has no direct access to TDLib's file system, because it is usually slower than a direct write to the destination file
Attributes:
ID (:obj:`str`): ``WriteGeneratedFilePart``
Args:
generation_id (:obj:`int`):
The identifier of the generation process
offset (:obj:`int`):
The offset from which to write the data to the file
data (:obj:`bytes`):
The data to write
Returns:
Ok
Raises:
:class:`telegram.Error`
"""
ID = "writeGeneratedFilePart"
def __init__(self, generation_id, offset, data, extra=None, **kwargs):
self.extra = extra
self.generation_id = generation_id # int
self.offset = offset # int
self.data = data # bytes
@staticmethod
def read(q: dict, *args) -> "WriteGeneratedFilePart":
generation_id = q.get('generation_id')
offset = q.get('offset')
data = q.get('data')
return WriteGeneratedFilePart(generation_id, offset, data)
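# Minimal illustration (not part of the original module; the generation_id, offset and
# data values are made-up placeholders): build the request described in the docstring
# above and round-trip it through read().
#
#     part = WriteGeneratedFilePart(generation_id=123456, offset=0, data=b"chunk of bytes")
#     restored = WriteGeneratedFilePart.read(
#         {"generation_id": part.generation_id, "offset": part.offset, "data": part.data})
#     assert (restored.generation_id, restored.offset, restored.data) == (123456, 0, b"chunk of bytes")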
| 3.140625 | 3 |
Month 03/Week 04/Day 03/c.py | KevinKnott/Coding-Review | 0 | 12796355 |
# Merge k Sorted Lists: https://leetcode.com/problems/merge-k-sorted-lists/
# You are given an array of k linked-lists lists, each linked-list is sorted in ascending order.
# Merge all the linked-lists into one sorted linked-list and return it.
from typing import List, Optional
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
# This problem is actually just a merge sort no need to explain really
# There are two solutions one with a for loop that creates a new list of ll
# and one that simply merges the current list which saves us o (N) space
# to become O(1)
class Solution:
def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:
if lists is None or len(lists) == 0:
return None
interval = 1
total = len(lists)
while interval < total:
for i in range(0, total - interval, interval * 2):
lists[i] = self.merge2(lists[i], lists[i + interval])
interval *= 2
return lists[0] if total > 0 else []
def merge2(self, l1, l2):
dummy = ListNode()
cur = dummy
while l1 and l2:
if l1.val < l2.val:
cur.next = l1
l1 = l1.next
else:
cur.next = l2
l2 = l2.next
cur = cur.next
if l1:
cur.next = l1
else:
cur.next = l2
return dummy.next
# This works although I forgot that my range needs to decrease the number of nodes to review
# by the number already merged (going up 2 at a time)
# This runs in o(nlogk) where k is the number of ll and uses O(1) additional space
# Score Card
# Did I need hints? Nope
# Did you finish within 30 min? 12
# Was the solution optimal? Yup
# Were there any bugs? None
# 5 5 5 5 = 5
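# Illustrative check (not part of the original solution): build three small sorted
# lists, merge them with the Solution above, and print the merged values.
def _build(values):
    head = cur = ListNode(values[0])
    for v in values[1:]:
        cur.next = ListNode(v)
        cur = cur.next
    return head

if __name__ == "__main__":
    merged = Solution().mergeKLists([_build([1, 4, 7]), _build([2, 5]), _build([3, 6, 8])])
    out = []
    while merged:
        out.append(merged.val)
        merged = merged.next
    print(out)  # expected: [1, 2, 3, 4, 5, 6, 7, 8]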
| 3.828125 | 4 |
pset6/crack.py | holodon/CS50 | 0 | 12796356 |
##
# Cracks up to 4 letter alphabetical passwords by bruteforce
##
import sys
import crypt
import itertools
def main():
# check if called with exactly one argument
if len(sys.argv) != 2:
show_usage()
exit(1)
# all possible characters
aA = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
h = sys.argv[1]
# check the hash length
if len(h) != 13:
show_usage()
exit(1)
# crack with itertools
for i in range(1, 5):
for c in itertools.product(aA, repeat=i):
p = ''.join(c)
if crypt.crypt(''.join(p), h) == h:
print(p)
exit(0)
# not cracked
print("All variations tested - none matched!")
exit(1)
# shows usage
def show_usage():
print("Usage: ./crack hash")
if __name__ == "__main__":
main()
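# To try it out (illustration, not part of the original file): generate a 13-character
# DES-crypt hash first, e.g.
#   python3 -c "import crypt; print(crypt.crypt('ab', '50'))"
# and then run:  ./crack.py <printed hash>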
| 3.03125 | 3 |
awards/signals.py | Sundaybrian/prodev | 0 | 12796357 | # signal fired after an obj is saved, in this case when a Post is created
from django.db.models.signals import post_save
# Post is the model that sends the signal
from .models import Post
# receiver of the signal
from django.dispatch import receiver
from .models import Review
@receiver(post_save,sender=Post)
def create_review(sender,instance,created,**kwargs):
'''
    post_save: is the signal that is fired after an object is saved
    Post: the model that is the sender of the signal
    receiver: is the create_review function that catches the signal and performs some task
    instance: is the instance of the Post class that was saved
    created: True if a new post was created
'''
if created:
Review.objects.create(post=instance)
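# Note (illustration, not part of the original file): the @receiver decorator above is
# equivalent to wiring the receiver explicitly, e.g. in an AppConfig.ready() method:
#
#     post_save.connect(create_review, sender=Post)
#
# Use one style or the other, not both.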
| 2.5 | 2 |
sparse_matrices/sparse_gauss.py | j-adamczyk/Matrix_algorithms | 1 | 12796358 |
from copy import deepcopy
from typing import Union
import numpy as np
from .sparse_matrices import CoordinateSparseMatrix, CSRMatrix
def sparse_gauss_elimination_row(A: Union[CoordinateSparseMatrix, CSRMatrix]) \
-> Union[CoordinateSparseMatrix, CSRMatrix]:
"""
Performs Gaussian elimination on sparse matrix A row-wise. Allows either
coordinate format or CSR format.
:param A: sparse square matrix of shape (n, n)
:return: matrix A after Gaussian elimination
"""
A = deepcopy(A)
A.dtype = np.float
if isinstance(A, CoordinateSparseMatrix):
A = _coordinate_row(A)
elif isinstance(A, CSRMatrix):
pass # not yet implemented
return A
def _coordinate_row(A: CoordinateSparseMatrix) -> CoordinateSparseMatrix:
"""
Performs Gaussian elimination on sparse matrix A in the coordinate format
row-wise.
:param A: sparse square matrix in the coordinate format of shape (n, n)
:return: matrix A after Gaussian elimination
"""
n = A.shape[0]
for k in range(n - 1):
Akk = A.get(k, k)
assert Akk != 0, "Akk = 0"
for i in range(k + 1, n):
Aki, ki_index = A.get(k, i, index=True)
if ki_index != -1:
A.vals[ki_index] /= Akk
for j in range(k + 1, n):
Ajk = A.get(j, k)
if Ajk == 0:
continue
for i in range(k + 1, n):
Aki = A.get(k, i)
if Aki == 0:
continue
Aji, ji_index = A.get(j, i, index=True)
val = (-1) * (Aki * Ajk)
if ji_index >= 0:
# value already exists
A.vals[ji_index] += val
else:
# we have to insert new non-zero value
A.insert(j, i, val)
return A
| 2.625 | 3 |
setup.py | kim-younghan/MultiResUNet | 23 | 12796359 | from setuptools import setup, find_packages
setup(
name = 'multiresunet',
version = '0.1',
description = 'MultiResUNet implementation in PyTorch; MultiResUNet: Rethinking the U-Net Architecture for Multimodal',
author = '<NAME>',
author_email = '<EMAIL>',
install_requires= [],
packages = find_packages(),
python_requires = '>=3.6'
)
| 1.195313 | 1 |
generate.py | chteuchteu/Material-Colors-SCSS-Variables | 0 | 12796360 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import urllib.request
from bs4 import BeautifulSoup
import re
material_guidelines_url = 'http://www.google.com/design/spec/style/color.html#color-color-palette'
output_file = 'dist/_material-colors.scss'
foreground_color_light = '#ffffff'
foreground_color_dark = '#000000'
def slugify(string):
return re.sub(r'[-\s]+', '-',
(re.sub(r'[^\w\s-]', '', string).strip().lower()))
def print_scss_map(output_handle, name, keys, values):
output_handle.write('$' + name + ': (\n')
longest_key = max(len(key) for key in keys)
pattern = ' "{key}": {indent}{value},\n'
for key, value in zip(keys, values):
output_handle.write(fill_placeholders(pattern, {
'key': key,
'value': value,
'indent': ' ' * (longest_key - len(key))
}))
output_handle.write(');\n')
def print_scss_vars(output_handle, names, values):
indent = max(len(name) for name in names)
pattern = '${var_name}: {indent}{value};\n'
for name, value in zip(names, values):
output_handle.write(fill_placeholders(pattern, {
'var_name': name,
'value': value,
'indent': ' ' * (indent - len(name))
}))
def has_class(element, classname):
class_attr = element.get('class')
if class_attr is None:
return False
return classname in class_attr
def fill_placeholders(string, dict):
for what, with_what in dict.items():
string = string.replace('{' + what + '}', with_what)
return string
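# Quick illustration of the two helpers above (not part of the original script):
#   slugify("Deep Purple")                                   -> "deep-purple"
#   fill_placeholders("color: {value};", {"value": "#fff"})  -> "color: #fff;"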
# Download & parse guidelines HTML
response = urllib.request.urlopen(material_guidelines_url)
data = response.read()
raw_html = data.decode('utf-8')
parsed_html = BeautifulSoup(raw_html, 'html.parser')
# Parse it!
html_palette = parsed_html.body.find('div', attrs={'class': 'color-palette'})
color_groups = html_palette.find_all('section', attrs={'class', 'color-group'})
colors = []
for color_group in color_groups:
name_span = color_group.find(attrs={'class', 'name'})
# We skip black + white colors
if name_span is None:
continue
color_name = name_span.text
color_slug = slugify(color_name)
# Find each shade
html_shades = color_group.find_all('li')
shades = []
for shade in html_shades:
if has_class(shade, 'main-color'):
continue
shade_name = shade.find(attrs={'class', 'shade'}).text
hex = shade.find(attrs={'class', 'hex'}).text
foreground = foreground_color_dark if has_class(shade, 'dark') else foreground_color_light
shades.append({
'name': shade_name,
'hex': hex,
'foreground': foreground,
})
colors.append({
'name': color_name,
'slug': color_slug,
'shades': shades
})
# Print vars & map definitions to output file
with open(output_file, 'w') as output:
output.truncate()
output.writelines('\n'.join([
"/**",
" * Material-Colors-SCSS-Variables",
" * https://github.com/chteuchteu/Material-Colors-SCSS-Variables",
" */\n\n"
]))
for color in colors:
color_name = color['name']
color_slug = color['slug']
shades = color['shades']
# Write to file
output.writelines('\n'.join([
'//',
'// ' + color_name,
'//',
''
]))
# Map
print_scss_map(output, 'color-' + color_slug + '-list',
[shade['name'] for shade in shades],
[shade['hex'] for shade in shades])
output.write('\n')
# Separate colors
# Main shade
main_shade = next(shade for shade in shades if shade['name'] == '500')
print_scss_vars(output, ['color-' + color_slug], [main_shade['hex']])
output.write('\n')
# All shades
print_scss_vars(output,
['color-' + color_slug + '-' + shade['name'] for shade in shades],
[shade['hex'] for shade in shades])
# Foreground color
output.writelines('\n'.join([
'',
'// Foreground',
''
]))
print_scss_map(output, 'color-' + color_slug + '-foreground-list',
[shade['name'] for shade in shades],
[shade['foreground'] for shade in shades])
output.write('\n')
# Separate colors
# Main shade
main_shade = next(shade for shade in shades if shade['name'] == '500')
        print_scss_vars(output, ['color-' + color_slug + '-foreground'], [main_shade['foreground']])
output.write('\n')
print_scss_vars(output,
['color-' + color_slug + '-' + shade['name'] + '-foreground' for shade in shades],
                        [shade['foreground'] for shade in shades])
output.write('\n\n')
# Print a map of all colors
print_scss_map(output, 'colors',
[color['slug'] for color in colors],
['$color-' + color['slug'] + '-list' for color in colors])
colors_count = len(colors)
shades_count = sum([len(color['shades']) for color in colors])
print(output_file + ' created, containing ' + str(colors_count) + ' colors and ' + str(shades_count) + ' shades')
| 2.75 | 3 |
buy_stuff.py | learning-python/testing | 0 | 12796361 |
import calc
class Pocket:
def __init__(self, settings):
self.money = settings['money']
self.apples = settings['apples']
self.price = settings['price']
def buy_apples(self, num_apples):
needed = calc.multiply(self.price, num_apples)
would_be_left = calc.subtract(self.money, needed)
if would_be_left >= 0:
self.money -= self.price * num_apples
self.apples += num_apples
else:
raise Exception("Not enough money to buy " + str(num_apples) + " apples") | 3.640625 | 4 |
programming_lesson5/make_coin_sorter_version5.py | zhazhijibaba/zhazhijibaba_programming_lessons | 0 | 12796362 |
from vector import *
import openmesh as om
import numpy as np
# version3 slope 15 degree
theta = np.pi / 12.0
# https://en.wikipedia.org/wiki/Coins_of_the_Canadian_dollar
# create mesh for coin sorter
# Nickel 5 cents 21.2mm (1.76mm)
# Dime 10 cents 18.03mm (1.22mm)
# Quatoer 25 cents 23.88mm (1.58mm)
# Loonie $1 26.5mm (1.75mm)
# Toonie $2 28mm (1.8mm)
dd0 = [18.03, 21.2, 23.88, 26.5, 28]
d0 = 0.8
dd = [d + d0 for d in dd0]
d1 = 35
d2 = 2
# slope 15 degree
d3 = d1 / np.cos(theta)
mesh = om.TriMesh()
# part 1 sorter
x0 = 0
# screen layer
for i in range(len(dd)):
ri = dd[i] / 2
make_block_mesh(mesh, [x0, 0, 0], [d3 - dd[i], 0, 0], [0, d1, 0], [0, 0, d2])
x0 = x0 + d3 - dd[i]
make_pipe_square_mesh(mesh, [x0 + ri, ri, 0], [x0 + ri, ri, d2], p2=[x0 + ri, 0.0, 0.0], r1=ri, r2=ri, n=256)
make_block_mesh(mesh, [x0, dd[i], 0], [dd[i], 0, 0], [0, d1 - dd[i], 0], [0, 0, d2])
x0 = x0 + dd[i]
make_block_mesh(mesh, [x0, 0, 0], [d2, 0, 0], [0, d1, 0], [0, 0, d2])
x0 = x0 + d2
# wall block
# total length of sortor
d4 = x0
make_block_mesh(mesh, [0, 0, 0], [0, -d2, 0], [d4, 0, 0], [0, 0, 8*d2])
om.write_mesh("coin_sorter_v5_p1.obj", mesh)
# part 2 holder
mesh = om.TriMesh()
d5 = d4 * np.cos(theta)
d6 = d2 * np.cos(theta)
d7 = 40 + 2*d2
d8 = 1.0
x1 = 0
make_block_mesh(mesh, [0, 0, 0], [d5, 0, 0], [0, d1, 0], [0, 0, d2])
make_block_mesh(mesh, [x1, 0, 0], [d6, 0, 0], [0, d1, 0], [0, 0, d7 + d4 * np.sin(theta)])
make_block_mesh(mesh, [x1, 0, 0], [d6 + 2*d2, 0, 0], [0, d8, 0], [0, 0, d7 + d4 * np.sin(theta) + 6*d2])
make_block_triangle_mesh(mesh, [x1, 0, d7 + d4 * np.sin(theta)], [0, d1, d1*np.tan(theta)], [0, d1, 0], [d6, 0, 0])
x1 = x1 + d6
for i in range(len(dd)):
x1 = x1 + d1
dh = d7 + (d4 - (i + 1) * d3) * np.sin(theta)
make_block_mesh(mesh, [x1, 0, 0], [d8, 0, 0], [0, d1, 0], [0, 0, dh])
make_block_triangle_mesh(mesh, [x1, 0, dh], [0, d1, d1*np.tan(theta)], [0, d1, 0], [d8, 0, 0])
if i < len(dd) - 1:
make_block_mesh(mesh, [x1, 0, 0], [d8 + 2*d2, 0, 0], [0, d8, 0], [0, 0, dh + 6*d2])
else:
make_block_mesh(mesh, [x1, 0, 0], [d8 + d8, 0, 0], [0, d8, 0], [0, 0, dh + 6*d2])
make_block_mesh(mesh, [x1 + d8, 0, 0], [d8, 0, 0], [0, d1, 0], [0, 0, dh + 6*d2])
make_block_triangle_mesh(mesh, [x1 + d8, 0, dh + 6*d2], [0, d1, d1*np.tan(theta)], [0, d1, 0], [d8, 0, 0])
om.write_mesh("coin_sorter_v5_p2.obj", mesh)
# part 2 coin collector
mesh = om.TriMesh()
d9 = d1 - d8
d10 = d9 - 2*d8
d11 = d7 - 2*d2
make_rod_mesh(mesh, [0, 0, 0], [0, 0, d8], p2=[1.0, 0.0, 0.0], r=d9 / 2.0, n=256)
make_pipe_mesh(mesh, [0,0,d8], [0,0,d11], p2=[1.0, 0.0, 0.0], r1=d10/2.0, r2=d9/2.0, n=256)
om.write_mesh("coin_sorter_v5_p3.obj", mesh)
| 2.3125 | 2 |
examples/image/plot_dataset_mtf.py | Pandinosaurus/pyts | 1,217 | 12796363 | """
====================================
Data set of Markov transition fields
====================================
A Markov transition field is an image obtained from a time series, representing
a field of transition probabilities for a discretized time series.
Different strategies can be used to bin time series.
It is implemented as :class:`pyts.image.MarkovTransitionField`.
In this example, we consider the training samples of the
`GunPoint dataset <http://timeseriesclassification.com/description.php?Dataset=GunPoint>`_,
consisting of 50 univariate time series of length 150.
The Markov transition field of each time series is independently computed and
the 50 Markov transition fields are plotted.
""" # noqa:E501
# Author: <NAME> <<EMAIL>>
# License: BSD-3-Clause
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from pyts.image import MarkovTransitionField
from pyts.datasets import load_gunpoint
# Load the GunPoint dataset
X, _, _, _ = load_gunpoint(return_X_y=True)
# Compute the Markov transition fields for all the time series
mtf = MarkovTransitionField(n_bins=8)
X_mtf = mtf.fit_transform(X)
# Plot the 50 Markov transition fields
fig = plt.figure(figsize=(10, 5))
grid = ImageGrid(fig, 111, nrows_ncols=(5, 10), axes_pad=0.1, share_all=True,
cbar_mode='single')
for i, ax in enumerate(grid):
im = ax.imshow(X_mtf[i], cmap='rainbow', origin='lower', vmin=0., vmax=1.)
grid[0].get_yaxis().set_ticks([])
grid[0].get_xaxis().set_ticks([])
plt.colorbar(im, cax=grid.cbar_axes[0])
ax.cax.toggle_label(True)
fig.suptitle("Markov transition fields for the 50 time series in the "
"'GunPoint' dataset", y=0.92)
plt.show()
| 3.046875 | 3 |
vprocess/older/yolo_detection_face_reco.py | rezeck/itelicam | 0 | 12796364 | #!/usr/bin/env python3.5
import os
import dlib
import numpy as np
import cv2
import time
import darknet
from ctypes import *
import math
import random
class YOLO_NN:
def __init__(self, yoloDataFolder):
self.configPath = yoloDataFolder + "/cfg/yolov3-tiny.cfg"
self.weightPath = yoloDataFolder + "/yolov3-tiny.weights"
self.metaPath = yoloDataFolder + "/cfg/coco.data"
print("self.configPath: " + self.configPath)
print("self.weightPath: " + self.weightPath)
print("self.metaPath: " + self.metaPath)
self.netMain = None
self.metaMain = None
self.altNames = None
if not os.path.exists(self.configPath):
raise ValueError("Invalid config path `" +
os.path.abspath(self.configPath)+"`")
if not os.path.exists(self.weightPath):
raise ValueError("Invalid weight path `" +
os.path.abspath(self.weightPath)+"`")
if not os.path.exists(self.metaPath):
raise ValueError("Invalid data file path `" +
os.path.abspath(self.metaPath)+"`")
if self.netMain is None:
self.netMain = darknet.load_net_custom(self.configPath.encode(
"ascii"), self.weightPath.encode("ascii"), 0, 1) # batch size = 1
if self.metaMain is None:
self.metaMain = darknet.load_meta(self.metaPath.encode("ascii"))
if self.altNames is None:
try:
with open(self.metaPath) as metaFH:
metaContents = metaFH.read()
import re
match = re.search("names *= *(.*)$", metaContents,
re.IGNORECASE | re.MULTILINE)
if match:
result = match.group(1)
else:
result = None
try:
if os.path.exists(result):
with open(result) as namesFH:
namesList = namesFH.read().strip().split("\n")
self.altNames = [x.strip() for x in namesList]
except TypeError:
pass
except Exception:
pass
# Create an image we reuse for each detect
self.darknet_image = darknet.make_image(darknet.network_width(self.netMain),
darknet.network_height(self.netMain),3)
self.data_dir = os.path.expanduser(yoloDataFolder+'/face_data')
self.faces_folder_path = self.data_dir + '/users/'
self.face_detector = dlib.get_frontal_face_detector()
self.shape_predictor = dlib.shape_predictor(self.data_dir + '/dlib/shape_predictor_68_face_landmarks.dat')
self.face_recognition_model = dlib.face_recognition_model_v1(self.data_dir + '/dlib/dlib_face_recognition_resnet_model_v1.dat')
def convertBack(self, x, y, w, h):
xmin = int(round(x - (w / 2)))
xmax = int(round(x + (w / 2)))
ymin = int(round(y - (h / 2)))
ymax = int(round(y + (h / 2)))
return xmin, ymin, xmax, ymax
def cvDrawBoxes(self, detections, img):
for detection in detections:
x, y, w, h = detection[2][0],\
detection[2][1],\
detection[2][2],\
detection[2][3]
xmin, ymin, xmax, ymax = self.convertBack(
float(x), float(y), float(w), float(h))
pt1 = (xmin, ymin)
pt2 = (xmax, ymax)
cv2.rectangle(img, pt1, pt2, (0, 255, 0), 1)
cv2.putText(img,
detection[0].decode() +
" [" + str(round(detection[1] * 100, 2)) + "]",
(pt1[0], pt1[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
[0, 255, 0], 2)
return img
def get_face_encodings(self, face):
bounds = self.face_detector(face, 1)
faces_landmarks = [self.shape_predictor(face, face_bounds) for face_bounds in bounds]
try:
h = [np.array(self.face_recognition_model.compute_face_descriptor(face, face_pose, 1)) for face_pose in faces_landmarks]
except:
return []
return h
def get_face_matches(self, known_faces, face):
return np.linalg.norm(known_faces - face, axis=1)
def find_match(self, known_faces, person_names, face):
matches = self.get_face_matches(known_faces, face) # get a list of True/False
min_index = matches.argmin()
min_value = matches[min_index]
if min_value < 0.55:
return person_names[min_index]+"! ({0:.2f})".format(min_value)
if min_value < 0.58:
return person_names[min_index]+" ({0:.2f})".format(min_value)
if min_value < 0.65:
return person_names[min_index]+"?"+" ({0:.2f})".format(min_value)
return 'Not Found'
def load_face_encodings(self):
image_filenames = filter(lambda x: x.endswith('.jpg'), os.listdir(self.faces_folder_path))
image_filenames = sorted(image_filenames)
person_names = [x[:-4] for x in image_filenames]
full_paths_to_images = [self.faces_folder_path + x for x in image_filenames]
face_encodings = []
win = dlib.image_window()
for path_to_image in full_paths_to_images:
print("Loading user: " + path_to_image)
#face = io.imread(path_to_image)
face = cv2.imread(path_to_image)
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
faces_bounds = self.face_detector(face, 1)
if len(faces_bounds) != 1:
print("Expected one and only one face per image: " + path_to_image + " - it has " + str(len(faces_bounds)))
exit()
face_bounds = faces_bounds[0]
face_landmarks = self.shape_predictor(face, face_bounds)
face_encoding = np.array(self.face_recognition_model.compute_face_descriptor(face, face_landmarks, 1))
win.clear_overlay()
win.set_image(face)
win.add_overlay(face_bounds)
win.add_overlay(face_landmarks)
face_encodings.append(face_encoding)
#print(face_encoding)
#dlib.hit_enter_to_continue()
return face_encodings, person_names
def detect(self, frame_read):
prev_time = time.time()
frame_resized = cv2.resize(frame_read,
                                   (darknet.network_width(self.netMain),
                                    darknet.network_height(self.netMain)),
interpolation=cv2.INTER_LINEAR)
frame_rgb = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2RGB)
darknet.copy_image_from_bytes(self.darknet_image, frame_rgb.tobytes())
detections = darknet.detect_image(self.netMain, self.metaMain, self.darknet_image, thresh=0.25)
#print(1/(time.time()-prev_time))
return detections
# function to get the output layer names
# in the architecture
def get_output_layers(self,net):
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return output_layers
# function to draw bounding box on the detected object with class name
def draw_bounding_box(self,img, class_id, confidence, x, y, x_plus_w, y_plus_h):
cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), (0, 0, 255), 2)
#cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
if __name__ == "__main__":
# Start Yolo Setup
rn = YOLO_NN('.')
# initialize video input
cap = cv2.VideoCapture(1)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
face_encodings, person_names = rn.load_face_encodings()
faceClassifier = cv2.CascadeClassifier(rn.data_dir + '/dlib/haarcascade_frontalface_default.xml')
#rn.recognize_faces_in_video(face_encodings, person_names)
while True:
ret, frame_read = cap.read()
draw_frame = frame_read.copy()
gray = cv2.cvtColor(frame_read, cv2.COLOR_BGR2GRAY)
overlay = frame_read.copy()
cv2.rectangle(overlay, (0, 0), (640, 35), (0, 0, 0), -1)
alpha = 0.8
draw_frame = cv2.addWeighted(overlay, alpha, draw_frame, 1 - alpha, 0)
# Yolo Detection
detections = rn.detect(frame_read.copy())
filter_detections = []
n_users = 0
n_persons = 0
for detection in detections:
if detection[0] == b'person': # It is a person
filter_detections.append(detection)
if len(filter_detections) == 0: # Case Yolo didn't detected any person, try with dlib
face_rects = faceClassifier.detectMultiScale( # Detect faces with dlib
gray,
scaleFactor = 1.1,
minNeighbors = 5,
minSize = (50, 50),
flags = cv2.CASCADE_SCALE_IMAGE)
n_persons = len(face_rects)
if len(face_rects) > 0: # Case find any face
for (x, y, w, h) in face_rects:
face = draw_frame[y:y + h, x:x + w]
face_encodings_in_image = rn.get_face_encodings(face)
if (face_encodings_in_image):
match = rn.find_match(face_encodings, person_names, face_encodings_in_image[0])
if match == "Not Found":
cv2.putText(draw_frame, "Unknow", (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
else:
cv2.putText(draw_frame, match, (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
n_users += 1
else:
cv2.putText(draw_frame, "Unknow", (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
else:
for detection in filter_detections:
x1, y1, w1, h1 = detection[2][0],\
detection[2][1],\
detection[2][2],\
detection[2][3]
xmin, ymin, xmax, ymax = rn.convertBack(
float(x1), float(y1), float(w1), float(h1))
sx = 640.0/416.0
sy = 360.0/416.0
xmin = int(xmin*sx)
ymin = int(ymin*sy)
xmax = int(xmax*sx)
ymax = int(ymax*sy)
pt1 = (xmin, ymin)
pt2 = (xmax, ymax)
cropped = gray[ymin:ymax, xmin:xmax]
face_rects = faceClassifier.detectMultiScale( # Detect faces with dlib
gray,
scaleFactor = 1.1,
minNeighbors = 5,
minSize = (50, 50),
flags = cv2.CASCADE_SCALE_IMAGE)
n_persons += 1
if len(face_rects) > 0:
for (x, y, w, h) in face_rects:
face = cropped[y:y + h, x:x + w]
face_encodings_in_image = rn.get_face_encodings(face)
#x += xmin
#y += ymin
if (face_encodings_in_image):
match = rn.find_match(face_encodings, person_names, face_encodings_in_image[0])
if match == "Not Found":
cv2.putText(draw_frame, "Unknow", (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
else:
cv2.putText(draw_frame, match, (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
n_users += 1
else:
cv2.putText(draw_frame, "Unknow", (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
else:
cv2.rectangle(draw_frame, pt1, pt2, (0, 0, 255), 2)
cv2.putText(draw_frame, "Unknow", (pt1[0], pt1[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(draw_frame, "InteliCam Users: " + str(n_users) + " | "+ \
"Persons: " + str(n_persons),
(5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
[255, 255, 255], 1)
cv2.imshow("Frame", draw_frame)
key = cv2.waitKey(3) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
cv2.destroyAllWindows() | 2.359375 | 2 |
color_guess.py | Software-Cat/Python-Graphics-Project | 0 | 12796365 | # import all the required python libaries: graphics and random
from graphics import *
import random
# create the graphics window and set background colour
win = GraphWin("Colour Guessing Game", 1000, 500)
win.setBackground('#232323')
# create a title for your game
titleBg = Rectangle(Point(0, 0), Point(1000, 135))
titleBg.setOutline('steelblue')
titleBg.setFill('steelblue')
titleBg.draw(win)
title = Text(Point(500, 67.5),"RGB Colour Guessing Game")
title.setTextColor('white')
title.setSize(48)
title.setFace('times roman')
title.draw(win)
colors = []
correctChoice = int
# generate random colors and questions
def randomise_answers():
global colors
global correctChoice
colors = []
for i in range(4):
rand_r = random.randint(0, 255)
rand_g = random.randint(0, 255)
rand_b = random.randint(0, 255)
colors.append([rand_r, rand_g, rand_b])
correctChoice = random.randint(0, 3)
randomise_answers()
squares = []
# create 4 squares of random colour evenly spaced across the page
def create_squares(x, y, sideLen, spacing):
global squares
squares = []
for i in range(4):
# create a square (Rectangle) that is positioned based on the current 'i' value
square = Rectangle(Point(x+i*sideLen+i*spacing, y), Point(x+(i+1)*sideLen+i*spacing, y+sideLen))
# set the fill of the square to the random values of r,g and b
square.setFill(color_rgb(colors[i][0], colors[i][1], colors[i][2]))
# draw the square in the window
square.draw(win)
squares.append(square)
create_squares(225, 325, 100, 50)
def wait_for_click():
while True:
# get the click position of the mouse
mousePos = win.getMouse()
mouseX = mousePos.getX()
mouseY = mousePos.getY()
# check if the mouse clicked on the correct square, if it did display correct otherwise incorrect
for i in range(4):
currentSquare = squares[i]
currentX1 = currentSquare.getP1().getX()
currentY1 = currentSquare.getP1().getY()
currentX2 = currentSquare.getP2().getX()
currentY2 = currentSquare.getP2().getY()
if mouseX > currentX1 and mouseX < currentX2 and mouseY > currentY1 and mouseY < currentY2:
return i
''' main game '''
gameover = False
# create a rectangle that fills the whole screen
bgRect = Rectangle(Point(0, 0), Point(1000, 500))
# create a Text box that will display the results of the guess (correct/incorrect)
resultText = Text(Point(500, 125),"")
resultText.setSize(128)
resultText.setFill('white')
# create a Text box that will display the rgb of the correct choice
questionText = Text(Point(500, 225), f"rgb({colors[correctChoice][0]}, {colors[correctChoice][1]}, {colors[correctChoice][2]})")
questionText.setFill('white')
questionText.setSize(25)
questionText.setStyle('bold')
questionText.draw(win)
# create a Text box that will display the score of the player
score = 0
scoreText = Text(Point(500, 155), f"SCORE: {score}")
scoreText.setFill('white')
scoreText.setSize(12)
scoreText.draw(win)
while gameover == False:
square_clicked = wait_for_click()
if square_clicked == correctChoice:
score += 1
scoreText.setText(f"SCORE: {score}")
randomise_answers()
create_squares(225, 325, 100, 50)
questionText.setText(f"rgb({colors[correctChoice][0]}, {colors[correctChoice][1]}, {colors[correctChoice][2]})")
else:
bgRect.setFill(color_rgb(colors[square_clicked][0], colors[square_clicked][1], colors[square_clicked][2]))
bgRect.draw(win)
resultText.setText("TOO BAD")
resultText.draw(win)
scoreText.setSize(24)
scoreText.anchor = Point(500, 350)
scoreText.undraw()
scoreText.draw(win)
gameover = True
# wait for click to close window
win.getMouse()
| 3.875 | 4 |
utils/scripts/OOOlevelGen/src/levels/Release_The_Cheese.py | fullscreennl/monkeyswipe | 0 | 12796366 |
import LevelBuilder
from sprites import *
def render(name,bg):
lb = LevelBuilder.LevelBuilder(name+".plist",background=bg)
lb.addObject(Beam.BeamSprite(x=217, y=83,width=167,height=36,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20 ))
lb.addObject(Wizard.WizardSprite(x=409,y=68))
lb.addObject(Enemy.EnemySprite(x=217, y=238,width=136,height=136,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ))
lb.addObject(Hero.HeroSprite(x=22, y=21,width=32,height=32))
lb.addObject(Star.StarSprite(x=217, y=238,width=32,height=32))
lb.addObject(Contacts.Contact(body1='Hero',body2=':hat_top',event_name='onReleaseStar'))
lb.addObject(Joints.RevoluteJoint(body1='Enemy',body2='Star',motor_speed='1',enable_motor='true',torque='1000',lower_angle='12',upper_angle='50',userData='star_joint',enable_limit='false',collide_connected='false'))
lb.render() | 2.171875 | 2 |
example.py | papamoose/nvidia-ml-py | 0 | 12796367 | #!/usr/bin/python3
import nvidia_smi
import json
mydict = nvidia_smi.JsonDeviceQuery()
# Example print JSON
print(json.dumps(mydict, indent=2))
| 2.390625 | 2 |
utests/cfg_up.py | georgsp/mdmTerminal2 | 24 | 12796368 |
import json
import unittest
import run
from lib.tools import config_updater
def dummy(*_, **__):
pass
def new_updater():
return config_updater.ConfigUpdater(CFG(), dummy)
def CFG():
return run.get_cfg()
class ConfigUpdater(unittest.TestCase):
ADD_5 = {'ip_server': '', 'ip': 1, 'two__': '2', 'three__': 3, 'four__': '4'}
TXT_4 = '{"PROVIDERTTS":"NoYandex","APIKEYTTS":"y_key","PROVIDERSTT":"NoGoogle","APIKEYSTT":"g_key",' \
'"ALARMKWACTIVATED":"1","ALARMTTS":"1","ALARMSTT":"1","newer__":{"fdfd":"777"}}'
def test_self(self):
updater = new_updater()
self.assertEqual(updater.from_dict(CFG()), 0)
self.assertEqual(updater._updated_count, 0)
self.assertEqual(updater._updated_count, updater._change_count)
self.assertFalse(updater.save_ini)
def test_5(self):
updater = new_updater()
self.assertEqual(updater.from_dict({'be never': self.ADD_5}), 5)
self.assertFalse(updater.save_ini)
def test_5_0_str(self):
add = json.dumps({'be never': self.ADD_5})
updater = new_updater()
self.assertEqual(updater.from_json(add), 0)
self.assertFalse(updater.save_ini)
def test_5_4_str(self):
add = json.dumps(self.ADD_5)
updater = new_updater()
self.assertEqual(updater.from_json(add), 4)
self.assertTrue(updater.save_ini)
def test_5_5_dict(self):
updater = new_updater()
self.assertEqual(updater.from_dict({'settings': self.ADD_5}), 5)
self.assertFalse(updater.save_ini)
def test_prov(self):
updater = new_updater()
self.assertEqual(updater.from_json(self.TXT_4), 4)
self.assertTrue(updater.save_ini)
self.assertEqual(updater.from_dict(CFG()), 2)
self.assertFalse(updater.save_ini)
self.assertEqual(updater.from_dict(CFG()), 0)
self.assertFalse(updater.save_ini)
def test_prov_proxy(self):
txt_5 = self.TXT_4[:-1] + ',"proxy":{"enable": "1"}}'
updater = new_updater()
self.assertEqual(updater.from_json(txt_5), 5)
self.assertTrue(updater.save_ini)
self.assertEqual(updater.from_dict(CFG()), 3)
self.assertFalse(updater.save_ini)
self.assertEqual(updater.from_dict(CFG()), 0)
self.assertFalse(updater.save_ini)
| 2.546875 | 3 |
Dirt.py | damianStrojek/Python-GameOfLife | 4 | 12796369 |
# OOP PG WETI PROJECT NR 2
# <NAME> s184407 2021 IT/CS
# @ Copyright 2021, <NAME>, All rights reserved.
import os, pygame
from Organism import Organism
class Dirt(Organism):
# Default block that makes our world
def __init__(self, _currentWorld, _positionX, _positionY):
super(Dirt, self).__init__(0, 0, _currentWorld, _positionX, _positionY, False)
def getImage(self):
if self.currentWorld.getWorldType() == 1:
self.image = pygame.image.load(os.path.join('icons', 'dirthex.jpg'))
else:
self.image = pygame.image.load(os.path.join('icons', 'dirt.jpg'))
self.image = pygame.transform.scale(self.image, (self.currentWorld.getIconWidth(), self.currentWorld.getIconHeight()))
return self.image
def getName(self):
return "Dirt" | 2.9375 | 3 |
agora_analytica/analytics/dummy.py | Kyrmy/prototyyppi | 0 | 12796370 |
"""
Dummy module for generating fixed - or random - distances.
"""
from numpy.random import randint
from numpy import abs
import pandas as pd
def distance(source: pd.Series, target: pd.Series, answers: pd.DataFrame, answer_scale=5, answer_source=None, answer_target=None):
def _maybe_random(answer: int) -> int:
""" Return either :param:`answer` or random number in `answer_scale` """
return answer if answer is not None else randint(0, answer_scale)
answer_source = _maybe_random(answer_source)
answer_target = _maybe_random(answer_target)
return abs(answer_source - answer_target)
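# Quick illustration (not part of the original module): with both answers supplied the
# distance is deterministic; leaving one as None draws a random answer on answer_scale.
if __name__ == "__main__":
    empty = pd.Series(dtype=float)
    print(distance(empty, empty, answers=pd.DataFrame(), answer_source=1, answer_target=4))  # -> 3
    print(distance(empty, empty, answers=pd.DataFrame(), answer_scale=5, answer_source=2))   # random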
| 2.78125 | 3 |
nr_common/fs_utils/fs_utils.py | nitred/nr-common | 0 | 12796371 |
"""Simple file system utils."""
import os
def makedirs(path, is_file, to_abs=False):
"""Make directory from path (filename or directory) if it doesn't already exist.
Args:
path (str): Absolute path of directory or filename.
is_file (bool): Whether the path is a directory of filename.
If True, then path is treated as a filename and its parent directory is created.
If False, then path is treated as a directory and it is created.
to_abs (bool): If True, convert the path to absolute path.
If False, assume that the path is absolute. Default False.
Returns:
None
"""
if to_abs:
if not os.path.isabs(path):
path = os.path.abspath(path)
if not os.path.isabs(path):
raise ValueError("Path must be an absolute path. This method does not deal with relative paths. "
"Use `to_abs` if you want to explicitly convert path to absolute.")
if is_file:
directory = os.path.dirname(path)
else:
directory = path
# If directory does not exist, then simple create it.
if not os.path.exists(directory):
os.makedirs(directory)
# elif the directory exists
else:
# Assert that the path is a directory
if not os.path.isdir(directory):
raise OSError("Path already exists but is not a directory! Path: {}".format(directory))
else:
# Alles in Ordnung
pass
return
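# Illustrative usage (not part of the original module): create the parent directory of
# a file path, and a directory given as a relative path (paths are made-up examples).
if __name__ == "__main__":
    makedirs("/tmp/nr_common_demo/output/result.txt", is_file=True)   # creates /tmp/nr_common_demo/output
    makedirs("nr_common_demo_logs/run1", is_file=False, to_abs=True)  # converted to an absolute path first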
| 3.671875 | 4 |
tony-setanddict.py | tonythott/tonnewcode | 0 | 12796372 |
'''
#set is a container that stores a collection of unique values
#Union, intersection, subtraction for sets
#Subtraction (a - b) removes from a the elements it shares with b
list =[] #empty list
setN = {} #careful: this creates an empty dict, not a set
setN = set() #empty set
cast = {"tony","preetha","luke"}
if "luke" in cast:
for tony in sorted(cast) :
print (tony)
cast = set(["tony","preetha","luke"])
cast.add("tessa")
#to get to know the hash
print(hash("tony"))
namelist = ["tt","nn","jj","rr","uu"]
names = set(namelist) #copy list and create a new set
names.add("nn") #You cant have same elements in a set so elemensts wont repeat but in a list we can repeat items
names.discard("rr") #we can use "remove" also
for elem in names:
print(elem)
#To remove everything from the set
names.clear()
namelist1 = ["tt1","nn2","jj3","rr4","uu5"]
namelist2 = ["tt1","nn","jj3","rr4","uu5"]
names1 = set(namelist1)
names2 = set(namelist2)
#nameunion = names1.union(names2)
#nameunion = names1.intersection(names2)
nameunion = names1.difference(names2)
for elem in nameunion:
print(elem)
if names2.issubset(names1):
print("True")
else:
print("False")
'''
#Dictionary or map ; key is unique and value can be anything
d = dict()
d = {}
d = dict()
d["tony"] = 98
print (d)
sd = {"Tony":98,"preetha":99,"pre":99,"etha":99,"tha":99}
print (sd)
td = {}
td["tony"] = 98
print (td)
if "tony" in d:
print("YES")
td["tony"] = 101
print(td["tony"])
print(len(sd))
l = sd.values()
print (l)
sd.pop("preetha")
print(sd)
print(sd.get("tony","not found"))
print(sd.get("Tony","not found")) #if key doesnt exist then print "not found" as per the given example
for elem in sd:
print(sd[elem]) # print all the value for the keys exist in the dict
#items() # this will return both key and value; it will return them as tuples
for item in sd.items():
print(item[0], " ",item[1])
for (u,v) in sd.items():
print(u, " ",v)
| 4.21875 | 4 |
moanna/model/Autoencoder.py | rlupat/moanna | 2 | 12796373 |
# Libraries
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from tensorboardX import SummaryWriter
import torch.nn.functional as F
import pdb
import seaborn as sns
import matplotlib.pyplot as plt
from livelossplot import PlotLosses
#Encoder
class LayerBlockEncode(nn.Module):
def __init__(self, hidden_size_1, hidden_size_2, p):
super().__init__()
self.layer = nn.Linear(hidden_size_1, hidden_size_2)
self.activation = nn.Tanh()
self.bn = nn.BatchNorm1d(hidden_size_2)
self.dropout = nn.Dropout(p)
def forward(self, x):
x = self.layer(x)
x = self.activation(x)
x = self.bn(x)
x = self.dropout(x)
return (x)
class Encoder(nn.Module):
def __init__(self, input_shape, hidden_size, encoded_size, n_layers=3, drop_prob=0.5):
super(Encoder, self).__init__()
self.n_layers = n_layers
self.drop_prob = drop_prob
self.e1 = nn.Linear(input_shape, hidden_size)
self.activation1 = nn.Tanh()
self.bn1 = nn.BatchNorm1d(hidden_size)
if (hidden_size // (2**n_layers)) > encoded_size:
self.layers = nn.ModuleList([])
for i in range(n_layers):
self.layers.append(LayerBlockEncode(hidden_size//(2**i), hidden_size//(2**(i+1)), self.drop_prob))
else:
self.n_layers = 0
        self.e2 = nn.Linear(hidden_size // (2**self.n_layers), encoded_size)
def forward(self, input):
x = self.e1(input)
x = self.activation1(x)
x = self.bn1(x)
for i in range(self.n_layers):
encode_block = self.layers[i]
x = encode_block(x)
#block1 = F.dropout(self.bn1(F.elu(self.e1(input))), p=self.drop_prob)
#encoded_representation = torch.tanh(self.e2(block1))
encoded_representation = self.e2(x)
return encoded_representation
#Decoder:
class LayerBlockDecode(nn.Module):
def __init__(self, hidden_size_1, hidden_size_2, p):
super().__init__()
self.layer = nn.Linear(hidden_size_1, hidden_size_2)
self.activation = nn.Tanh()
self.bn = nn.BatchNorm1d(hidden_size_2)
self.dropout = nn.Dropout(p)
def forward(self, x):
x = self.layer(x)
x = self.activation(x)
x = self.bn(x)
x = self.dropout(x)
return (x)
class Decoder(nn.Module):
def __init__(self, output_shape, hidden_size, encoded_size, n_layers=3, drop_prob=0.5):
super(Decoder, self).__init__()
self.n_layers = n_layers
self.drop_prob = drop_prob
self.second_last_layer_size = hidden_size // (2**n_layers)
self.d1 = nn.Linear(encoded_size, self.second_last_layer_size)
self.activation1 = nn.Tanh()
self.bn1 = nn.BatchNorm1d(self.second_last_layer_size)
if (self.second_last_layer_size) > encoded_size:
self.layers = nn.ModuleList([])
for i in range(self.n_layers):
self.layers.append(LayerBlockDecode(hidden_size//(2**(n_layers-i)), hidden_size//(2**(n_layers-i-1)), self.drop_prob))
else:
self.n_layers = 0
self.d2 = nn.Linear(hidden_size, output_shape)
def forward(self, input):
x = self.d1(input)
x = self.activation1(x)
x = self.bn1(x)
for i in range(self.n_layers):
decode_block = self.layers[i]
x = decode_block(x)
#block = F.dropout(self.bn(F.elu(self.d(input))), p=self.drop_prob)
#reconstruction = torch.tanh(self.d4(block))
reconstruction = self.d2(x)
return reconstruction
# Training AutoEncoders Function
def train_ae(input_tensor, target_tensor, encoder, decoder,
encoder_optimizer, decoder_optimizer, criterion, hidden_size, encoded_size, n_layers, drop_prob, phase):
if phase == 'train':
encoder.train()
decoder.train()
else:
encoder.eval()
decoder.eval()
# clear the gradients in the optimizers
if phase == 'train':
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
# Forward pass through
encoded_representation = encoder(input_tensor)
reconstruction = decoder(encoded_representation)
# Compute the loss
loss = criterion(reconstruction, target_tensor)
if phase == 'train':
# Compute the gradients
loss.backward()
# Step the optimizers to update the model weights
encoder_optimizer.step()
decoder_optimizer.step()
# Return the loss value to track training progress
return loss.item()
# Training Loop
def trainIters(encoder, decoder, data_tensor, data_tensor_valid, epochs,
hidden_size, encoded_size, n_layers, drop_prob,
print_every_n_batches=100, learning_rate=0.01,
phases=["train", "validation"],):
# Live Loss
liveloss = PlotLosses()
# keep track of losses
train_plot_losses = []
test_plot_losses = []
# Initialize Encoder Optimizer
encoder_optimizer = torch.optim.Adam(encoder.parameters(), lr=learning_rate, weight_decay=1e-5)
# Initialize Decoder Optimizer
decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=learning_rate, weight_decay=1e-5)
# Specify loss function
    # 'reduce' is deprecated; reduction='mean' is the equivalent setting.
    criterion = torch.nn.MSELoss(reduction='mean')
# Cycle through epochs
for epoch in range(epochs):
logs = {}
for phase in phases:
print(f'Epoch {epoch + 1}/{epochs}')
if phase == 'train':
loss = train_ae(data_tensor, data_tensor, encoder, decoder,
encoder_optimizer, decoder_optimizer, criterion,
hidden_size, encoded_size, n_layers, drop_prob, phase)
train_plot_losses.append(loss)
else:
loss = train_ae(data_tensor_valid, data_tensor_valid, encoder, decoder,
encoder_optimizer, decoder_optimizer, criterion,
hidden_size, encoded_size, n_layers, drop_prob, phase)
test_plot_losses.append(loss)
print(loss)
#plot_losses.append(loss)
prefix = ''
if phase == 'validation':
prefix = 'val_'
logs[prefix + 'log loss'] = loss
liveloss.update(logs) #liveloss
liveloss.draw() #liveloss
return train_plot_losses, test_plot_losses
| 2.53125 | 3 |
project/face_api_server/proxy/redis_function.py | 1step6thswmaestro/29 | 0 | 12796374 | import datetime
import json
import redis
redis_device_key = 'redis_device_key'
device_expire_second = 60
class RedisProxy(object):
def __init__(self, host='127.0.0.1', port=6379):
self.redis_pool = redis.ConnectionPool(host=host, port=port, db=0)
def connect(self):
return redis.Redis(connection_pool=self.redis_pool)
def get_device_datas(self):
device_datas = []
r = self.connect()
result = r.hgetall(redis_device_key)
remove_device_list = []
for user_pair in result.items():
values = user_pair[1].split('@')
'''
device_id = user_pair[0]
update_time = datetime.datetime.strptime(values[1], "%Y-%m-%d %H:%M:%S.%f")
now_time = datetime.datetime.today()
expire_time_delta = datetime.timedelta(seconds=device_expire_second)
if now_time > update_time + expire_time_delta:
device_ids.append(device_id)
else:
remove_device_list.append(device_id)
'''
device_datas.append(json.loads(values[0]))
self.remove_devices(remove_device_list)
return device_datas
def remove_devices(self, device_list):
r = self.connect()
p = r.pipeline()
for device_id in device_list:
p.hdel(redis_device_key, device_id)
p.execute()
def update_device(self, device_id, websocket_send_data):
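        # Store the JSON payload together with the write timestamp, separated by '@',
        # so get_device_datas() can split the two parts apart later.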
r = self.connect()
insert_value = "%s@%s" % (json.dumps(websocket_send_data), datetime.datetime.now())
return r.hset(redis_device_key, device_id, insert_value)
| 2.5 | 2 |
fungit/style/cursor.py | zlj-zz/pyzgit | 0 | 12796375 | <reponame>zlj-zz/pyzgit
class Cursor:
"""Class with collection of cursor movement functions: .t[o](line, column) | .r[ight](columns) | .l[eft](columns) | .u[p](lines) | .d[own](lines) | .save() | .restore()"""
@staticmethod
def to(line: int, col: int) -> str:
# * Move cursor to line, column
return "\033[{};{}f".format(line, col)
@staticmethod
def right(dx: int) -> str:
return "\033[{}C".format(dx)
@staticmethod
def left(dx: int) -> str:
return "\033[{}D".format(dx)
@staticmethod
def up(dy: int) -> str:
return "\033[{}A".format(dy)
@staticmethod
def down(dy: int) -> str:
return "\033[{}B".format(dy)
save: str = "\033[s" # * Save cursor position
    restore: str = "\033[u"  # * Restore saved cursor position
t = to
r = right
l = left
u = up
d = down
| 3.109375 | 3 |
src/cors.py | danztensai/covid-19-api | 53 | 12796376 |
from flask_cors import CORS
cors = CORS(resources={r"/maskmap/*": {"origins": "*"}})
def init_app(app):
cors.init_app(app)
| 1.757813 | 2 |
day08.py | Yalfoosh/Advent-of-Code-2019 | 0 | 12796377 | import numpy as np
image_dimensions = (25, 6)
def load(image_dims, path: str = "input/08.txt"):
with open(path) as file:
return np.array([c for c in file.read()]).reshape((-1, image_dims[0] * image_dims[1]))
def number_of_values_in_layer(layer, value):
return np.count_nonzero(layer == value)
def stack_layers(image_layers):
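    # For each pixel position, walk the layers front-to-back and keep the first
    # non-transparent value ('2' marks a transparent pixel).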
final_layer = list()
for i in range(len(image_layers[0])):
for j in range(len(image_layers)):
if image_layers[j][i] != "2":
final_layer.append(image_layers[j][i])
break
return np.array(final_layer)
# Prep
layers = load(image_dimensions)
# First
wanted_layer = None
minimum = None
for l in layers:
n = number_of_values_in_layer(l, "0")
if minimum is None or wanted_layer is None or n < minimum:
minimum = n
wanted_layer = l
wanted_1 = number_of_values_in_layer(wanted_layer, "1") * number_of_values_in_layer(wanted_layer, "2")
print(f"[1]\t{wanted_1}")
# Second
stacked_layer = stack_layers(layers).reshape(image_dimensions[::-1])
final_image = list()
for row in stacked_layer:
r = ""
for element in row:
r += "##" if element == "1" else " " if element == "0" else " "
final_image.append(r)
print(f"[2]")
for r in final_image:
print(r)
| 3 | 3 |
Robotix/apps/miscellaneous/models.py | Robotix/rbtxportal | 0 | 12796378 | <filename>Robotix/apps/miscellaneous/models.py<gh_stars>0
from django.db import models
class Country(models.Model):
name = models.CharField(max_length=20)
class Meta:
verbose_name_plural = 'Countries'
def __str__(self):
return self.name
class State(models.Model):
name = models.CharField(max_length=20)
country = models.ForeignKey(Country)
def __str__(self):
return self.name
class College(models.Model):
name = models.CharField(
max_length=255,
blank=False,
verbose_name='College Name'
)
abbv = models.CharField(
max_length=30,
blank=False,
verbose_name='Abbreviation'
)
state = models.ForeignKey(State)
@staticmethod
def autocomplete_search_fields():
return 'name', 'abbv'
def __str__(self):
return self.name
| 2.484375 | 2 |
senet/keras_fn/se_resnet_test.py | AI-Huang/SENet | 1 | 12796379 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Jan-02-21 20:43
# @Author : <NAME> (<EMAIL>)
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.platform import test
from tensorflow.keras.utils import plot_model
from senet.keras_fn.se_resnet import SE_ResNet_18, SE_ResNet_50, SE_ResNet_101, SE_ResNet_152
class TestModelArchitectures(keras_parameterized.TestCase):
def test_se_resnet_18(self):
model_type = "SE_ResNet_18"
input_shape = (224, 224, 3)
num_classes = 2
model = SE_ResNet_18(
include_top=True,
weights=None,
input_shape=input_shape,
classes=num_classes
)
plot_model(model, to_file=model_type + ".png", show_shapes=True)
def test_se_resnet_50(self):
model_type = "SE_ResNet_50"
input_shape = (224, 224, 3)
num_classes = 2
model = SE_ResNet_50(
include_top=True,
weights=None,
input_shape=input_shape,
classes=num_classes
)
plot_model(model, to_file=model_type + ".png", show_shapes=True)
def test_se_resnet_101(self):
model_type = "SE_ResNet_101"
input_shape = (224, 224, 3)
num_classes = 2
model = SE_ResNet_101(
include_top=True,
weights=None,
input_shape=input_shape,
classes=num_classes
)
plot_model(model, to_file=model_type + ".png", show_shapes=True)
def test_se_resnet_152(self):
model_type = "SE_ResNet_152"
input_shape = (224, 224, 3)
num_classes = 2
model = SE_ResNet_152(
include_top=True,
weights=None,
input_shape=input_shape,
classes=num_classes
)
plot_model(model, to_file=model_type + ".png", show_shapes=True)
if __name__ == "__main__":
test.main()
| 2.578125 | 3 |
stage_data/main.py | shermanflan/clinical-data-architecture | 0 | 12796380 | """
Usage:
- From Spark 3.1.1 base container with Python bindings:
docker run --rm -it --name test_pyspark spark-ingest:latest /bin/bash
./bin/spark-submit spark-ingest/main.py --filepath ./examples/src/main/python/pi.py
- From binaries:
./pyspark --packages io.delta:delta-core_2.12:1.0.0 \
--conf "spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension" \
--conf "spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog"
./spark-sql --packages io.delta:delta-core_2.12:1.0.0 \
--conf "spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension" \
--conf "spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog"
"""
from datetime import datetime, date, timedelta
import os
import shutil
import boto3
import click
from pyspark.sql import SparkSession
from spark_etl import logger, SPARK_LOG_LEVEL
from spark_etl.etl import (
create_vitals_delta, cache_mpmi, save_mpmi,
load_vitals, upsert_vitals, time_travel
)
from spark_etl.secret import get_secret
"""
To configure AWS bucket-specific authorization, use the
`fs.s3a.bucket.[bucket name].access.key` configuration setting.
As specified here:
- https://hadoop.apache.org/docs/current2/hadoop-aws/tools/hadoop-aws/index.html#Configuring_different_S3_buckets
TODO: Consider optimizing the S3A for I/O.
- https://spark.apache.org/docs/3.1.1/cloud-integration.html#recommended-settings-for-writing-to-object-stores
"""
spark_session = (
SparkSession
.builder
.appName("stage_data")
# AWS general authorization
# .config("spark.hadoop.fs.s3a.access.key", os.environ['P3_AWS_ACCESS_KEY'])
# .config("spark.hadoop.fs.s3a.secret.key", os.environ['P3_AWS_SECRET_KEY'])
# AWS bucket-specific authorization
# .config(f"fs.s3a.bucket.{os.environ['P3_BUCKET']}.access.key", os.environ['P3_AWS_ACCESS_KEY'])
# .config(f"fs.s3a.bucket.{os.environ['P3_BUCKET']}.secret.key", os.environ['P3_AWS_SECRET_KEY'])
# .config(f"fs.s3a.bucket.{os.environ['P3_BUCKET']}.session.token", os.environ['P3_AWS_SESSION_TOKEN'])
# Or
.config(f"spark.hadoop.fs.s3a.bucket.{os.environ['P3_BUCKET']}.access.key", os.environ['P3_AWS_ACCESS_KEY'])
.config(f"spark.hadoop.fs.s3a.bucket.{os.environ['P3_BUCKET']}.secret.key", os.environ['P3_AWS_SECRET_KEY'])
# .config("spark.hadoop.fs.s3a.bucket.bangkok.access.key", os.environ['BK_AWS_ACCESS_KEY'])
# .config("spark.hadoop.fs.s3a.bucket.bangkok.secret.key", os.environ['BK_AWS_SECRET_KEY'])
# .config("spark.hadoop.fs.s3a.bucket.condesa.access.key", os.environ['CO_AWS_ACCESS_KEY'])
# .config("spark.hadoop.fs.s3a.bucket.condesa.secret.key", os.environ['CO_AWS_SECRET_KEY'])
# TODO: S3A Optimizations
.config("spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version", "2")
.config("spark.hadoop.mapreduce.fileoutputcommitter.cleanup-failures.ignored", "true")
# TODO: S3A Optimizations: PathOutputCommitProtocol cannot be resolved
# .config("spark.hadoop.fs.s3a.committer.name", "directory")
# .config("spark.sql.sources.commitProtocolClass",
# "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol")
# .config("spark.sql.parquet.output.committer.class",
# "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter")
# TODO: Parquet Optimizations
.config("spark.hadoop.parquet.enable.summary-metadata", "false")
.config("spark.sql.parquet.mergeSchema", "false")
.config("spark.sql.parquet.filterPushdown", "true")
.config("spark.sql.hive.metastorePartitionPruning", "true")
# Specify different location for Hive metastore
# .config("spark.sql.warehouse.dir", "/opt/spark/hive_warehouse")
# .config("spark.sql.catalogImplementation", "hive")
# Delta lake integration with Spark DataSourceV2 and Catalog
# .config("spark.jars.packages", "io.delta:delta-core_2.12:1.0.0")
# .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension")
# .config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog")
.getOrCreate()
)
spark_session.sparkContext.setLogLevel(SPARK_LOG_LEVEL)
@click.group()
def cli():
pass
@cli.command()
def smoke_test():
pass
@cli.command()
@click.option('--filepath', required=False, help='The input file path')
@click.option('--filepath2', required=False, help='The input file path')
@click.option(
'--output-path', required=False, help='The output file path')
@click.option(
'--delta-truncate/--no-delta-truncate', default=True, help='Clear previous delta runs')
def acquire_vitals(
filepath: str,
filepath2: str,
output_path: str,
delta_truncate: bool) -> None:
"""
"""
# TODO: Import spark_etl to Jupyter container
# TODO: Build Spark 3.2 container with Python bindings
# TODO: RE: patient matches, load demographics as a Delta and keep sync'd
# TODO: Partition demographics Delta by prac
# TODO: Implement "Current" tables as delta lake tables (merge/upsert)
# TODO: How to write parent/child tables to db at scale?
# See here: https://www.youtube.com/watch?v=aF2hRH5WZAU
# monotonically_increasing_id() can also be used.
start = datetime.now()
delta_path = "{root}/public/vitals/delta".format(root=output_path)
if delta_truncate:
logger.info(f"Clearing vitals delta: {delta_path}")
shutil.rmtree(delta_path, ignore_errors=True)
# logger.info(f"Creating vitals delta: {output_path}")
# delta_path = create_vitals_delta(spark_session, output_path)
# logger.info(f"Create finished in {datetime.now() - start}")
logger.info(f"Caching mpmi")
mpmi = cache_mpmi(spark_session)
logger.info(f"Cache finished in {datetime.now() - start}")
# logger.info(f"Persisting mpmi")
# mpmi_path = save_mpmi(spark_session, output_path)
# logger.info(f"Save finished in {datetime.now() - start}")
logger.info(f"Processing vitals: {filepath}")
load_vitals(spark_session, mpmi, filepath, output_path)
logger.info(f"Load process finished in {datetime.now() - start}")
logger.info(f"Processing vitals: {filepath2}")
upsert_vitals(spark_session, mpmi, filepath2, output_path)
logger.info(f"Upsert process finished in {datetime.now() - start}")
logger.info(f"Time-travel vitals: {delta_path}")
time_travel(
spark_session,
delta_path
)
logger.info(f"Time-travel finished in {datetime.now() - start}")
input("Press enter to exit...") # keep alive for Spark UI
@cli.command()
@click.option('--source-path', required=False, help='The Delta path')
@click.option('--output-path', required=False, help='The output file path')
def stream_vitals(source_path: str, output_path: str) -> None:
"""
JDBC streaming is not supported so I'm not sure how to use this.
It may be that Kafka is necessary for true streaming.
"""
logger.info(f"Stream (append mode) to delta on: {source_path}")
(
spark_session
.readStream
.format("delta")
# .option("ignoreDeletes", "true")
# .option("ignoreChanges", "true")
.load(source_path)
.writeStream
# .format("console") # debug
.format("delta")
.outputMode("append")
.option("checkpointLocation", f"{output_path}/_checkpoints/stream-from-delta")
.queryName('vitals_stream')
.start(output_path)
.awaitTermination(timeout=60*5) # 5 min
)
if __name__ == "__main__":
cli()
| 1.765625 | 2 |
src/runner/views.py | OpenROAD-Cloud/flow-runner | 0 | 12796381 | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from runner.tasks import start_flow_task
class RunnerStartFlow(APIView):
def post(self, request):
flow_uuid = request.POST.get('flow_uuid', None)
flow_repo_url = request.POST.get('flow_repo_url', None)
if not flow_uuid or not flow_repo_url:
return Response('Missing parameters', status=status.HTTP_400_BAD_REQUEST)
start_flow_task.delay(flow_uuid, flow_repo_url)
return Response('Received', status=status.HTTP_202_ACCEPTED)
| 2.15625 | 2 |
libp2p/network/connection/raw_connection.py | ChihChengLiang/py-libp2p | 0 | 12796382 | from .raw_connection_interface import IRawConnection
class RawConnection(IRawConnection):
def __init__(self, ip, port, reader, writer, initiator):
# pylint: disable=too-many-arguments
self.conn_ip = ip
self.conn_port = port
self.reader = reader
self.writer = writer
self._next_id = 0 if initiator else 1
self.initiator = initiator
async def write(self, data):
self.writer.write(data)
self.writer.write("\n".encode())
await self.writer.drain()
async def read(self):
line = await self.reader.readline()
adjusted_line = line.decode().rstrip('\n')
# TODO: figure out a way to remove \n without going back and forth with
# encoding and decoding
return adjusted_line.encode()
def close(self):
self.writer.close()
def next_stream_id(self):
"""
Get next available stream id
:return: next available stream id for the connection
"""
next_id = self._next_id
self._next_id += 2
return next_id
| 2.9375 | 3 |
app.py | porcelainruler/Image-Search-Engine | 1 | 12796383 | <reponame>porcelainruler/Image-Search-Engine
import os
from flask import Flask, render_template, request, jsonify
import numpy as np
import cv2
from Searcher import Searcher
from ColorDescriptor import ColorDescriptor
app = Flask(__name__)
INDEX = os.path.join(os.path.dirname(__file__), 'index.csv')
cd = ColorDescriptor()
@app.route('/', methods=['GET', 'POST'])
def search():
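    # POST: decode the uploaded image, extract its color-histogram features,
    # and return the 10 closest matches from the CSV index.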
if request.method == 'POST':
        query = cv2.imdecode(np.frombuffer(request.files['img'].read(), np.uint8), cv2.IMREAD_COLOR)
features = cd.describe(query)
searcher = Searcher(INDEX)
results = searcher.search(features, 10)
res = []
for (score, resultID) in results:
res.append({"Image": str(resultID), "Score": str(score)})
context = {"images": res}
print(context)
return render_template('index.html', context=context)
res = []
context = {"images": res}
return render_template('index.html', context=context)
if __name__ == '__main__':
app.run('127.0.0.1', debug=True)
| 2.546875 | 3 |
sdk/python/pulumi_gcp/compute/get_default_service_account.py | 23doors/pulumi-gcp | 1 | 12796384 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetDefaultServiceAccountResult:
"""
A collection of values returned by getDefaultServiceAccount.
"""
def __init__(__self__, display_name=None, email=None, name=None, project=None, unique_id=None, id=None):
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
__self__.display_name = display_name
"""
The display name for the service account.
"""
if email and not isinstance(email, str):
raise TypeError("Expected argument 'email' to be a str")
__self__.email = email
"""
Email address of the default service account used by VMs running in this project
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
"""
The fully-qualified name of the service account.
"""
if project and not isinstance(project, str):
raise TypeError("Expected argument 'project' to be a str")
__self__.project = project
if unique_id and not isinstance(unique_id, str):
raise TypeError("Expected argument 'unique_id' to be a str")
__self__.unique_id = unique_id
"""
The unique id of the service account.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetDefaultServiceAccountResult(GetDefaultServiceAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDefaultServiceAccountResult(
display_name=self.display_name,
email=self.email,
name=self.name,
project=self.project,
unique_id=self.unique_id,
id=self.id)
def get_default_service_account(project=None,opts=None):
"""
Use this data source to retrieve default service account for this project
:param str project: The project ID. If it is not provided, the provider project is used.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/d/compute_default_service_account.html.markdown.
"""
__args__ = dict()
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:compute/getDefaultServiceAccount:getDefaultServiceAccount', __args__, opts=opts).value
return AwaitableGetDefaultServiceAccountResult(
display_name=__ret__.get('displayName'),
email=__ret__.get('email'),
name=__ret__.get('name'),
project=__ret__.get('project'),
unique_id=__ret__.get('uniqueId'),
id=__ret__.get('id'))
| 1.960938 | 2 |
data_visualization/chp15_GeneratingData/plottingSquares.py | sergejsm/pythonCrashCourseProjects | 0 | 12796385 | <reponame>sergejsm/pythonCrashCourseProjects
import matplotlib.pyplot as plt
x_values = list(range(1, 1001))
y_values = [x**2 for x in x_values]
plt.title("Square Numbers", fontsize=24)
plt.xlabel("Value", fontsize=14)
plt.ylabel("Square of Value", fontsize=14)
# Set size of tick labels.
plt.tick_params(axis='both', which='major', labelsize=14)
plt.axis([0, 1100, 0, 1100000])
plt.scatter(x_values, y_values, c=y_values, cmap=plt.cm.Reds,
edgecolor='none', s=40)
plt.savefig('squares_plot.png', bbox_inches='tight')
plt.show() | 3.578125 | 4 |
power_sizing.py | ThyagoFRTS/power-eletric | 1 | 12796386 | import math
def calculate_power_luminance(ambient_area):
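    # Lighting load rule used here: 100 VA for the first 6 m^2 of floor area,
    # plus 60 VA for each additional complete 4 m^2.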
#area in m^2
potency = 0
if ambient_area <= 6:
print('Lighting Potency: '+ str(100) +' (VA)')
potency = 100
else:
print('extra potency: ' + str((ambient_area - 6)))
potency = 100 + 60 * int((ambient_area - 6)/4)
print('Lighting Potency: '+ str(potency) +' (VA)')
print('')
return potency
"""#Dimensionamento de TUGs"""
def calculate_number_and_power_of_tugs(ambient_name, perimeter = 0):
#area in m^2
#perimeter in m
class1 = ['banheiro']
class2 = ['cozinha', 'copa','copa-cozinha', 'area de servico', 'lavanderia']
class3 = ['varanda']
class4 = ['sala', 'quarto', 'dormitorio', 'escritorio']
number_tugs = 0
power_tugs = 0
if ambient_name in class1:
number_tugs = 1
power_tugs = number_tugs * 600
elif ambient_name in class2:
number_tugs = math.ceil(perimeter/3.5)
if number_tugs <= 3:
power_tugs = number_tugs * 600
else:
power_tugs = 3 * 600 + 100 * (number_tugs - 3)
elif ambient_name in class3:
number_tugs = 1
power_tugs = number_tugs * 100
elif ambient_name in class4:
number_tugs = math.ceil(perimeter/5)
power_tugs = number_tugs * 100
    else:
        print('No matches found')
        print('warning: ambient is calculated by area, see norm 54.10\nEnter the area: ')
        area = float(input())
        if area <= 2.55:
            number_tugs = 1
            power_tugs = number_tugs * 100
print('Numbers TUG: ' + str(number_tugs) + '\nTUG Potency:' + str(power_tugs) +'(VA)')
print('')
return number_tugs, power_tugs
| 3.671875 | 4 |
lab3/index.py | xiong35/HUST-BigData | 0 | 12796387 | import pandas as pd
SUPPORT = 0.005
CONF = 0.5
def csv2list():
df = pd.read_csv("./实验三/数据/Groceries.csv")
itemsets = []
for itemset_str in df["items"]:
itemsets.append(set(itemset_str[1:-1].split(",")))
return itemsets
itemsets = csv2list()
itemsets_len = itemsets.__len__()
def build1deg(itemsets):
SAVE_PATH = "./one_deg_support.txt"
one_deg = {}
for itemset in itemsets:
for item in itemset:
one_deg[item] = one_deg.get(item, 0) + 1
one_deg_count = 0
items = list(one_deg.keys())
with open(SAVE_PATH, "w") as fw:
for item in items:
support = one_deg[item] / itemsets_len
if support > SUPPORT:
one_deg[item] = support
fw.write(f"{item}: {support}\n")
one_deg_count += 1
else:
del one_deg[item]
print(f"频繁一项集数量: {one_deg_count}", )
print(f"频繁一项集保存在`{SAVE_PATH}`")
return one_deg
one_deg = build1deg(itemsets)
def build2deg(one_deg, itemsets):
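    # Count co-occurrences of every pair of frequent 1-items and keep pairs above SUPPORT.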
SAVE_PATH = "./two_deg_support.txt"
items = list(one_deg.keys())
two_deg = {}
for i in range(0, len(items)):
for j in range(i+1, len(items)):
key = (items[i], items[j])
for itemset in itemsets:
if key[0] in itemset and key[1] in itemset:
two_deg[key] = two_deg.get(key, 0) + 1
pairs = list(two_deg.keys())
two_deg_count = 0
with open(SAVE_PATH, "w") as fw:
for pair in pairs:
support = two_deg[pair] / itemsets_len
if support > SUPPORT:
two_deg[pair] = support
fw.write(f"{pair}: {support}\n")
two_deg_count += 1
else:
del two_deg[pair]
print(f"频繁二项集数量: {two_deg_count}", )
print(f"频繁二项集保存在`{SAVE_PATH}`")
return two_deg
two_deg = build2deg(one_deg, itemsets)
def gen2deg_rules(one_deg, two_deg):
SAVE_PATH = "./two_deg_rules.txt"
pairs = list(two_deg.keys())
rules = {}
for pair in pairs:
rule = (pair[0], pair[1])
conf = two_deg[pair] / one_deg[rule[0]]
if conf > CONF:
rules[rule] = conf
rule = (pair[1], pair[0])
conf = two_deg[pair] / one_deg[rule[0]]
if conf > CONF:
rules[rule] = conf
with open(SAVE_PATH, "w") as fw:
for k, v in rules.items():
fw.write(f"{k[0]}->{k[1]}: {v}\n")
print(f"频繁二项集规则数量: {len(rules.keys())}", )
print(f"频繁二项集规则保存在`{SAVE_PATH}`")
gen2deg_rules(one_deg, two_deg)
def build3deg(two_deg, itemsets):
SAVE_PATH = "./three_deg_support.txt"
pairs = list(two_deg.keys())
itemset_3 = set()
for pair in pairs:
itemset_3.add(pair[0])
itemset_3.add(pair[1])
itemset_3 = list(itemset_3)
itemset_3.sort()
three_deg = {}
for i in range(0, len(itemset_3)):
for j in range(i+1, len(itemset_3)):
for k in range(j+1, len(itemset_3)):
item_i = itemset_3[i]
item_j = itemset_3[j]
item_k = itemset_3[k]
for itemset in itemsets:
if item_i in itemset and item_j in itemset and item_k in itemset:
tup = (item_i, item_j, item_k)
three_deg[tup] = three_deg.get(tup, 0)+1
three_deg_count = 0
tups = list(three_deg.keys())
with open(SAVE_PATH, "w") as fw:
for tup in tups:
support = three_deg[tup] / itemsets_len
if support > SUPPORT:
three_deg[tup] = support
fw.write(f"{tup}: {support}\n")
three_deg_count += 1
else:
del three_deg[tup]
print(f"频繁三项集数量: {three_deg_count}", )
print(f"频繁三项集保存在`{SAVE_PATH}`")
return three_deg
three_deg = build3deg(two_deg, itemsets)
def gen3deg_rules(one_deg, two_deg, three_deg):
SAVE_PATH = "./three_deg_rules.txt"
tups = list(three_deg.keys())
rules = {}
def enumTup(tup):
return [
(tup, tup[0], (tup[1], tup[2])),
(tup, tup[1], (tup[0], tup[2])),
(tup, tup[2], (tup[0], tup[1])),
(tup, (tup[1], tup[2]), tup[0]),
(tup, (tup[0], tup[2]), tup[1]),
(tup, (tup[0], tup[1]), tup[2]),
]
three_deg_rule_num = 0
with open(SAVE_PATH, "w") as fw:
for tup in tups:
rules = enumTup(tup)
for three, one, two in rules[:3]:
conf = three_deg[three] / one_deg[one]
if conf > CONF:
fw.write(f"{one}->{two}: {conf}\n")
three_deg_rule_num += 1
for three, two, one in rules[3:]:
                try:
                    conf = three_deg[three] / two_deg[two]
                except KeyError:
                    try:
                        conf = three_deg[three] / two_deg[(two[1], two[0])]
                    except KeyError:
                        print(two, "not found")
                        continue
if conf > CONF:
fw.write(f"{two}->{one}: {conf}\n")
three_deg_rule_num += 1
print(f"频繁三项集规则数量: {three_deg_rule_num}", )
print(f"频繁三项集规则保存在`{SAVE_PATH}`")
gen3deg_rules(one_deg, two_deg, three_deg)
| 3.171875 | 3 |
core/src/autogluon/core/searcher/bayesopt/tuning_algorithms/defaults.py | zhiqiangdon/autogluon | 4,462 | 12796388 | <gh_stars>1000+
from .bo_algorithm_components import LBFGSOptimizeAcquisition
from ..models.meanstd_acqfunc_impl import EIAcquisitionFunction
DEFAULT_ACQUISITION_FUNCTION = EIAcquisitionFunction
DEFAULT_LOCAL_OPTIMIZER_CLASS = LBFGSOptimizeAcquisition
DEFAULT_NUM_INITIAL_CANDIDATES = 250
DEFAULT_NUM_INITIAL_RANDOM_EVALUATIONS = 3
| 1.203125 | 1 |
analysis/study_definition.py | orlamac/Antidepressants-in-LD-A | 0 | 12796389 | <filename>analysis/study_definition.py<gh_stars>0
from cohortextractor import StudyDefinition, patients, codelist, codelist_from_csv, filter_codes_by_category, combine_codelists, Measure # NOQA
# Import codelists from codelist.py folder
from codelists import SSRI_codes, LD_codes, Autism_codes
# Define Study time variables
from datetime import datetime
end_date = datetime.today().strftime('%Y-%m-%d')
# Define Study population and variables
study = StudyDefinition(
default_expectations={
"date": {"earliest": "2019-01-01", "latest": end_date},
"rate": "uniform",
"incidence": 0.5,
},
population=patients.registered_with_one_practice_between(
"2019-01-01", end_date
),
)
# Set index date
index_date = "2019-01-01"
# Define Medication variables
# Patients who are taking SSRIs
SSRI_cohort = patients.with_these_medications(
SSRI_codes,
on_or_before=index_date,
returning="binary_flag",
return_expectations={"incidence": 0.5}
)
# Define patient populations
# Patients with a learning disability
learning_disability = patients.with_these_clinical_events(
LD_codes,
on_or_before=index_date,
returning="binary_flag",
return_expectations={"incidence": 0.5}
)
# Patients with Autism
autism = patients.with_these_clinical_events(
Autism_codes,
on_or_before=index_date,
returning="binary_flag",
return_expectations={"incidence": 0.5},
)
| 2.359375 | 2 |
app/models/Address.py | winlongit/shop_pc_server | 1 | 12796390 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2019/12/10 15:45
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : <EMAIL>
-------------------------------------------------
Description :
-------------------------------------------------
"""
import datetime
__author__ = 'Max_Pengjb'
from app.models import db
from app.models.User import User
class Address(db.Document):
user_id = db.ReferenceField(User, required=True, verbose_name='用户id')
streetName = db.StringField(max_length=512, required=True, verbose_name='地址')
userName = db.StringField(max_length=128, required=True, verbose_name='收货人姓名')
tel = db.StringField(max_length=64, required=True, verbose_name='收货人手机号')
isDefault = db.BooleanField(default=False, required=True, verbose_name='是否默认地址')
create_time = db.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')
def __unicode__(self):
return str(self.streetName) + str(self.userName)
| 2.390625 | 2 |
src/allennlp/fortex/allennlp/__init__.py | Piyush13y/forte-wrappers | 3 | 12796391 | from fortex.allennlp.allennlp_processors import AllenNLPProcessor
| 1.0625 | 1 |
tensorflow/27.pyflink-kafka/notebooks/tensorflow_predict.py | huseinzol05/Gather-Tensorflow-Serving | 267 | 12796392 | from pyflink.datastream import StreamExecutionEnvironment, TimeCharacteristic
from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings
from pyflink.table.descriptors import (
Schema,
Kafka,
Json,
Rowtime,
OldCsv,
FileSystem,
)
from pyflink.table.udf import udf
s_env = StreamExecutionEnvironment.get_execution_environment()
s_env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
s_env.set_parallelism(1)
st_env = StreamTableEnvironment.create(
s_env,
environment_settings = EnvironmentSettings.new_instance()
.in_streaming_mode()
.use_blink_planner()
.build(),
)
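# Globals for the lazily loaded TensorFlow graph; initialised on the first UDF call
# so each worker builds the session only once.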
X, Y, sess = None, None, None
@udf(result_type = DataTypes.STRING())
def predict(string):
global X, Y, sess
import tensorflow as tf
import json
import numpy as np
def load_graph(frozen_graph_filename):
with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def)
return graph
if X is None or Y is None or sess is None:
g = load_graph('/notebooks/frozen_model.pb')
X = g.get_tensor_by_name('import/Placeholder:0')
Y = g.get_tensor_by_name('import/logits:0')
sess = tf.Session(graph = g)
label = ['negative', 'positive']
maxlen = 50
UNK = 3
with open('/notebooks/dictionary-test.json', 'r') as fopen:
dic = json.load(fopen)
sentences = [string]
x = np.zeros((len(sentences), maxlen))
for i, sentence in enumerate(sentences):
for no, k in enumerate(sentence.split()[:maxlen][::-1]):
x[i, -1 - no] = dic.get(k, UNK)
indices = np.argmax(sess.run(Y, feed_dict = {X: x}), axis = 1)
return label[indices[0]]
st_env.set_python_requirements('/notebooks/requirements.txt')
st_env.register_function('predict', predict)
st_env.connect(
Kafka()
.version('universal')
.topic('test')
.start_from_earliest()
.property('zookeeper.connect', 'zookeeper:2181')
.property('bootstrap.servers', 'kafka:9092')
).with_format(
Json()
.fail_on_missing_field(True)
.schema(
DataTypes.ROW(
[
DataTypes.FIELD('datetime', DataTypes.STRING()),
DataTypes.FIELD('text', DataTypes.STRING()),
]
)
)
).with_schema(
Schema()
.field('datetime', DataTypes.STRING())
.field('text', DataTypes.STRING())
).in_append_mode().register_table_source(
'source'
)
result_path = '/notebooks/output-tensorflow.csv'
st_env.connect(FileSystem().path(result_path)).with_format(
OldCsv()
.field_delimiter(',')
.field('datetime', DataTypes.STRING())
.field('sentence', DataTypes.STRING())
.field('label', DataTypes.STRING())
).with_schema(
Schema()
.field('datetime', DataTypes.STRING())
.field('sentence', DataTypes.STRING())
.field('label', DataTypes.STRING())
).in_append_mode().register_table_sink(
'sink'
)
st_env.from_path('source').select(
    'datetime, text, predict(text)'
).insert_into('sink')
st_env.execute('predict')
| 1.960938 | 2 |
example/servers/migrations/0004_auto_20170524_0707.py | jayvdb/django-test-tools | 9 | 12796393 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-24 12:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('servers', '0003_auto_20170523_1409'),
]
operations = [
migrations.AlterField(
model_name='server',
name='operating_system',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='servers', to='servers.OperatingSystem'),
),
]
| 1.53125 | 2 |
src/datasets.py | vidalt/BA-Trees | 45 | 12796394 | <filename>src/datasets.py<gh_stars>10-100
# MIT License
# Copyright(c) 2020 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pathlib
import pandas as pd
import visualization as tree_view
from IPython.display import display
dataset_names = [
"Breast-Cancer-Wisconsin",
"COMPAS-ProPublica",
"FICO",
"HTRU2",
"Pima-Diabetes",
"Seeds"
]
def create_dataset_selection(show=True, no_fico_sa=True):
import ipywidgets as widgets
selected_datasets = widgets.Select(
options=dataset_names,
value=dataset_names[0],
description="Datasets",
disabled=False
)
if show:
display(selected_datasets)
return selected_datasets
def create_kfold_selection(min_v=1, max_v=10, show=True):
import ipywidgets as widgets
select = widgets.IntSlider(
value=1,
min=min_v,
max=max_v,
step=1,
description='Fold:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
)
if show:
display(select)
return select
def create_cplex_linking_selection(show=True):
import ipywidgets as widgets
select = widgets.Checkbox(
value=False,
description='CPLEX linking',
disabled=False,
indent=False
)
if show:
display(select)
return select
def load_info(dataset, df_train, fn):
info = {
'classes': {},
'filename': fn,
'colors': None,
}
info['features'] = {k:v for k,v in zip(range(len(df_train.columns)-1), df_train.columns[:-1])}
return info
def load(dataset, fold, F=None, S=None):
respath = str(pathlib.Path(__file__).parent.absolute()) + '/resources/datasets/'
if F or S:
fn = respath+'{}/F{}.S{}/{}.F{}.S{}.train{}.csv'.format(dataset, F, S, dataset, F, S, fold)
df_train = pd.read_csv(fn)
df_test = pd.read_csv(respath+'{}/F{}.S{}/{}.F{}.S{}.test{}.csv'.format(dataset, F, S, dataset, F, S, fold))
else:
fn = respath+'{}/{}.train{}.csv'.format(dataset, dataset, fold)
df_train = pd.read_csv(fn)
df_test = pd.read_csv(respath+'{}/{}.test{}.csv'.format(dataset, dataset, fold))
return df_train, df_test, load_info(dataset, df_train, fn)
| 1.992188 | 2 |
py/py_0558_irrational_base.py | lcsm29/project-euler | 0 | 12796395 | # Solution of;
# Project Euler Problem 558: Irrational base
# https://projecteuler.net/problem=558
#
# Let r be the real root of the equation x^3 = x^2 + 1. Every positive integer
# can be written as the sum of distinct increasing powers of r. If we require
# the number of terms to be finite and the difference between any two
# exponents to be three or more, then the representation is unique. For
# example, 3 = r^-10 + r^-5 + r^-1 + r^2 and 10 = r^-10 + r^-7 + r^6.
# Interestingly, the relation holds for the complex roots of the equation.
# Let w(n) be the number of terms in this unique representation of n.
# Thus w(3) = 4 and w(10) = 3. More formally, for all positive integers n,
# we have: n = sum_{k=-inf}^{inf} b_k * r^k, under the conditions that:
# b_k is 0 or 1 for all k; b_k + b_{k+1} + b_{k+2} <= 1 for all k;
# w(n) = sum_{k=-inf}^{inf} b_k is finite.
# Let S(m) = sum_{j=1}^{m} w(j^2). You are given S(10) = 61 and
# S(1000) = 19403. Find S(5 000 000).
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 558
timed.caller(dummy, n, i, prob_id)
| 3.375 | 3 |
pyoptools/raytrace/__init__.py | fcichos/pyoptools | 1 | 12796396 | <reponame>fcichos/pyoptools<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2007, 2008, 2009,2010 <NAME>
# <<EMAIL>>,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Package containing modules and submodules defining an *API* that can be
used to describe Optical surfaces, components and systems.
'''
#~ import calc
#~ import component
#~ import comp_lib
#~ import library
#~ import mat_lib
#~ import shape
#~ import surface
#~ import system
#~
#~ __all__=["calc",
#~ "component",
#~ "comp_lib",
#~ "library",
#~ "mat_lib",
#~ "shape",
#~ "surface",
#~ "system"]
#~ from calc import *
#~ from component import *
#~ from comp_lib import *
#~ #from config import *
#~ from library import *
#~ from mat_lib import *
#~ from ray import *
#~ from shape import *
#~ from surface import *
#~ from system import *
| 1.289063 | 1 |
tests/test_legacy_version.py | GamzeUnlu95/message_ix | 0 | 12796397 | <reponame>GamzeUnlu95/message_ix
import numpy as np
from message_ix import Scenario
msg_args = ('canning problem (MESSAGE scheme)', 'standard')
msg_multiyear_args = ('canning problem (MESSAGE scheme)', 'multi-year')
def test_solve_legacy_scenario(test_legacy_mp):
scen = Scenario(test_legacy_mp, *msg_args)
exp = scen.var('OBJ')['lvl']
# solve scenario, assert that the new objective value is close to previous
scen.remove_solution()
scen.solve()
assert np.isclose(exp, scen.var('OBJ')['lvl'])
| 2.296875 | 2 |
src/cogs/moderation.py | Necrozma200/Zeron | 1 | 12796398 | <gh_stars>1-10
import nextcord
from nextcord.channel import CategoryChannel,DMChannel
from nextcord.colour import Color
from nextcord.components import Button
from nextcord.embeds import Embed
from nextcord.ext import commands
from nextcord.ext.commands.cooldowns import BucketType
from nextcord.ui.view import View
import json
import random
import asyncio
from datetime import datetime
from difflib import get_close_matches
from nextcord.webhook import sync
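# Two-button (Confirm/Cancel) view used by the moderation commands below;
# only the command invoker may press the buttons.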
class AllConfirm(nextcord.ui.View):
def __init__(self,ctx):
super().__init__(timeout=200)
self.value = None
self.ctx=ctx
@nextcord.ui.button(
label="Confirm", style=nextcord.ButtonStyle.grey,emoji="<a:yes:909765403801182208>")
async def confirm(
self, button: nextcord.ui.Button, interaction: nextcord.Interaction):
self.value = True
self.stop()
@nextcord.ui.button(label="Cancel", style=nextcord.ButtonStyle.grey, emoji="<a:no:909765403872481280>")
async def cancel(
self, button: nextcord.ui.Button, interaction: nextcord.Interaction):
self.value = False
self.stop()
    async def interaction_check(self, interaction) -> bool:
        if interaction.user != self.ctx.author:
            await interaction.response.send_message("You can't use that!!", ephemeral=True)
            return False
        return True
BOT_USER_ID="897762972603150346"
class Moderation(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message):
if str(message.author.id) != str(BOT_USER_ID):
send = message.channel.send
@commands.command(name="tempban")
@commands.has_permissions(ban_members=True)
    async def tempban(self, ctx, user: nextcord.User, time=None, reason=None):
        if reason is None:
            reason = "No Reason"
        if user is not None:
            if time is None:
                em = nextcord.Embed(title=f"<a:yes:909765403801182208> | {user.name} Was Banned indefinitely")
                await ctx.send(embed=em)
                await ctx.guild.ban(user)
            else:
                # Convert e.g. "10m" into seconds: number part * unit multiplier.
                time_convert = {"s": 1, "m": 60, "h": 3600, "d": 86400}
                tempban1 = int(time[:-1]) * time_convert[time[-1]]
                em = nextcord.Embed(title=f"<a:yes:909765403801182208> | `{user.name}` Was Banned | Duration: {time} | Reason: {reason}")
                await ctx.send(embed=em)
                if user.bot:
                    await ctx.guild.ban(user)
                    await asyncio.sleep(tempban1)
                    await ctx.guild.unban(user)
                else:
                    await user.send(f"**{ctx.guild.name}**: You have been banned for {time}\n**Reason:** {reason}")
                    await ctx.guild.ban(user)
                    await asyncio.sleep(tempban1)
                    await ctx.guild.unban(user)
        else:
            em = nextcord.Embed(title="<a:no:909765403872481280> | Member To Ban Was Not Found")
            await ctx.send(embed=em)
@commands.command(name="ban", description="Bans the member from your server.")
async def ban(self, ctx, member: nextcord.Member = None, *, reason=None):
"""
**Info**: Bans a member
"""
if member == None:
embed1 = nextcord.Embed(
title="Ban Error", description="Member to ban - Not Found"
)
return await ctx.send(embed=embed1)
if member.id == ctx.author.id:
embed69 = nextcord.Embed(
title="Ban Error",
description="Ban yourself... only a skid would do that XD !",
)
return await ctx.send(embed=embed69)
elif ctx.author.top_role.position < member.top_role.position:
em3 = nextcord.Embed(
title="Ban Error",
description="Member **higher** than you in the role heirarchy - Invalid Permission",
)
return await ctx.send(embed=em3)
elif ctx.author.top_role.position == member.top_role.position:
em3 = nextcord.Embed(
title="Ban Error",
description="Member has same role as you in the role heirarchy - Invalid Permission",)
return await ctx.send(embed=em3)
guild = ctx.guild
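        # Ask the moderator to confirm the action before the ban is applied.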
banEmbed = nextcord.Embed(title=f"Moderation Action <:moderation:910472145824542721> | Ban Case ",color=nextcord.Color.red())
banEmbed.add_field(name="Reason: ", value=reason)
view=AllConfirm(ctx)
await ctx.send(embed=banEmbed,view=view)
await view.wait()
if view.value==False:
em = nextcord.Embed(title=f"<a:no:909765403872481280> | *{member.name} Was Not Banned!*")
await ctx.send(embed=em)
elif view.value== True:
em = nextcord.Embed(title=f"<a:yes:909765403801182208> | *{member.name} Was Banned!*")
await ctx.send(embed=em)
await member.send(f"You got banned in **{guild}** | Reason: **{reason}**")
await member.ban(reason=reason)
@commands.command(description="Lucas unban method")
@commands.has_permissions(ban_members=True)
    async def unban(self, ctx, *, member):
        """
        **Info**: Unbans a member
        """
        banned_users = await ctx.guild.bans()
        member_name, member_discriminator = member.split('#')
        for ban_entry in banned_users:
            user = ban_entry.user
            if (user.name, user.discriminator) == (member_name, member_discriminator):
                await ctx.guild.unban(user)
                em = nextcord.Embed(title=f"<a:yes:909765403801182208> | *{member} Was Unbanned!*")
                await ctx.send(embed=em)
                return
        em = nextcord.Embed(title=f"<a:no:909765403872481280> | *{member} Was Not Unbanned!*")
        await ctx.send(embed=em)
@commands.command(name="kick", description="Kicks the member from your server.")
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, member: nextcord.Member = None, *, reason=None):
if member == None:
embed1 = nextcord.Embed(
title="Kick Error", description="Member to kick - Not Found")
return await ctx.send(embed=embed1)
if not (ctx.guild.me.guild_permissions.kick_members):
embed2 = nextcord.Embed(title="Kick Error",description="I require the ``Kick Members`` permisson to run this command - Missing Permission")
return await ctx.send(embed=embed2)
if member.id == ctx.author.id:
embed = nextcord.Embed(title="Kick Error", description="Can't kick yourself ",)
return await ctx.send(embed=embed)
elif ctx.author.top_role.position < member.top_role.position:
em3 = nextcord.Embed(
title="Kick Error",
description="Member **higher** than you in the role heirarchy - Invalid Permission",
)
return await ctx.send(embed=em3)
elif ctx.author.top_role.position == member.top_role.position:
em3 = nextcord.Embed(
title="Kick Error",
description="Member has same role as you in the role heirarchy - Invalid Permission",
)
return await ctx.send(embed=em3)
kickEmbed = nextcord.Embed(title=f"Moderation Action <:moderation:910472145824542721> | Kick Case ",color=nextcord.Color.red())
kickEmbed.add_field(name="Reason: ", value=reason)
        view = AllConfirm(ctx)
        await ctx.send(embed=kickEmbed, view=view)
        await view.wait()
if view.value==False:
em = nextcord.Embed(title=f"<a:no:909765403872481280> | *{member.name} Was Not Kicked!*")
await ctx.send(embed=em)
elif view.value== True:
em = nextcord.Embed(title=f"<a:yes:909765403801182208> | *{member.name} Was Kicked!*")
await ctx.send(embed=em)
await member.send(f"You got kicked in **{ctx.guild}** | Reason: **{reason}**")
await member.kick(reason=reason)
@commands.command(aliases=["cs", "ci", "channelinfo"])
async def channelstats(self, ctx, channel: nextcord.TextChannel = None):
f"""
**Info**: Get ChannelStats
*Syntax*: "{self.ctx.prefix}" channelstats [channel]
"""
if channel == None:
channel = ctx.channel
embed = nextcord.Embed(
title=f"**ChannelStats for {channel.name}**",
description=f"{'Category :{}'.format(channel.category.name) if channel.category else 'Channel is not in any category'}",
color=nextcord.Color.random())
embed.add_field(name="Channel Guild:-", value=ctx.guild.name, inline=True)
embed.add_field(name="Channel Id:-", value=channel.id, inline=False)
embed.add_field(name="Channel Topic:-",value=f"{channel.topic if channel.topic else 'No topic.'}",inline=False,)
embed.add_field(name="Channel Position:-", value=channel.position, inline=True)
embed.add_field(name="Channel Slowmode?", value=channel.slowmode_delay, inline=True)
embed.add_field(name="Channel is NSFW?", value=channel.is_nsfw(), inline=True)
embed.add_field(name="Channel Permissions Synced?", value=bool(CategoryChannel.permissions_synced), inline=True)
embed.add_field(name="Channel is Annoucement?", value=channel.is_news(), inline=True)
embed.add_field(name="Channel Hash:", value=hash(channel), inline=True)
embed.add_field(name="Channel Creation Time:", value=channel.created_at.strftime("%a, %d %B %Y , %I:%M %p"), inline=False)
        if ctx.guild.icon:
            embed.set_thumbnail(url=ctx.guild.icon.url)
await ctx.send(embed=embed)
@commands.command(name="tempmute", description="Mutes a member indefinitely.")
@commands.has_permissions(manage_messages=True)
async def tempmute(
self, ctx, member: nextcord.Member = None, time=None, *, reason=None
):
guild = ctx.guild
mutedRole = nextcord.utils.get(guild.roles, name="Muted")
if member == None:
em1 = nextcord.Embed(
title="Tempmute Error", description="Member to mute - Not Found"
)
return await ctx.send(embed=em1)
elif member.id == ctx.author.id:
em5 = nextcord.Embed(
title="Tempmute Error", description="Don't bother, ive tried"
)
return await ctx.send(embed=em5)
if time == None:
em2 = nextcord.Embed(
title="Tempmute Error", description="Time to mute - Not Found"
)
return await ctx.send(embed=em2)
elif ctx.author.top_role.position < member.top_role.position:
em3 = nextcord.Embed(
title="Tempmute Error",
description="Member **higher** than you in the role heirarchy - Invalid Permission",
)
return await ctx.send(embed=em3)
if not (ctx.guild.me.guild_permissions.manage_roles):
embed2 = nextcord.Embed(
title="Tempmute Error",
description="I require the ``Manage Roles`` permisson to run this command - Missing Permission",
)
return await ctx.send(embed=embed2)
elif ctx.author.top_role.position == member.top_role.position:
em4 = nextcord.Embed(
title="Tempmute Error",
description="Member has same role as you in the role heirarchy - Invalid Permission",
)
return await ctx.send(embed=em4)
        if not mutedRole:
            mutedRole = await guild.create_role(name="Muted")
            await ctx.send("No mute role found. Creating mute role...")
            # Deny speaking/sending for the Muted role in every channel.
            for channel in guild.channels:
                await channel.set_permissions(
                    mutedRole,
                    speak=False,
                    send_messages=False,
                    read_message_history=True,
                )
        if ctx.guild.me.top_role.position < mutedRole.position:
            em3 = nextcord.Embed(
                title="Tempmute Error",
                description="Muted role too high to give to a member",
            )
            return await ctx.send(embed=em3)
        # Convert e.g. "10m" into seconds: number part * unit multiplier.
        time_convert = {"s": 1, "m": 60, "h": 3600, "d": 86400}
        tempmute = int(time[:-1]) * time_convert[time[-1]]
embed = nextcord.Embed(
title="Tempmute Success",
description=f"{member.mention} was muted ",
colour=nextcord.Colour.blue(),
)
embed.add_field(name="Reason:", value=reason, inline=False)
embed.add_field(name="Duration", value=time)
await ctx.send(embed=embed)
await member.add_roles(mutedRole, reason=reason)
await member.send(
f"You have been muted from: **{guild.name}** | Reason: **{reason}** | Time: **{time}**"
)
if not time == None:
await asyncio.sleep(tempmute)
await member.remove_roles(mutedRole)
await member.send(f"You have been unmuted from **{guild}**")
return
    @commands.command(
        name="mute", description="Mutes a member indefinitely."
    )
@commands.has_permissions(manage_messages=True)
async def mute(self, ctx, member: nextcord.Member = None, *, reason=None):
guild = ctx.guild
mutedRole = nextcord.utils.get(guild.roles, name="Muted")
if not mutedRole:
mutedRole = await guild.create_role(name="Muted")
await ctx.send("No mute role found. Creating mute role...")
for channel in guild.channels:
await channel.set_permissions(
mutedRole,
speak=False,
send_messages=False,
read_message_history=True,
)
if member == None:
em1 = nextcord.Embed(
title="Mute Error", description="Member to mute - Not Found"
)
return await ctx.send(embed=em1)
elif member.id == ctx.author.id:
            em5 = nextcord.Embed(
                title="Mute Error", description="You can't mute yourself"
            )
return await ctx.send(embed=em5)
elif ctx.author.top_role.position < member.top_role.position:
em3 = nextcord.Embed(
title="Mute Error",
description="Member **higher** than you in the role heirarchy - Invalid Permission",
)
return await ctx.send(embed=em3)
elif ctx.author.top_role.position == member.top_role.position:
em4 = nextcord.Embed(
title="Mute Error",
description="Member has same role as you in the role heirarchy - Invalid Permission",
)
return await ctx.send(embed=em4)
if not (ctx.guild.me.guild_permissions.manage_roles):
embed2 = nextcord.Embed(
title="Mute Error",
description="I require the **Manage Roles** permisson to run this command - Missing Permission",
)
return await ctx.send(embed=embed2)
mutedRole = nextcord.utils.get(guild.roles, name="Muted")
if ctx.guild.me.top_role.position < mutedRole.position:
em3 = nextcord.Embed(
title="Mute Error",
description="Muted role too high to give to a member",
)
return await ctx.send(embed=em3)
embed = nextcord.Embed(
title="Mute Success",
description=f"{member.mention} was muted Indefinitly ",
colour=nextcord.Colour.blue(),
)
embed.add_field(name="Reason:", value=reason, inline=False)
await ctx.send(embed=embed)
await member.add_roles(mutedRole, reason=reason)
await member.send(
f"You have been muted from: **{guild.name}** | Reason: **{reason}**"
)
return
@commands.command(name="unmute", description="Unmutes a muted member.")
@commands.has_permissions(manage_messages=True)
async def unmute(self, ctx, member: nextcord.Member = None, *, reason=None):
guild = ctx.guild
if member == None:
em1 = nextcord.Embed(
title="Unmute Error", description="Member to unmute - Not Found"
)
return await ctx.send(embed=em1)
elif member.id == ctx.author.id:
em5 = nextcord.Embed(
title="Unmute Error", description="wHat? <:WHA:815331017854025790>"
)
return await ctx.send(embed=em5)
elif ctx.author.top_role.position < member.top_role.position:
em3 = nextcord.Embed(
title="Unmute Error",
description="Member **higher** than you in the role heirarchy - Invalid Permission",
)
return await ctx.send(embed=em3)
elif ctx.author.top_role.position == member.top_role.position:
em4 = nextcord.Embed(
title="Unmute Error",
description="Member has same role as you in the role heirarchy - Invalid Permission",
)
return await ctx.send(embed=em4)
if not (ctx.guild.me.guild_permissions.manage_roles):
embed2 = nextcord.Embed(
title="Unmute Error",
description="I require the ``Manage Roles`` permisson to run this command - Missing Permission",
)
return await ctx.send(embed=embed2)
mutedRole = nextcord.utils.get(guild.roles, name="Muted")
        if not mutedRole:
            mutedRole = await guild.create_role(name="Muted")
            await ctx.send("No mute role found. Creating mute role...")
            for channel in guild.channels:
                await channel.set_permissions(
                    mutedRole,
                    speak=False,
                    send_messages=False,
                    read_message_history=True,
                )
        if ctx.guild.me.top_role.position < mutedRole.position:
            em3 = nextcord.Embed(
                title="Unmute Error",
                description="Muted role too high to remove from a member",
            )
            return await ctx.send(embed=em3)
embed = nextcord.Embed(
title="Unmute Success",
description=f"{member.mention} was unmuted ",
colour=nextcord.Colour.blue(),
)
embed.add_field(name="Reason:", value=reason, inline=False)
await ctx.send(embed=embed)
await member.remove_roles(mutedRole, reason=reason)
await member.send(
f"You have been unmuted from: **{guild.name}** | Reason: **{reason}**"
)
return
@commands.command(description="Clears a bundle of messages.",aliases=['purge'])
@commands.has_permissions(manage_messages=True)
async def clear(self, ctx, amount=10):
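        # +1 so the invoking command message is removed as well.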
amount = amount + 1
if amount > 101:
em1 = nextcord.Embed(
title="Clear Error",
description="Purge limit exedeed - Greater than 100",
)
return await ctx.send(embed=em1)
else:
await ctx.channel.purge(limit=amount)
msg = await ctx.send("Cleared Messages")
            await asyncio.sleep(10)
await msg.delete()
@commands.command(description="Change the channels slowmode.")
@commands.has_permissions(manage_channels=True)
async def slowmode(self, ctx, time: int):
try:
if time == 0:
em1 = nextcord.Embed(
title="Slowmode Success", description="Slowmode turned off"
)
await ctx.send(embed=em1)
await ctx.channel.edit(slowmode_delay=0)
elif time > 21600:
em2 = nextcord.Embed(
title="Slowmode Error", description="Slowmode over 6 hours"
)
await ctx.send(embed=em2)
else:
await ctx.channel.edit(slowmode_delay=time)
em3 = nextcord.Embed(
title="Slowmode Success",
description=f"Slowmode set to {time} seconds",
)
await ctx.send(embed=em3)
        except Exception as error:
            await ctx.send("Error has occurred, notifying dev team")
            print(error)
@commands.command(
aliases=["giverole", "addr"], description="Gives a member a certain role."
)
@commands.has_permissions(manage_roles=True)
async def addrole(
self, ctx, member: nextcord.Member = None, *, role: nextcord.Role = None
):
if member is None:
embed = nextcord.Embed(
title="Add Role Error",
description="Please ping a user to give them a role!",
)
await ctx.send(embed=embed)
return
if role is None:
embed = nextcord.Embed(
title="Add Role Error",
description="Please ping a role to give {} that role!".format(
member.mention
),
)
await ctx.send(embed=embed)
return
if ctx.author.top_role.position < role.position:
em = nextcord.Embed(
title="Add Role Error",
description="You do not have enough permissions to give this role",
)
return await ctx.send(embed=em)
if ctx.guild.me.top_role.position < role.position:
embed = nextcord.Embed(
title="Add Role Error",
description="That role is too high for me to perform this action",
)
return await ctx.send(embed=embed)
try:
addRole = True
for role_ in member.roles:
if role_ == role:
addRole = False
break
if not addRole:
embed = nextcord.Embed(
title="Add Role Error",
description=f"{member.mention} already has the role you are trying to give",
)
await ctx.send(embed=embed)
return
else:
em = nextcord.Embed(
title="Add Role Success",
description=f"{role.mention} has been assigned to {member.mention}",
)
await ctx.send(embed=em)
await member.add_roles(role)
return
        except Exception as error:
            print(error)
@commands.command(
aliases=["takerole", "remover"],
description="Removes a certain role from a member.",
)
@commands.has_permissions(manage_roles=True)
async def removerole(
self,
ctx,
member: nextcord.Member = None,
role: nextcord.Role = None,
*,
reason=None,
):
if member is None:
embed = nextcord.Embed(
title="Remove Role Error",
description="Please ping a user to remove a role from them!",
)
await ctx.send(embed=embed)
return
if role is None:
embed = nextcord.Embed(
title="Remove Role Error",
description="Please ping a role to remove the role from {}!".format(
member.mention
),
)
await ctx.send(embed=embed)
return
if ctx.author.top_role.position < role.position:
em = nextcord.Embed(
title="Remove Role Error",
description="You do not have enough permissions to remove this role",
)
return await ctx.send(embed=em)
if ctx.guild.me.top_role.position < role.position:
embed = nextcord.Embed(
title="Remove Role Error",
description="That role is too high for me to perform this action",
)
return await ctx.send(embed=embed)
try:
roleRemoved = False
for role_ in member.roles:
if role_ == role:
await member.remove_roles(role)
roleRemoved = True
break
if not roleRemoved:
embed = nextcord.Embed(
title="Remove Role Error",
description=f"{member.mention} already has the role you are trying to give",
)
await ctx.send(embed=embed)
return
else:
em = nextcord.Embed(
title="Remove Role Success!",
description=f"{role.mention} has been removed from {member.mention}",
)
await ctx.send(embed=em)
return
        except Exception as error:
            print(error)
@commands.command(description="Locks the channel.")
@commands.has_permissions(kick_members=True)
async def lock(self, ctx, channel: nextcord.TextChannel = None, setting = None):
if setting == '--server':
view = LockConfirm()
em = nextcord.Embed(
title="Are you sure?",
description="This is a very risky command only to be used in important situations such as, `Raid on the Server`. **If this command is used for the wrong purpose you may risk getting demoted if not banned from the staff team.**",
)
await ctx.author.send(embed = em, view=view)
await view.wait()
if view.value is None:
await ctx.author.send("Command has been Timed Out, please try again.")
elif view.value:
for channel in ctx.guild.channels:
await channel.set_permissions(
ctx.guild.default_role,
reason=f"{ctx.author.name} locked {channel.name} using --server override",
send_messages=False,
)
embed = nextcord.Embed(
title="Lockdown Success",
description=f"Locked entire server ",
)
await ctx.send(embed=embed)
else:
lockEmbed = nextcord.Embed(
title="Lock Cancelled",
description="Lets pretend like this never happened them :I",
)
await ctx.author.send(embed=lockEmbed)
return
if channel is None:
channel = ctx.message.channel
await channel.set_permissions(
ctx.guild.default_role,
reason=f"{ctx.author.name} locked {channel.name}",
            send_messages=False,
)
embed = nextcord.Embed(
title="Lockdown Success",
description=f"Locked {channel.mention} ",
)
await ctx.send(embed=embed)
@commands.command(description="Unlocks the channel.")
@commands.has_permissions(kick_members=True)
async def unlock(self, ctx, channel: nextcord.TextChannel = None, setting=None):
if setting == '--server':
for channel in ctx.guild.channels:
await channel.set_permissions(
ctx.guild.default_role,
reason=f"{ctx.author.name} unlocked {channel.name} using --server override",
send_messages=None,
)
embed = nextcord.Embed(
title="Unlock Success",
description=f"Unlocked entire server ",
)
await ctx.send(embed=embed)
return
if channel is None:
channel = ctx.channel
await channel.set_permissions(
ctx.guild.default_role,
reason=f"{ctx.author.name} unlocked {channel.name}",
send_messages=True,
)
embed = nextcord.Embed(
title="Unlock Success",
description=f"Unlocked {channel.mention} ",
)
await ctx.send(embed=embed)
@commands.command(description="Modbans the member.")
@commands.has_permissions(kick_members=True)
@commands.cooldown(1, 21600, commands.BucketType.user)
    async def modban(self, ctx, member: nextcord.Member = None, *, reason=None):
        if member is None:
            embed1 = nextcord.Embed(
                title="Ban Error", description="Member to ban - Not Found"
            )
            return await ctx.send(embed=embed1)
        if reason is None:
            reason = f"{ctx.author.name} modbanned {member.name}"
        else:
            reason = (
                f"{ctx.author.name} modbanned {member.name} for the reason of {reason}"
            )
if member.id == ctx.author.id:
embed69 = nextcord.Embed(
title="Ban Error",
description="No banning yourself...",
)
return await ctx.send(embed=embed69)
em = nextcord.Embed(
title="Are you sure?",
description="This is a very risky command only to be used in important situations such as, `NSFW or NSFLPosting` or `Raid on the Server`. Only use this command if no admin is online or responding. **If this command is used for the wrong purpose you may risk getting demoted if not banned from the staff team.**",
)
        view = LockConfirm()  # assumption: reuse the lock confirmation view; the original may define a dedicated ban-confirm view
        await ctx.author.send(embed=em, view=view)
await view.wait()
if view.value is None:
await ctx.author.send("Command has been Timed Out, please try again.")
elif view.value:
guild = ctx.guild
banMsg = random.choice("BANNED")
banEmbed = nextcord.Embed(
title="Ban Success", description=f"{member.mention} {banMsg}"
)
banEmbed.add_field(name="Reason", value=reason)
await ctx.author.send(embed=banEmbed)
await member.ban(reason=reason)
await member.send(f"You got banned in **{guild}** | Reason: **{reason}**")
else:
banEmbed = nextcord.Embed(
title="Ban Cancelled",
description="Lets pretend like this never happened them :I",
)
await ctx.author.send(embed=banEmbed)
def setup(bot):
bot.add_cog(Moderation(bot)) | 1.921875 | 2 |
apps/discovery_pyre/uniflex_app_discovery_pyre/pyre_discovery_master_module.py | danieldUKIM/uniflex_wishrem | 0 | 12796399 | from pyre import Pyre
from pyre import zhelper
import threading
import zmq
import logging
import json
import time
from uniflex.core import modules
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2015, Technische Universitat Berlin"
__version__ = "0.1.0"
__email__ = <EMAIL>"
class PyreDiscoveryMasterModule(modules.ControlApplication):
def __init__(self, iface, groupName="uniflex", downlink=None, sub=None,
uplink=None, pub=None):
super(PyreDiscoveryMasterModule, self).__init__()
self.log = logging.getLogger('pyre_discovery_module.main')
pyreLogger = logging.getLogger('pyre')
pyreLogger.setLevel(logging.CRITICAL)
self.running = False
self.iface = iface
self.sub = downlink
if not self.sub:
self.sub = sub
self.pub = uplink
if not self.pub:
self.pub = pub
self.groupName = groupName
self.ctx = zmq.Context()
def _sending_announcements(self):
while self.running:
self.log.debug("Discovery Announcements:"
" SUB={}, PUB={}"
.format(self.sub, self.pub))
msg = json.dumps({'downlink': self.sub,
'uplink': self.pub})
self.discovery_pipe.send(msg.encode('utf_8'))
time.sleep(2)
@modules.on_start()
def start_discovery_announcements(self):
self.log.debug("Start discovery announcements".format())
self.running = True
self.discovery_pipe = zhelper.zthread_fork(
self.ctx, self.discovery_task)
d = threading.Thread(target=self._sending_announcements)
d.setDaemon(True)
d.start()
return True
@modules.on_exit()
def stop_discovery_announcements(self):
self.log.debug("Stop discovery announcements".format())
if self.running:
self.running = False
self.discovery_pipe.send("$$STOP".encode('utf_8'))
def discovery_task(self, ctx, pipe):
self.log.debug("Pyre on iface : {}".format(self.iface))
n = Pyre(self.groupName, sel_iface=self.iface)
n.set_header("DISCOVERY_Header1", "DISCOVERY_HEADER")
n.join(self.groupName)
n.start()
poller = zmq.Poller()
poller.register(pipe, zmq.POLLIN)
while(True):
items = dict(poller.poll())
if pipe in items and items[pipe] == zmq.POLLIN:
message = pipe.recv()
# message to quit
if message.decode('utf-8') == "$$STOP":
break
n.shout(self.groupName, message)
n.stop()
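# Rough standalone sketch (not part of the original module): the discovery
# master is normally instantiated by the uniflex framework from its config,
# so the constructor arguments below are illustrative assumptions only.
#
#   master = PyreDiscoveryMasterModule(iface="eth0", groupName="uniflex",
#                                      sub="tcp://127.0.0.1:8990",
#                                      pub="tcp://127.0.0.1:8989")
#   master.start_discovery_announcements()   # shouts the SUB/PUB URLs every 2 s
#   ...
#   master.stop_discovery_announcements()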
| 2.078125 | 2 |
spire/wsgi/upload.py | siq/spire | 0 | 12796400 | <filename>spire/wsgi/upload.py
import os, json
from scheme import Json, Text
from werkzeug.exceptions import MethodNotAllowed
from werkzeug.formparser import parse_form_data
from werkzeug.utils import secure_filename
from spire.core import Configuration, Unit, Dependency
from spire.util import uniqid
from spire.wsgi.util import Mount
from spire.wsgi.sessions import SessionMiddleware
from spire.context import ContextMiddleware, HeaderParser, SessionParser
from bastion.security.middleware import RedirectMiddleware
class UploadEndpoint(Mount):
session_middleware = Dependency(SessionMiddleware)
context_middleware = ContextMiddleware([HeaderParser(), SessionParser()])
redirect_middleware = Dependency(RedirectMiddleware)
configuration = Configuration({
'upload_directory': Text(nonempty=True, default='/tmp'),
})
def _dispatch_request(self, request, response):
directory = self.configuration['upload_directory']
if request.method == 'GET':
return
elif request.method != 'POST':
raise MethodNotAllowed()
mapping = {}
for name, uploaded_file in request.files.iteritems():
filename = mapping[name] = '%s_%s' % (
uniqid(), secure_filename(uploaded_file.filename))
uploaded_file.save(os.path.join(directory, filename))
response.mimetype = 'text/html'
response.data = Json.serialize(mapping)
class UploadManager(Unit):
configuration = Configuration({
'upload_directory': Text(nonempty=True, default='/tmp'),
})
def acquire(self, id):
return open(self.find(id))
def dispose(self, id):
try:
filename = self.find(id)
except ValueError:
pass
else:
os.unlink(filename)
def find(self, id):
filename = os.path.join(self.configuration['upload_directory'], id)
if os.path.exists(filename):
return filename
else:
raise ValueError(id)
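# Usage sketch (not part of the original module; the id below is illustrative):
# UploadManager is configured by spire with an upload_directory, and files
# saved by UploadEndpoint can later be retrieved or removed by their id.
#
#   manager = UploadManager()
#   path = manager.find('1a2b3c_report.csv')          # absolute path, or ValueError
#   with manager.acquire('1a2b3c_report.csv') as fh:
#       data = fh.read()
#   manager.dispose('1a2b3c_report.csv')               # deletes the stored file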
| 2.03125 | 2 |
pythonProject/venv/Desafio 34.py | lucasjlgc/Aulas-de-Python- | 0 | 12796401 | # Pergunte salario e calcule aumento, maior que 1250 é 10%, menor é 15%.
salario = int(input('Qual o valor do seu salário? '))
salario1= (1250*0.1)+salario
salario2=(1250*0.15)+salario
if salario>=1250:
print('Você teve aumento de 10% e agora receberá {}'.format(salario1))
else:
print('Você teve aumento de 15% e agora receberá {}'.format(salario2))
print('FIM') | 3.703125 | 4 |
frds/mktstructure/measures/__init__.py | mgao6767/wrds | 1 | 12796402 | <reponame>mgao6767/wrds<gh_stars>1-10
from . import bidask_spread, effective_spread, realized_spread, price_impact
| 1.046875 | 1 |
analyzer/benchmark_analyzer.py | alexirae/benchmark-analyzer-visualizer | 0 | 12796403 | import argparse
import numpy as np
from benchmark_statistics import Statistics
from benchmark_containers import BenchmarkResultsContainer
##############################################################################
def createBenchmarkResults(benchmark_samples, operation):
benchmark_results = BenchmarkResultsContainer()
benchmark_results.operation = operation
# Filter outliers
lower_fence, upper_fence = Statistics.getTukeyFences(benchmark_samples)
lower_outliers_samples = benchmark_samples[benchmark_samples < lower_fence]
benchmark_no_outliers_samples = benchmark_samples[(benchmark_samples >= lower_fence) & (benchmark_samples <= upper_fence)]
upper_outliers_samples = benchmark_samples[benchmark_samples > upper_fence]
benchmark_results.sorted_lower_outliers_samples = np.sort(lower_outliers_samples).tolist()
benchmark_results.sorted_no_outliers_samples = np.sort(benchmark_no_outliers_samples).tolist()
benchmark_results.sorted_upper_outliers_samples = np.sort(upper_outliers_samples).tolist()
# Create statistics info from benchmark samples
for key in benchmark_results.statistics:
without_outliers = key == "Without outliers"
benchmark_samples_to_process = benchmark_no_outliers_samples if without_outliers else benchmark_samples
benchmark_stats = benchmark_results.statistics[key]
benchmark_stats.num_analyzed_samples = Statistics.getNumAnalyzedSamples(benchmark_samples_to_process)
benchmark_stats.minimum = Statistics.getMin(benchmark_samples_to_process)
benchmark_stats.lower_fence = benchmark_results.sorted_no_outliers_samples[0] # Plotly uses first non outlier point, for exact lower_fence set to: lower_fence
benchmark_stats.q1 = Statistics.getPercentile(benchmark_samples_to_process, 25)
benchmark_stats.mean = Statistics.getMean(benchmark_samples_to_process)
benchmark_stats.median = Statistics.getPercentile(benchmark_samples_to_process, 50)
benchmark_stats.q3 = Statistics.getPercentile(benchmark_samples_to_process, 75)
benchmark_stats.upper_fence = benchmark_results.sorted_no_outliers_samples[-1] # Plotly uses last non outlier point, for exact upper_fence set to: upper_fence
benchmark_stats.maximum = Statistics.getMax(benchmark_samples_to_process)
benchmark_stats.iqr = Statistics.getIQR(benchmark_samples_to_process)
benchmark_stats.std_dev = Statistics.getStdDev(benchmark_samples_to_process)
benchmark_stats.std_err = Statistics.getStdErr(benchmark_samples_to_process)
benchmark_stats.std_err_percentage = benchmark_stats.std_err / benchmark_stats.mean * 100.0 if benchmark_stats.std_err > 0.0 else 0.0
benchmark_stats.margin = Statistics.getMargin(benchmark_samples_to_process)
benchmark_stats.margin_percentage = benchmark_stats.margin / benchmark_stats.mean * 100.0 if benchmark_stats.margin > 0.0 else 0.0
benchmark_stats.confidence_interval = Statistics.getConfidenceInterval(benchmark_samples_to_process)
benchmark_stats.skewness = Statistics.getSkewness(benchmark_samples_to_process)
benchmark_stats.kurtosis = Statistics.getKurtosis(benchmark_samples_to_process)
return benchmark_results
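# Note: Statistics.getTukeyFences presumably returns the usual Tukey fences,
#   lower = Q1 - 1.5 * IQR,  upper = Q3 + 1.5 * IQR,
# so samples outside [lower, upper] are treated as outliers above.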
##############################################################################
def printBenchmarkResults(benchmark_samples, benchmark_results):
print("Samples:")
print(benchmark_samples, "\n")
print("Sorted Samples:")
print(benchmark_results.sorted_lower_outliers_samples, benchmark_results.sorted_no_outliers_samples, benchmark_results.sorted_upper_outliers_samples, "\n")
for key in benchmark_results.statistics:
without_outliers = key == "Without outliers"
statistics_results = benchmark_results.getFormatedStatisticsResultsWithoutOutliers() if without_outliers else benchmark_results.getFormatedStatisticsResultsWithOutliers()
text_alignment_offset = len(max(statistics_results, key=len)) + 3
print(key + ":")
for stat_key in statistics_results:
print(stat_key + "= ".rjust(text_alignment_offset - len(stat_key)) + statistics_results[stat_key])
print("\n")
##############################################################################
def runAnalyzer(kwargs=None):
# Parse args
parser = argparse.ArgumentParser(description="Benchmark Analyzer")
parser.add_argument("-in",
"--benchmark_samples_file",
type=str,
required=True,
help="File path containing the benchmark observations as comma separated numbers.")
parser.add_argument("-out",
"--json_output_path",
type=str,
required=True,
help="JSON output path for file containing the statistical information of the analyzed benchmark.")
parser.add_argument("-op",
"--operation_name",
type=str,
required=True,
help="Name of the operation related to the benchmark observations.")
parser.add_argument("-out_name",
"--output_file_name",
type=str,
required=False,
help="(Optional) The name of the output file, if this option is not used the file will be called Benchmark_Results_<MONTH>-<DAY>-<YEAR>_<HOUR>h<MINUTE>m<SECOND>s.")
args = parser.parse_args()
# Input Params
benchmark_samples_file = args.benchmark_samples_file
json_output_path = args.json_output_path
operation_name = args.operation_name
output_file_name = args.output_file_name
# Create an array from benchmark samples in file
with open(benchmark_samples_file) as file:
benchmark_samples = np.fromfile(file, dtype=float, sep=",")
# Create benchmark results
benchmark_results = createBenchmarkResults(benchmark_samples, operation_name)
# Print benchmark results
printBenchmarkResults(benchmark_samples, benchmark_results)
# Export benchmark results to a JSON file
benchmark_results.toJSONFile(json_output_path, operation_name, output_file_name)
##############################################################################
#-----------------------------------------------------------------------------
# Main
#-----------------------------------------------------------------------------
if __name__ == '__main__':
runAnalyzer()
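# Example invocation (file names and the operation label are illustrative only):
#   python benchmark_analyzer.py -in ./samples.txt -out ./results \
#       -op "matrix_multiply" -out_name "matmul_run1"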
| 2.578125 | 3 |
running_modes/enums/__init__.py | marco-foscato/Lib-INVENT | 26 | 12796404 | from running_modes.enums.diversity_filter_enum import DiversityFilterEnum
from running_modes.enums.learning_strategy_enum import LearningStrategyEnum
from running_modes.enums.logging_mode_enum import LoggingModeEnum
from running_modes.enums.running_mode_enum import RunningModeEnum
from running_modes.enums.generative_model_regime import GenerativeModelRegimeEnum
from running_modes.enums.generative_model_parameters import GenerativeModelParametersEnum
from running_modes.enums.scoring_strategy_enum import ScoringStrategyEnum
| 1.226563 | 1 |
code/dataloaders/__init__.py | ShawnCheung/Attention-depth | 87 | 12796405 | from .nyu_dataloader import NYUDataset
from .kitti_dataloader import KITTIDataset
from .dataloader import MyDataloader
from .transforms import Resize, Rotate, RandomCrop, CenterCrop, \
ColorJitter, HorizontalFlip, ToTensor, \
Compose, Crop
from .get_datasets import create_datasets
__all__ = ['MyDataloader', 'NYUDataset', 'KITTIDataset',
'Resize', 'Rotate', 'RandomCrop', 'CenterCrop',
'ColorJitter', 'HorizontalFlip', 'ToTensor',
'Compose', 'Crop', 'create_datasets'] | 1.476563 | 1 |
uri/iniciante/1037.py | AllefLobo/AlgorithmsProblemsSolution | 2 | 12796406 |
valor = float(input())
if valor >= 0 and valor <= 25:
print "Intervalo [0,25]"
elif valor >= 0 and valor <= 50:
print "Intervalo (25,50]"
elif valor >= 0 and valor <= 75:
print "Intervalo (50, 75]"
elif valor >= 0 and valor <= 100:
print "Intervalo (75,100]"
else:
print "Fora de intervalo"
| 3.890625 | 4 |
csv_cleaner.py | jing-viz/radiohead | 1 | 12796407 | <reponame>jing-viz/radiohead
import csv
k_frame_count = 2101
k_skip_frame = 5
for idx in range(1, k_frame_count, k_skip_frame):
org_filename = 'data/csv_org/%s.csv' % idx
new_filename = 'data/csv/%s.csv' % idx
with open(org_filename) as input_csv:
with open(new_filename, 'w') as output_csv:
csv_writer = csv.writer(output_csv)
row_id = 0
for row in csv.reader(input_csv):
row_id = row_id + 1
if row_id % 2 == 0: continue
x = (int(float(row[0])))
y = (int(float(row[1])))
z = (int(float(row[2])))
w = (int(float(row[3])))
if x < 0 or x > 170: continue
if y < 30 or y > 220: continue
if z > 0 or z < -130: continue
csv_writer.writerow([x,y,z,w]) | 2.6875 | 3 |
brains/namelist/models.py | crisisking/udbraaains | 1 | 12796408 | <gh_stars>1-10
import datetime
from django.db import models
class Category(models.Model):
class Meta:
verbose_name_plural = 'categories'
name = models.CharField(max_length=25, null=False, blank=False)
color_code = models.CharField(max_length=7, null=False, blank=False)
def __unicode__(self):
return self.name
class Player(models.Model):
name = models.CharField(max_length=50, null=False, db_index=True)
profile_id = models.IntegerField(null=False, unique=True, db_index=True)
group_name = models.CharField(max_length=50, blank=True, null=True,
default=None, db_index=True)
category = models.ForeignKey(Category, null=True, blank=True)
join_date = models.DateTimeField(default=datetime.datetime.now)
scrape_date = models.DateTimeField(auto_now=True, auto_now_add=True)
is_dead = models.BooleanField(default=False, db_index=True)
def last_known_position(self):
"""Grabs the player's last known location from the report set."""
try:
last_filed = self.report_set.filter(zombies_only=False)
last_filed = last_filed.order_by('-reported_date')[0]
except IndexError:
last_filed = None
try:
last_spotted = self.reported_at.order_by('-reported_date')[0]
except IndexError:
last_spotted = None
if last_filed is None and last_spotted is None:
return u"Never seen"
else:
if last_filed is None:
return last_spotted
elif last_spotted is None:
return last_filed
else:
if last_filed.reported_date >= last_spotted.reported_date:
return last_filed
else:
return last_spotted
def __unicode__(self):
return self.name
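# Illustrative queries (sketch only, assuming a configured Django project;
# the group name below is made up):
#
#   survivors = Player.objects.filter(is_dead=False)
#   grouped = Player.objects.filter(group_name="Some Group")
#   if survivors.exists():
#       location = survivors[0].last_known_position()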
| 2.375 | 2 |
prog/stepik_course/lab_2/task5.py | phen0menon/university-tasks | 0 | 12796409 | from collections import Counter
def find_occurrencies():
text = str(input())
num_inputs = input().split(" ")
k = int(num_inputs[0])
L = int(num_inputs[1])
t = int(num_inputs[2])
curr_pos = 0
occur = set()
for curr_pos, i in enumerate(range(len(text) - L), 0):
curr_str = text[curr_pos:L + curr_pos + 1]
words = Counter(curr_str[i:i+k] for i in range(len(curr_str) - k + 1)).most_common()
occur.update({word[0] for word in words if word[1] == t})
print(" ".join(sorted(list(occur))))
if __name__ == "__main__":
find_occurrencies()
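# Illustrative stdin format (an assumption inferred from the parsing above):
#   line 1: the text to scan, e.g. "CGGACTCGACAGATG..."
#   line 2: "k L t" - k-mer length, window length, occurrence count, e.g. "5 50 4"
# The script prints, sorted and space-separated, the k-mers that occur exactly
# t times within some sliding window of the text.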
| 3.953125 | 4 |
hashdist/spec/hook_api.py | krafczyk/hashdist | 67 | 12796410 | <filename>hashdist/spec/hook_api.py
"""
The API exported to Python hook files that are part of stack descriptions.
A significant portion of the package building logic should eventually find
its way into here.
Hook files are re-loaded for every package build, and so decorators etc.
are run again. The machinery used to HashDist to load hook files is
found in .hook.
"""
import types
from .utils import substitute_profile_parameters
from .exceptions import ProfileError, IllegalHookFileError
class PackageBuildContext(object):
def __init__(self, package_name, dependency_dir_vars, parameters):
import hook
self._build_stage_handlers = {'bash': hook.bash_handler}
self._modules = []
self._bundled_files = {}
# Available in API
self.package_name = package_name
self.parameters = dict(parameters)
self.dependency_dir_vars = list(dependency_dir_vars)
def register_build_stage_handler(self, handler_name, handler_func):
"""
Registers a function as a handler for a given stage handler type.
"""
self._build_stage_handlers[handler_name] = handler_func
def register_module(self, mod):
"""
        Hold a reference to the registered module; this is necessary to avoid
them getting deallocated under our feet, as we don't allow them to live
in sys.modules.
"""
self._modules.append(mod)
def dispatch_build_stage(self, stage):
# Copy stage dict and substitute all string arguments
stage = self.deep_sub(stage)
handler = stage['handler']
if handler not in self._build_stage_handlers:
raise ProfileError(stage, 'build stage handler "%s" not registered' % handler)
return self._build_stage_handlers[handler](self, stage)
def sub(self, s):
"""
Substitute ``{{var}}`` in `s` with variables from `self.parameters` in `s`,
and return resulting string.
"""
return substitute_profile_parameters(s, self.parameters)
def deep_sub(self, doc):
"""
Recursively walk the document `doc`, and for all non-key strings, make
a substitution as described in `sub`. A deep copy is returned.
"""
if isinstance(doc, dict):
return dict((key, self.deep_sub(value)) for key, value in doc.iteritems())
elif isinstance(doc, (list, tuple)):
return [self.deep_sub(item) for item in doc]
elif isinstance(doc, basestring):
return self.sub(doc)
elif isinstance(doc, (int, bool, float, types.NoneType)):
return doc
elif (not doc):
return None
else:
raise TypeError("unexpected item in documents of type %r: %s" % (type(doc), doc))
def bundle_file(self, filename, target_name=None):
"""
Makes sure that a file located in the same directory as the
        package spec YAML-file can be found in the ``_hashdist``
sub-directory of the build directory during the build.
"""
if target_name is None:
target_name = filename
self._bundled_files[target_name] = filename
def build_stage(handler_name=None):
"""
Decorator used to register a function as a handler generating the
code for a given build stage.
Parameters
----------
handler_name : str (optional)
Name of the handler, defaults to the name of the function.
"""
def decorator(func):
handler_name_ = handler_name
if handler_name_ is None:
handler_name_ = func.__name__
import hook
hook.current_package_context.register_build_stage_handler(handler_name_, func)
return func
return decorator
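# Minimal sketch of how a hook file might register a handler with the decorator
# above (the handler body is an illustrative assumption, not HashDist's actual
# contract for what a stage handler returns):
#
#   @build_stage()
#   def my_stage(ctx, stage):
#       # ctx is the PackageBuildContext, stage is the substituted stage dict
#       ...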
| 2.359375 | 2 |
COT/vm_description/ovf/hardware.py | morneaup/cot | 81 | 12796411 | #!/usr/bin/env python
#
# hardware.py - OVFHardware class
#
# June 2016, <NAME>
# Copyright (c) 2013-2016, 2019 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Representation of OVF hardware definitions.
**Classes and Exceptions**
.. autosummary::
:nosignatures:
OVFHardware
OVFHardwareDataError
"""
import copy
import logging
from COT.data_validation import natural_sort
from COT.xml_file import XML
from .item import OVFItem, OVFItemDataError
logger = logging.getLogger(__name__)
class OVFHardwareDataError(Exception):
"""The input data used to construct an :class:`OVFHardware` is not sane."""
class OVFHardware(object):
"""Helper class for :class:`~COT.vm_description.ovf.ovf.OVF`.
Represents all hardware items defined by this OVF;
i.e., the contents of all Items in the VirtualHardwareSection.
Fundamentally it's just a dict of
:class:`~COT.vm_description.ovf.item.OVFItem` objects
with a bunch of helper methods.
"""
def __init__(self, ovf):
"""Construct an OVFHardware object describing all Items in the OVF.
Args:
ovf (OVF): OVF instance to extract hardware information from.
Raises:
OVFHardwareDataError: if any data errors are seen
"""
self.ovf = ovf
self.item_dict = {}
valid_profiles = set(ovf.config_profiles)
item_count = 0
for item in ovf.virtual_hw_section:
namespace = ovf.namespace_for_item_tag(item.tag)
if not namespace:
continue
item_count += 1
# We index the dict by InstanceID as it's the one property of
# an Item that uniquely identifies this set of hardware items.
instance = item.find(namespace + self.ovf.INSTANCE_ID).text
# Pre-sanity check - are all of the profiles associated with this
# item properly defined in the OVF DeploymentOptionSection?
item_profiles = set(item.get(self.ovf.ITEM_CONFIG, "").split())
unknown_profiles = item_profiles - valid_profiles
if unknown_profiles:
raise OVFHardwareDataError("Unknown profile(s) {0} for "
"Item instance {1}"
.format(unknown_profiles, instance))
if instance not in self.item_dict:
self.item_dict[instance] = OVFItem(self.ovf, item)
else:
try:
self.item_dict[instance].add_item(item)
except OVFItemDataError as exc:
logger.debug(exc)
# Mask away the nitty-gritty details from our caller
raise OVFHardwareDataError("Data conflict for instance {0}"
.format(instance))
logger.debug(
"OVF contains %s hardware Item elements describing %s "
"unique devices", item_count, len(self.item_dict))
# Treat the current state as golden:
for ovfitem in self.item_dict.values():
ovfitem.modified = False
def update_xml(self):
"""Regenerate all Items under the VirtualHardwareSection, if needed.
Will do nothing if no Items have been changed.
"""
modified = False
if len(self.item_dict) != len(XML.find_all_children(
self.ovf.virtual_hw_section,
set([self.ovf.ITEM, self.ovf.STORAGE_ITEM,
self.ovf.ETHERNET_PORT_ITEM]))):
modified = True
else:
for ovfitem in self.item_dict.values():
if ovfitem.modified:
modified = True
break
if not modified:
logger.verbose("No changes to hardware definition, "
"so no XML update is required")
return
# Delete the existing Items:
delete_count = 0
for item in list(self.ovf.virtual_hw_section):
if (item.tag == self.ovf.ITEM or
item.tag == self.ovf.STORAGE_ITEM or
item.tag == self.ovf.ETHERNET_PORT_ITEM):
self.ovf.virtual_hw_section.remove(item)
delete_count += 1
logger.debug("Cleared %d existing items from VirtualHWSection",
delete_count)
# Generate the new XML Items, in appropriately sorted order by Instance
ordering = [self.ovf.INFO, self.ovf.SYSTEM, self.ovf.ITEM]
for instance in natural_sort(self.item_dict):
logger.debug("Writing Item(s) with InstanceID %s", instance)
ovfitem = self.item_dict[instance]
new_items = ovfitem.generate_items()
logger.spam("Generated %d items", len(new_items))
for item in new_items:
XML.add_child(self.ovf.virtual_hw_section, item, ordering)
logger.verbose("Updated XML VirtualHardwareSection, now contains %d "
"Items representing %d devices",
len(self.ovf.virtual_hw_section.findall(self.ovf.ITEM)),
len(self.item_dict))
def find_unused_instance_id(self, start=1):
"""Find the first available ``InstanceID`` number.
Args:
start (int): First InstanceID value to consider (disregarding all
lower InstanceIDs, even if available).
Returns:
str: An instance ID that is not yet in use.
"""
instance = int(start)
while str(instance) in self.item_dict.keys():
instance += 1
logger.debug("Found unused InstanceID %d", instance)
return str(instance)
def new_item(self, resource_type, profile_list=None):
"""Create a new OVFItem of the given type.
Args:
resource_type (str): String such as 'cpu' or 'harddisk' - used as
a key to
:data:`~COT.vm_description.ovf.name_helper.OVFNameHelper1.RES_MAP`
profile_list (list): Profiles the new item should belong to
Returns:
tuple: ``(instance_id, ovfitem)``
"""
instance = self.find_unused_instance_id()
ovfitem = OVFItem(self.ovf)
ovfitem.set_property(self.ovf.INSTANCE_ID, instance, profile_list)
ovfitem.set_property(self.ovf.RESOURCE_TYPE,
self.ovf.RES_MAP[resource_type],
profile_list)
# ovftool freaks out if we leave out the ElementName on an Item,
# so provide a simple default value.
ovfitem.set_property(self.ovf.ELEMENT_NAME, resource_type,
profile_list)
self.item_dict[instance] = ovfitem
ovfitem.modified = True
logger.info("Created new %s under profile(s) %s, InstanceID is %s",
resource_type, profile_list, instance)
return (instance, ovfitem)
def delete_item(self, item):
"""Delete the given Item from the hardware.
Args:
item (OVFItem): Item to delete
"""
instance = item.get_value(self.ovf.INSTANCE_ID)
if self.item_dict[instance] == item:
del self.item_dict[instance]
# TODO: error handling - currently a no-op if item not in item_dict
def clone_item(self, parent_item, profile_list):
"""Clone an OVFItem to create a new instance.
Args:
parent_item (OVFItem): Instance to clone from
profile_list (list): List of profiles to clone into
Returns:
tuple: ``(instance_id, ovfitem)``
"""
instance = self.find_unused_instance_id(start=parent_item.instance_id)
logger.spam("Cloning existing Item %s with new instance ID %s",
parent_item, instance)
ovfitem = copy.deepcopy(parent_item)
# Delete any profiles from the parent that we don't need now,
# otherwise we'll get an error when trying to set the instance ID
# on our clone due to self-inconsistency (#64).
for profile in self.ovf.config_profiles:
if ovfitem.has_profile(profile) and profile not in profile_list:
ovfitem.remove_profile(profile)
ovfitem.set_property(self.ovf.INSTANCE_ID, instance, profile_list)
ovfitem.modified = True
self.item_dict[instance] = ovfitem
logger.spam("Added clone of %s under %s, instance is %s",
parent_item, profile_list, instance)
return (instance, ovfitem)
def item_match(self, item, resource_type, properties, profile_list):
"""Check whether the given item matches the given filters.
Args:
item (OVFItem): Item to validate
resource_type (str): Resource type string like 'scsi' or 'serial'
properties (dict): Properties and their values to match
profile_list (list): List of profiles to filter on
Returns:
bool: True if the item matches all filters, False if not.
"""
if resource_type and (self.ovf.RES_MAP[resource_type] !=
item.get_value(self.ovf.RESOURCE_TYPE)):
return False
if profile_list:
for profile in profile_list:
if not item.has_profile(profile):
return False
for (prop, value) in properties.items():
if item.get_value(prop) != value:
return False
return True
def find_all_items(self, resource_type=None, properties=None,
profile_list=None):
"""Find all items matching the given type, properties, and profiles.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
properties (dict): Properties and their values to match
profile_list (list): List of profiles to filter on
Returns:
list: Matching OVFItem instances
"""
items = [self.item_dict[instance] for instance in
natural_sort(self.item_dict)]
filtered_items = []
if properties is None:
properties = {}
for item in items:
if self.item_match(item, resource_type, properties, profile_list):
filtered_items.append(item)
logger.spam("Found %s Items of type %s with properties %s and"
" profiles %s", len(filtered_items), resource_type,
properties, profile_list)
return filtered_items
def find_item(self, resource_type=None, properties=None, profile=None):
"""Find the only OVFItem of the given :attr:`resource_type`.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
properties (dict): Properties and their values to match
profile (str): Single profile ID to search within
Returns:
OVFItem: Matching instance, or None
Raises:
LookupError: if more than one such Item exists.
"""
matches = self.find_all_items(resource_type, properties, [profile])
if len(matches) > 1:
raise LookupError(
"Found multiple matching '{0}' Items (instances {1})"
.format(resource_type, [m.instance_id for m in matches]))
elif len(matches) == 0:
return None
else:
return matches[0]
def get_item_count(self, resource_type, profile):
"""Get the number of Items of the given type for the given profile.
Wrapper for :meth:`get_item_count_per_profile`.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
profile (str): Single profile identifier string to look up.
Returns:
int: Number of items of this type in this profile.
"""
return (self.get_item_count_per_profile(resource_type, [profile])
[profile])
def get_item_count_per_profile(self, resource_type, profile_list):
"""Get the number of Items of the given type per profile.
Items present under "no profile" will be counted against
the total for each profile.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
Returns:
dict: mapping profile strings to the number of items under each
profile.
"""
count_dict = {}
if not profile_list:
# Get the count under all profiles
profile_list = self.ovf.config_profiles + [None]
for profile in profile_list:
count_dict[profile] = 0
for ovfitem in self.find_all_items(resource_type):
for profile in profile_list:
if ovfitem.has_profile(profile):
count_dict[profile] += 1
for (profile, count) in count_dict.items():
logger.spam("Profile '%s' has %s %s Item(s)",
profile, count, resource_type)
return count_dict
def _update_existing_item_profiles(self, resource_type,
count, profile_list):
"""Change profile membership of existing items as needed.
Helper method for :meth:`set_item_count_per_profile`.
Args:
resource_type (str): 'cpu', 'harddisk', etc.
count (int): Desired number of items
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
Returns:
tuple: (count_dict, items_to_add, last_item)
"""
count_dict = self.get_item_count_per_profile(resource_type,
profile_list)
items_seen = dict.fromkeys(profile_list, 0)
last_item = None
# First, iterate over existing Items.
# Once we've seen "count" items under a profile, remove all subsequent
# items from this profile.
# If we don't have enough items under a profile, add any items found
# under other profiles to this profile as well.
for ovfitem in self.find_all_items(resource_type):
last_item = ovfitem
for profile in profile_list:
if ovfitem.has_profile(profile):
if items_seen[profile] >= count:
# Too many items - remove this one!
ovfitem.remove_profile(profile)
else:
items_seen[profile] += 1
else:
if count_dict[profile] < count:
# Add this profile to this Item
ovfitem.add_profile(profile)
count_dict[profile] += 1
items_seen[profile] += 1
# How many new Items do we need to create in total?
items_to_add = 0
for profile in profile_list:
delta = count - items_seen[profile]
if delta > items_to_add:
items_to_add = delta
return count_dict, items_to_add, last_item
def _update_cloned_item(self, new_item, new_item_profiles, item_count):
"""Update a cloned item to make it distinct from its parent.
Helper method for :meth:`set_item_count_per_profile`.
Args:
new_item (OVFItem): Newly cloned Item
new_item_profiles (list): Profiles new_item should belong to
item_count (int): How many Items of this type (including this
item) now exist. Used with
:meth:`COT.platform.Platform.guess_nic_name`
Returns:
OVFItem: Updated :param:`new_item`
Raises:
NotImplementedError: No support yet for updating ``Address``
NotImplementedError: If updating ``AddressOnParent`` but the
prior value varies across config profiles.
NotImplementedError: if ``AddressOnParent`` is not an integer.
"""
resource_type = new_item.hardware_type
address = new_item.get(self.ovf.ADDRESS)
if address:
raise NotImplementedError("Don't know how to ensure a unique "
"Address value when cloning an Item "
"of type {0}".format(resource_type))
address_on_parent = new_item.get(self.ovf.ADDRESS_ON_PARENT)
if address_on_parent:
address_list = new_item.get_all_values(self.ovf.ADDRESS_ON_PARENT)
if len(address_list) > 1:
raise NotImplementedError("AddressOnParent is not common "
"across all profiles but has "
"multiple values {0}. COT can't "
"handle this yet."
.format(address_list))
address_on_parent = address_list[0]
# Currently we only handle integer addresses
try:
address_on_parent = int(address_on_parent)
address_on_parent += 1
new_item.set_property(self.ovf.ADDRESS_ON_PARENT,
str(address_on_parent),
new_item_profiles)
except ValueError:
raise NotImplementedError("Don't know how to ensure a "
"unique AddressOnParent value "
"given base value '{0}'"
.format(address_on_parent))
if resource_type == 'ethernet':
# Update ElementName to reflect the NIC number
element_name = self.ovf.platform.guess_nic_name(item_count)
new_item.set_property(self.ovf.ELEMENT_NAME, element_name,
new_item_profiles)
return new_item
def set_item_count_per_profile(self, resource_type, count, profile_list):
"""Set the number of items of a given type under the given profile(s).
If the new count is greater than the current count under this
profile, then additional instances that already exist under
another profile will be added to this profile, starting with
the lowest-sequence instance not already present, and only as
a last resort will new instances be created.
If the new count is less than the current count under this profile,
then the highest-numbered instances will be removed preferentially.
Args:
resource_type (str): 'cpu', 'harddisk', etc.
count (int): Desired number of items
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
"""
if not profile_list:
# Set the profile list for all profiles, including the default
profile_list = self.ovf.config_profiles + [None]
count_dict, items_to_add, last_item = \
self._update_existing_item_profiles(
resource_type, count, profile_list)
logger.debug("Creating %d new items", items_to_add)
while items_to_add > 0:
# Which profiles does this Item need to belong to?
new_item_profiles = []
for profile in profile_list:
if count_dict[profile] < count:
new_item_profiles.append(profile)
count_dict[profile] += 1
if last_item is None:
logger.notice("No existing items of type %s found. "
"Will create new %s from scratch.",
resource_type, resource_type)
(_, new_item) = self.new_item(resource_type, new_item_profiles)
else:
(_, new_item) = self.clone_item(last_item, new_item_profiles)
# Check/update other properties of the clone that should be unique:
# TODO - we assume that the count is the same across profiles
new_item = self._update_cloned_item(
new_item, new_item_profiles, count_dict[new_item_profiles[0]])
last_item = new_item
items_to_add -= 1
def set_value_for_all_items(self, resource_type, prop_name, new_value,
profile_list, create_new=False):
"""Set a property to the given value for all items of the given type.
If no items of the given type exist, will create a new ``Item`` if
:attr:`create_new` is set to ``True``; otherwise will log a warning
and do nothing.
Args:
resource_type (str): Resource type such as 'cpu' or 'harddisk'
prop_name (str): Property name to update
new_value (str): New value to set the property to
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
create_new (bool): Whether to create a new entry if no items
of this :attr:`resource_type` presently exist.
"""
ovfitem_list = self.find_all_items(resource_type)
if not ovfitem_list:
if not create_new:
logger.warning("No items of type %s found. Nothing to do.",
resource_type)
return
logger.notice("No existing items of type %s found. "
"Will create new %s from scratch.",
resource_type, resource_type)
(_, ovfitem) = self.new_item(resource_type, profile_list)
ovfitem_list = [ovfitem]
for ovfitem in ovfitem_list:
ovfitem.set_property(prop_name, new_value, profile_list)
logger.debug("Updated %s %s to %s under profiles %s",
resource_type, prop_name, new_value, profile_list)
def set_item_values_per_profile(self, resource_type, prop_name, value_list,
profile_list, default=None):
"""Set value(s) for a property of multiple items of a type.
Args:
resource_type (str): Device type such as 'harddisk' or 'cpu'
prop_name (str): Property name to update
value_list (list): List of values to set (one value per item of the
given :attr:`resource_type`)
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
default (str): If there are more matching items than entries in
:attr:`value_list`, set extra items to this value
"""
if profile_list is None:
profile_list = self.ovf.config_profiles + [None]
for ovfitem in self.find_all_items(resource_type):
if len(value_list):
new_value = value_list.pop(0)
else:
new_value = default
for profile in profile_list:
if ovfitem.has_profile(profile):
ovfitem.set_property(prop_name, new_value, [profile])
logger.info("Updated %s property %s to %s under %s",
resource_type, prop_name, new_value, profile_list)
if len(value_list):
logger.warning("After scanning all known %s Items, not all "
"%s values were used - leftover %s",
resource_type, prop_name, value_list)
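# Illustrative use from an OVF instance (a sketch; attribute names on `ovf`
# such as VIRTUAL_QUANTITY follow COT's name helpers and are assumed here):
#
#   hw = OVFHardware(ovf)
#   hw.set_value_for_all_items('cpu', ovf.VIRTUAL_QUANTITY, '4',
#                              profile_list=None, create_new=True)
#   hw.set_item_count_per_profile('ethernet', 3, profile_list=None)
#   hw.update_xml()   # rewrites the VirtualHardwareSection only if modified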
| 2.328125 | 2 |
myjson.py | phattarin-kitbumrung/basic-python | 0 | 12796412 | import json
#Convert from JSON to Python
# some JSON:
x = '{ "name":"John", "age":30, "city":"New York"}'
# parse x:
y = json.loads(x)
# the result is a Python dictionary:
print(y["name"])
#Convert from Python to JSON
# a Python object (dict):
x = {
"name": "John",
"age": 30,
"city": "New York"
}
# convert into JSON:
y = json.dumps(x)
# the result is a JSON string:
print(y)
#Convert a Python object containing all the legal data types:
x = {
"name": "John",
"age": 30,
"married": True,
"divorced": False,
"children": ("Ann","Billy"),
"pets": None,
"cars": [
{"model": "BMW 230", "mpg": 27.5},
{"model": "Ford Edge", "mpg": 24.1}
]
}
print(json.dumps(x))
#Use the indent parameter to define the numbers of indents:
#Use the separators parameter change the default separator:
y = json.dumps(x, indent=4, separators=(". ", " = "))
#Use the sort_keys parameter to specify if the result should be sorted or not:
z = json.dumps(x, indent=4, sort_keys=True)
print(y)
print(z) | 3.9375 | 4 |
clispy/macro/system_macro.py | takahish/lispy | 4 | 12796413 | <filename>clispy/macro/system_macro.py
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from clispy.macro import Macro
from clispy.package import assign_helper, use_package_helper
from clispy.type import Cons, Null, Symbol
# ==============================================================================
# Defines base classes.
#
# SystemMacro
# ==============================================================================
class SystemMacro(Macro):
"""SystemMacro provide some macros for defmacro, defun, and lambda etc.
"""
def __new__(cls, *args, **kwargs):
"""Instantiates SystemMacro.
"""
cls.__name__ = 'SYSTEM-MACRO'
return object.__new__(cls)
def __repr__(self):
"""The official string representation.
"""
return "#<SYSTEM-MACRO {0} {{{1:X}}}>".format(self.__class__.__name__, id(self))
# ==============================================================================
# Defines system macro classes.
# ==============================================================================
class BlockSystemMacro(SystemMacro):
"""block establishes a block and then evaluates forms as an implicit progn.
"""
def __new__(cls, *args, **kwargs):
"""Instantiates BlockSystemMacro.
"""
cls.__name__ = 'BLOCK'
return object.__new__(cls)
def __call__(self, forms, var_env, func_env, macro_env):
"""Behavior of BlockSystemMacro.
"""
from clispy.expander import Expander
name, body = forms.car, forms.cdr
# Expands body recursively.
body = Expander.expand(body, var_env, func_env, macro_env)
# The body of a block has an implicit progn.
forms = Cons(Symbol('BLOCK'), Cons(name, Cons(Cons(Symbol('PROGN'), body), Null())))
return forms
class FletSystemMacro(SystemMacro):
"""flet, labels, and macrolet define local functions and macros, and execute
    forms using the local definitions. forms are executed in order of occurrence.
The body forms (but not the lambda list) of each function created by flet
and labels and each macro created by macrolet are enclosed in an implicit
block whose name is the function block name of the function-name or name,
as appropriate.
flet defines locally named functions and executes a series of forms with
these definition bindings. Any number of such local functions can be defined.
"""
def __new__(cls, *args, **kwargs):
"""Instantiates FletSystemMacro.
"""
cls.__name__ = 'FLET'
return object.__new__(cls)
def __call__(self, forms, var_env, func_env, macro_env):
"""Behavior of FletSystemMacro.
"""
from clispy.expander import Expander
bindings, body = forms.car, forms.cdr
# Expands body recursively.
        body = Expander.expand(body, var_env, func_env, macro_env)
# The body of flet has an implicit progn.
forms = Cons(Symbol('FLET'), Cons(bindings, Cons(Cons(Symbol('PROGN'), body), Null())))
return forms
class IfSystemMacro(SystemMacro):
"""if allows the execution of a form to be dependent on a single test-form.
First test-form is evaluated. If the result is true, then then-form is selected;
otherwise else-form is selected. Whichever form is selected is then evaluated.
"""
def __new__(cls, *args, **kwargs):
"""Instantiates IfSystemMacro.
"""
cls.__name__ = 'IF'
return object.__new__(cls)
def __call__(self, forms, var_env, func_env, macro_env):
"""Behavior of IfSystemMacro.
"""
from clispy.expander import Expander
# If else_form is Null, then else_form is set to Null.
test_form, then_form, else_form = forms.car, forms.cdr.car, forms.cdr.cdr.car
# Expands body recursively.
test_form = Expander.expand(test_form, var_env, func_env, macro_env)
then_form = Expander.expand(then_form, var_env, func_env, macro_env)
else_form = Expander.expand(else_form, var_env, func_env, macro_env)
forms = Cons(Symbol('IF'), Cons(test_form, Cons(then_form, Cons(else_form, Null()))))
return forms
class LabelsSystemMacro(SystemMacro):
"""flet, labels, and macrolet define local functions and macros, and execute
forms using the local definitions. forms are executed in order of occurence.
The body forms (but not the lambda list) of each function created by flet
and labels and each macro created by macrolet are enclosed in an implicit
block whose name is the function block name of the function-name or name,
as appropriate.
labels is equivalent to flet except that the scope of the defined function
names for labels encompasses the function definitions themselves as well
as the body.
"""
def __new__(cls, *args, **kwargs):
"""Instantiates LabelsSystemMacro.
"""
cls.__name__ = 'LABELS'
return object.__new__(cls)
def __call__(self, forms, var_env, func_env, macro_env):
"""Behavior of LablesSystemMacro.
"""
from clispy.expander import Expander
bindings, body = forms.car, forms.cdr
# Expands body recursively.
body = Expander.expand(body, var_env, func_env, macro_env)
# The body of labels has an implicit progn.
forms = Cons(Symbol('LABELS'), Cons(bindings, Cons(Cons(Symbol('PROGN'), body), Null())))
return forms
class LetSystemMacro(SystemMacro):
"""let and let* create new variable bindings and execute a series of forms
that use these bindings. let performs the bindings in parallel and let* does
them sequentially.
"""
def __new__(cls, *args, **kwargs):
"""Instantiates LetSystemMacro.
"""
cls.__name__ = 'LET'
return object.__new__(cls)
def __call__(self, forms, var_env, func_env, macro_env):
"""Behavior of LetSystemMacro.
"""
from clispy.expander import Expander
bindings, body = forms.car, forms.cdr
# Expands body recursively.
body = Expander.expand(body, var_env, func_env, macro_env)
# The body of let has an implicit progn.
forms = Cons(Symbol('LET'), Cons(bindings, Cons(Cons(Symbol('PROGN'), body), Null())))
return forms
class LetAsterSystemMacro(SystemMacro):
"""let and let* create new variable bindings and execute a series of forms
that use these bindings. let performs the bindings in parallel and let* does
them sequentially.
"""
def __new__(cls, *args, **kwargs):
"""Instantiates LetAsterSytemMacro.
"""
cls.__name__ = 'LET*'
return object.__new__(cls)
def __call__(self, forms, var_env, func_env, macro_env):
"""Behavior of LetAsterSystemMacro.
"""
from clispy.expander import Expander
bindings, body = forms.car, forms.cdr
# Expands body recursively.
body = Expander.expand(body, var_env, func_env, macro_env)
# The body of let* has an implicit progn
forms = Cons(Symbol('LET*'), Cons(bindings, Cons(Cons(Symbol('PROGN'), body), Null())))
return forms
class QuoteSystemMacro(SystemMacro):
"""The quote special operator just returns object.
"""
def __new__(cls, *args, **kwargs):
cls.__name__ = 'QUOTE'
return object.__new__(cls)
def __call__(self, forms, var_env, func_env, macro_env):
"""Behavior of QuoteSystemMacro.
"""
        # Returns itself.
return Cons(Symbol('QUOTE'), forms)
class LambdaSystemMacro(SystemMacro):
"""Provides a shorthand notation for a function special form involving a lambda expression.
"""
def __new__(cls, *args, **kwargs):
"""Instantiates LambdaSystemMacro.
"""
cls.__name__ = 'LAMBDA'
return object.__new__(cls)
def __call__(self, forms, var_env, func_env, macro_env):
"""Behavior of LambdaSystemMacro.
"""
from clispy.expander import Expander
params, body = forms.car, forms.cdr
# Expands body recursively.
body = Expander.expand(body, var_env, func_env, macro_env)
# The body of a lambda has an implicit progn.
forms = Cons(Symbol('LAMBDA'), Cons(params, Cons(Cons(Symbol('PROGN'), body), Null())))
return forms
class DefunSystemMacro(SystemMacro):
"""defun implicitly puts a block named block-name around the body forms
"""
def __new__(cls, *args, **kwargs):
"""Instantiates DefunSystemMacro.
"""
cls.__name__ = 'DEFUN'
return object.__new__(cls)
def __call__(self, forms, var_env, func_env, macro_env):
"""Behavior of DefunSystemMacro.
"""
from clispy.expander import Expander
name, params, body = forms.car, forms.cdr.car, forms.cdr.cdr
# Expands body, recursively.
body = Expander.expand(body, var_env, func_env, macro_env)
# The body of a defun has an implicit progn.
body = Cons(Cons(Symbol('PROGN'), body), Null())
# The body of a defun has an implicit block.
forms = Cons(Symbol('DEFUN'), Cons(name, Cons(params, Cons(Cons(Symbol('BLOCK'), Cons(name, body)), Null()))))
return forms
class DefmacroSystemMacro(SystemMacro):
"""Defines name as a macro by associating a macro function with that
name in the global environment. The macro function is defined in
the same lexical environment in which the defmacro form appears.
The expansion function accepts two arguments, a form and an
environment. The expansion function returns a form. The body of
the expansion function is specified by forms. Forms are executed
in order. The value of the last form executed is returned as the
expansion of the macro. The body forms of the expansion function
(but not the lambda-list) are implicitly enclosed in a block whose
name is name.
"""
def __new__(cls, *args, **kwargs):
"""Instantiates DefmacroSystemMacro.
"""
cls.__name__ = 'DEFMACRO'
return object.__new__(cls)
def __call__(self, forms, var_env, func_env, macro_env):
"""Behavior of DefmacroSystemMacro.
"""
from clispy.expander import Expander
name, params, body = forms.car, forms.cdr.car, forms.cdr.cdr
# Expands body, recursively.
body = Expander.expand(body, var_env, func_env, macro_env)
# The value of the last form executed is returned as the expansion of the macro.
body = Cons(Cons(Symbol('PROGN'), body), Null())
# The body of a defmacro has an implicit block.
forms = Cons(Symbol('DEFMACRO'), Cons(name, Cons(params, Cons(Cons(Symbol('BLOCK'), Cons(name, body)), Null()))))
return forms
class BackquoteSystemMacro(SystemMacro):
"""The backquote introduces a template of a data structure to be built.
"""
def __new__(cls, *args, **kwargs):
"""Instantiates BackquoteSystemMacro.
"""
cls.__name__ = 'BACKQUOTE'
return object.__new__(cls)
def __call__(self, forms, var_env, func_env, macro_env):
"""Behavior of BackquoteSystemMacro.
"""
return self.expand_hepler(forms.car)
@classmethod
def expand_hepler(cls, forms):
"""Expand quotes recursively.
"""
if not isinstance(forms, Cons): # An argument is not an instance of Cons, it is quoted.
return Cons(Symbol('QUOTE'), Cons(forms, Null()))
if forms.car is Symbol('UNQUOTE'): # Unquote (,).
return forms.cdr.car
elif isinstance(forms.car, Cons) and forms.car.car is Symbol('UNQUOTE-SPLICING'): # Unquote-splicing (,@).
return Cons(Symbol('APPEND'), Cons(forms.car.cdr.car, Cons(cls.expand_hepler(forms.cdr), Null())))
else: # Expands recursively and returns cons.
return Cons(Symbol('CONS'), Cons(cls.expand_hepler(forms.car), Cons(cls.expand_hepler(forms.cdr), Null())))
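    # Rough example: for the template `(a ,b ,@c), a quoted atom such as A is
    # wrapped as (QUOTE A), the UNQUOTE form B is inserted as-is, and the
    # UNQUOTE-SPLICING form C is stitched in via APPEND.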
# ==============================================================================
# Set functions related on special operators
# ==============================================================================
# For special operators
assign_helper(symbol_name='BLOCK', value=BlockSystemMacro(), package_name='COMMON-LISP', env='MACRO', status=':EXTERNAL')
assign_helper(symbol_name='FLET', value=FletSystemMacro(), package_name='COMMON-LISP', env='MACRO', status=':EXTERNAL')
assign_helper(symbol_name='IF', value=IfSystemMacro(), package_name='COMMON-LISP', env='MACRO', status=':EXTERNAL')
assign_helper(symbol_name='LABELS', value=LabelsSystemMacro(), package_name='COMMON-LISP', env='MACRO', status=':EXTERNAL')
assign_helper(symbol_name='LET', value=LetSystemMacro(), package_name='COMMON-LISP', env='MACRO', status=':EXTERNAL')
assign_helper(symbol_name='LET*', value=LetAsterSystemMacro(), package_name='COMMON-LISP', env='MACRO', status=':EXTERNAL')
assign_helper(symbol_name='QUOTE', value=QuoteSystemMacro(), package_name='COMMON-LISP', env='MACRO', status='EXTERNAL')
# For system functions
assign_helper(symbol_name='LAMBDA', value=LambdaSystemMacro(), package_name='COMMON-LISP', env='MACRO', status=':EXTERNAL')
assign_helper(symbol_name='DEFUN', value=DefunSystemMacro(), package_name='COMMON-LISP', env='MACRO', status=':EXTERNAL')
assign_helper(symbol_name='DEFMACRO', value=DefmacroSystemMacro(), package_name='COMMON-LISP', env='MACRO', status=':EXTERNAL')
assign_helper(symbol_name='BACKQUOTE', value=BackquoteSystemMacro(), package_name='COMMON-LISP', env='MACRO', status=':EXTERNAL')
# COMMON-LISP-USER package
use_package_helper(package_name_to_use='COMMON-LISP', package_name='COMMON-LISP-USER') | 1.929688 | 2 |
examples/calendar_widget.py | whitegreyblack/PyWin | 0 | 12796414 | """Calendar_widget.py"""
import re
import curses
import random
import calendar
import itertools
import source.config as config
from collections import namedtuple
date = namedtuple("Date", "Year Month Day")
def iter_months_years(startDate: object, endDate: object) -> tuple:
"""Returns years and months based on given start and end dates. Expected
date format is YYYY-MM-DD. Ex. 2012-07-15
"""
# TODO: Make the function an iterable
months = []
# begin with all years between start and end date
years = [year for year in range(startDate.Year, endDate.Year + 1)]
if len(years) > 1:
# covering more than a single year, find the months being used
for year in range(len(years)):
monthsRange = (1, 13) # normal year covers between months 1-12
if year == 0:
monthsRange = (startDate.Month, 13) # first year in list
elif year == len(years) - 1:
monthsRange = (1, endDate.Month + 1) # last year in list
months.append([month for month in range(*monthsRange)])
else:
# dates are in the same year. grab the months between the dates
months.append([i for i in range(startDate.Month, endDate.Month + 1)])
# return [(year, m) for year, month in zip(years, months) for m in month]
for year, month in zip(years, months):
for m in month:
yield (year, m)
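# Example sketch: spanning December 2017 through February 2018 yields
# (2017, 12), (2018, 1) and (2018, 2); this uses the date namedtuple defined above.
_example_span = list(iter_months_years(date(2017, 12, 1), date(2018, 2, 1)))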
def days_in_month_year(startDate, endDate):
"""Returns the day/date tuple combination for each month/year input passed
into the calendar.TextCalendar class method monthdays2calendar(year, month).
Differences in TextCalendar methods (W => number of weeks in the month):
monthdatescalendar -> returns Wx7 matrix of datetime objects
monthdays2calendar -> returns Wx7 matrix of tuple objects (date, day)
monthdayscalendar -> returns Wx7 matrix of ints representing the date
"""
# setup calendar settings to retrieve dates based on year/month pairs
tc = calendar.TextCalendar()
tc.setfirstweekday(6) # set to sunday as first day
days_per_monthyear = dict()
for year, month in iter_months_years(startDate, endDate):
days_per_monthyear[(year, month)] = tc.monthdays2calendar(year, month)
return days_per_monthyear
def parse_date(datestring: str) -> object:
"""Takes in a string object representing a formatted date. If not
formatted correctly, will raise an error giving description of the correct
format. Returns a date object with year, month, date properties
"""
if not re.match(config.DATE_FORMAT_REGEX, datestring):
error = f"{config.DATE_FORMAT_INVALID} {config.DATE_FORMAT_EXPECTED}"
raise ValueError(error)
return date(*[int(i) for i in datestring.split('-')])
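# For example, parse_date("2017-12-1") (the call used in main below) returns
# date(Year=2017, Month=12, Day=1).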
def initialize_curses_settings():
"""Curses settings that need to be called before the rest of program"""
curses.curs_set(0)
def main(window):
"""Creates a navigatable calendar widget for the dates passed in. Later on
should use min/max dates from the database holding the date infos.
"""
initialize_curses_settings()
loc = 0
# dateParser(db.getMinDate, db.getMaxDate)
start = parse_date("2017-12-1")
end = parse_date("2018-2-1")
# we should now have a list of lists matrix holding weeks per month/year
monthtable = days_in_month_year(start, end)
window.border()
y, x = window.getmaxyx()
window.vline(1, 8, curses.ACS_VLINE, y - 2)
verticaloffset = 2
horizontaloffset = 1
window.addstr(1, 1, "SMTWTFS")
for month in monthtable.values():
for week in month:
window.addstr(verticaloffset, horizontaloffset + 9, str(week))
weekdayindex = 0
for date, dayofweek in week:
if (date) != 0:
window.addstr(verticaloffset,
horizontaloffset + weekdayindex,
'o')
weekdayindex += 1
verticaloffset += 1
ch = window.getch()
print(ch, curses.KEY_PPAGE == ch) #ppage:339, #npage:338
# TODO: implement program loop involving vertical/horizontal scrolling
if __name__ == "__main__":
curses.wrapper(main)
| 3.84375 | 4 |
python/tvm/autotvm/tuner/sampler.py | ryujaehun/chameleon | 1 | 12796415 | <filename>python/tvm/autotvm/tuner/sampler.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return,invalid-name,consider-using-enumerate,abstract-method
"""Base class for sampler
This type of sampler will build an internal method to prune or reduce the number
of configs measured on hardware to speed up tuning without performance loss.
"""
import numpy as np
from ..env import GLOBAL_SCOPE
class Sampler(object):
"""Base class for sampler
This type of sampler will build an internal method to prune or reduce the number
of configs measured on hardware to speed up tuning without performance loss.
Parameters
----------
dims: list
knob form of the dimensions for the configs
"""
def __init__(self, dims):
self.dims = dims
def sample(self, xs):
"""Sample a subset of configs from a larger set of configs
Parameters
----------
xs: Array of int
The indexes of configs from a larger set
Returns
-------
a reduced set of configs
"""
raise NotImplementedError()
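# An illustrative concrete sampler (an assumption for demonstration, not an
# AutoTVM built-in): keep at most `k` randomly chosen configs from the candidates.
class RandomSubsetSampler(Sampler):
    """Example sampler returning a random subset of at most `k` configs."""
    def __init__(self, dims, k=64):
        super(RandomSubsetSampler, self).__init__(dims)
        self.k = k

    def sample(self, xs):
        # Nothing to prune if the candidate set is already small enough.
        if len(xs) <= self.k:
            return list(xs)
        return list(np.random.choice(xs, size=self.k, replace=False))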
| 2.1875 | 2 |
templates/includes/loader.py | angeal185/flask-jinja-greensock-portfolio-webapp | 0 | 12796416 | <script id="sf" type="x-shader/x-fragment">
precision highp float;
uniform float time;
uniform vec2 mouse;
uniform vec2 resolution;
float ball(vec2 p, float k, float d) {
vec2 r = vec2(p.x - cos(time * k) * d, p.y + sin(time * k) * d);
return smoothstep(0.0, 1.0, 0.03 / length(r));
}
void main(void) {
vec2 q = gl_FragCoord.xy / resolution.xy;
vec2 p = -1.0 + 2.0 * q;
p.x *= resolution.x / resolution.y;
float col = 0.0;
for (int i = 1; i <= 7; ++i) {
col += ball(p, float(i), 0.3);
}
for (int i = 1; i <= 5; ++i) {
col += ball(p, float(i), 0.1);
}
gl_FragColor = vec4(col*0.8, col, col*1.8, 1.0);
}
</script>
<script id="sv" type="x-shader/x-vertex">
attribute vec4 vPosition;
void main (void) {
gl_Position = vPosition;
}
</script>
<canvas id="cnv"></canvas> | 1.710938 | 2 |
webservice/api/others/func_d.py | galenothiago/switch-automation | 0 | 12796417 | from flask import Flask, Request, Response, request
import json
def devices():
# Read the raw request body and parse the JSON payload ("dados" = data).
dict_device = request.get_data(as_text=True)
dados_device = json.loads(dict_device)
return dados_device  # assumption: callers want the parsed payload back
| 2.203125 | 2 |
python/tests/test_openfermion_integration.py | ausbin/qcor | 59 | 12796418 | <reponame>ausbin/qcor
import faulthandler
faulthandler.enable()
import unittest
from qcor import *
try:
from openfermion.ops import FermionOperator as FOp
from openfermion.ops import QubitOperator as QOp
from openfermion.transforms import reverse_jordan_wigner, jordan_wigner
class TestOpenFermion(unittest.TestCase):
def test_simple_fermion(self):
# Create Operator as OpenFermion FermionOperator
H = FOp('', 0.0002899) + FOp('0^ 0', -.43658) + \
FOp('1 0^', 4.2866) + FOp('1^ 0', -4.2866) + FOp('1^ 1', 12.25)
@qjit
def ansatz(q: qreg, theta: float):
X(q[0])
Ry(q[1], theta)
CX(q[1], q[0])
n_params = 1
obj = createObjectiveFunction(ansatz, H, n_params, {'gradient-strategy':'parameter-shift'})
optimizer = createOptimizer('nlopt', {'nlopt-optimizer':'l-bfgs'})
results = optimizer.optimize(obj)
self.assertAlmostEqual(results[0], -1.74, places=1)
def test_simple_qubit(self):
# Create Operator as OpenFermion FermionOperator
H = QOp('', 5.907) + QOp('Y0 Y1', -2.1433) + \
QOp('X0 X1', -2.1433) + QOp('Z0', .21829) + QOp('Z1', -6.125)
@qjit
def ansatz(q: qreg, theta: float):
X(q[0])
Ry(q[1], theta)
CX(q[1], q[0])
n_params = 1
obj = createObjectiveFunction(ansatz, H, n_params, {'gradient-strategy':'parameter-shift'})
optimizer = createOptimizer('nlopt', {'nlopt-optimizer':'l-bfgs'})
results = optimizer.optimize(obj)
self.assertAlmostEqual(results[0], -1.74, places=1)
def test_convert_to_qcor(self):
H = FOp('', 0.0002899) + FOp('0^ 0', -.43658) + \
FOp('1 0^', 4.2866) + FOp('1^ 0', -4.2866) + FOp('1^ 1', 12.25)
ours = createOperator('fermion', H)
print(ours.toString())
H = QOp('', 5.907) + QOp('Y0 Y1', -2.1433) + \
QOp('X0 X1', -2.1433) + QOp('Z0', .21829) + QOp('Z1', -6.125)
ours = createOperator('pauli', H)
print(ours.toString())
except:
pass
if __name__ == '__main__':
unittest.main() | 2.328125 | 2 |
posts/nano/dog-breed-classifier/dog_scratch_model.py | necromuralist/Neurotic-Networking | 0 | 12796419 | <reponame>necromuralist/Neurotic-Networking
# python
from functools import partial
import argparse
import os
# pypi
from dotenv import load_dotenv
from PIL import Image, ImageFile
from torchvision import datasets
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optimizer
import torchvision.transforms as transforms
# this project
from neurotic.tangles.data_paths import DataPathTwo
from neurotic.tangles.timer import Timer
from neurotic.constants.imagenet_map import imagenet
# the output won't show up if you don't flush it when redirecting it to a file
print = partial(print, flush=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
ImageFile.LOAD_TRUNCATED_IMAGES = True
load_dotenv()
dog_path = DataPathTwo(folder_key="DOG_PATH")
dog_training_path = DataPathTwo(folder_key="DOG_TRAIN")
dog_testing_path = DataPathTwo(folder_key="DOG_TEST")
dog_validation_path = DataPathTwo(folder_key="DOG_VALIDATE")
human_path = DataPathTwo(folder_key="HUMAN_PATH")
BREEDS = len(set(dog_training_path.folder.iterdir()))
print("Number of Dog Breeds: {}".format(BREEDS))
SPEAKABLE = False  # assumption: SPEAKABLE is not defined in this snippet; default to silent
timer = Timer(beep=SPEAKABLE)
means = [0.485, 0.456, 0.406]
deviations = [0.229, 0.224, 0.225]
IMAGE_SIZE = 224
IMAGE_HALF_SIZE = IMAGE_SIZE//2
train_transform = transforms.Compose([
transforms.RandomRotation(30),
transforms.RandomResizedCrop(IMAGE_SIZE),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(means,
deviations)])
test_transform = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(IMAGE_SIZE),
transforms.ToTensor(),
transforms.Normalize(means,
deviations)])
training = datasets.ImageFolder(root=str(dog_training_path.folder),
transform=train_transform)
validation = datasets.ImageFolder(root=str(dog_validation_path.folder),
transform=test_transform)
testing = datasets.ImageFolder(root=str(dog_testing_path.folder),
transform=test_transform)
BATCH_SIZE = 10
WORKERS = 0
train_batches = torch.utils.data.DataLoader(training, batch_size=BATCH_SIZE,
shuffle=True, num_workers=WORKERS)
validation_batches = torch.utils.data.DataLoader(
validation, batch_size=BATCH_SIZE, shuffle=True, num_workers=WORKERS)
test_batches = torch.utils.data.DataLoader(
testing, batch_size=BATCH_SIZE, shuffle=True, num_workers=WORKERS)
loaders_scratch = dict(train=train_batches,
validate=validation_batches,
test=test_batches)
LAYER_ONE_OUT = 16
LAYER_TWO_OUT = LAYER_ONE_OUT * 2
LAYER_THREE_OUT = LAYER_TWO_OUT * 2
KERNEL = 3
PADDING = 1
FULLY_CONNECTED_OUT = 500
class Net(nn.Module):
"""Naive Neural Network to classify dog breeds"""
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(3, LAYER_ONE_OUT,
KERNEL, padding=PADDING)
self.conv2 = nn.Conv2d(LAYER_ONE_OUT, LAYER_TWO_OUT,
KERNEL, padding=PADDING)
self.conv3 = nn.Conv2d(LAYER_TWO_OUT, LAYER_THREE_OUT,
KERNEL, padding=PADDING)
# max pooling layer
self.pool = nn.MaxPool2d(2, 2)
# linear layer (64 * 28 * 28 -> 500)
self.fc1 = nn.Linear((IMAGE_HALF_SIZE//4)**2 * LAYER_THREE_OUT, FULLY_CONNECTED_OUT)
self.fc2 = nn.Linear(FULLY_CONNECTED_OUT, BREEDS)
# dropout layer (p=0.25)
self.dropout = nn.Dropout(0.25)
return
def forward(self, x):
# add sequence of convolutional and max pooling layers
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = x.view(-1, (IMAGE_HALF_SIZE//4)**2 * LAYER_THREE_OUT)
x = self.dropout(x)
x = self.dropout(F.relu(self.fc1(x)))
x = self.fc2(x)
return x
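# A small shape sanity-check sketch (not called anywhere; run it manually if needed):
# two dummy 3x224x224 images should map to logits of shape (2, BREEDS).
def _sanity_check_output_shape():
    dummy = torch.zeros(2, 3, IMAGE_SIZE, IMAGE_SIZE)
    return Net()(dummy).shape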
model_scratch = Net()
if torch.cuda.is_available():
print("Using {} GPUs".format(torch.cuda.device_count()))
model_scratch = nn.DataParallel(model_scratch)
model_scratch.to(device)
criterion_scratch = nn.CrossEntropyLoss()
optimizer_scratch = optimizer.SGD(model_scratch.parameters(),
lr=0.001,
momentum=0.9)
def train(epochs: int, train_batches: torch.utils.data.DataLoader,
validation_batches: torch.utils.data.DataLoader,
model: nn.Module,
optimizer: optimizer.SGD,
criterion: nn.CrossEntropyLoss,
epoch_start: int=1,
save_path: str="model_scratch.pt"):
"""Trains the Model
Args:
epochs: number of times to train on the data set
train_batches: the batch-loaders for training
validation_batches: batch-loaders for validation
model: the network to train
optimizer: the gradient descent object
criterion: object to do backwards propagation
epoch_start: number to start the epoch count with
save_path: path to save the best network parameters
"""
validation_loss_min = numpy.Inf
end = epoch_start + epochs
for epoch in range(epoch_start, end):
timer.start()
training_loss = 0.0
validation_loss = 0.0
model.train()
for data, target in train_batches:
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
training_loss += loss.item() * data.size(0)
model.eval()
for data, target in validation_batches:
data, target = data.to(device), target.to(device)
output = model(data)
loss = criterion(output, target)
validation_loss += loss.item() * data.size(0)
training_loss /= len(train_batches.dataset)
validation_loss /= len(validation_batches.dataset)
timer.end()
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch,
training_loss,
validation_loss
))
if validation_loss < validation_loss_min:
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
validation_loss_min,
validation_loss))
torch.save(model.state_dict(), save_path)
validation_loss_min = validation_loss
return model
def test(test_batches: torch.utils.data.DataLoader,
model: nn.Module,
criterion: nn.CrossEntropyLoss) -> None:
"""Test the model
Args:
test_batches: batch loader of test images
model: the network to test
criterion: calculator for the loss
"""
test_loss = 0.
correct = 0.
total = 0.
model.eval()
for data, target in test_batches:
data, target = data.to(device), target.to(device)
output = model(data)
loss = criterion(output, target)
test_loss += loss.item() * data.size(0)
# convert output probabilities to predicted class
predictions = output.data.max(1, keepdim=True)[1]
# compare predictions to true label
correct += numpy.sum(
numpy.squeeze(
predictions.eq(
target.data.view_as(predictions))).cpu().numpy())
total += data.size(0)
test_loss /= len(test_batches.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
100. * correct / total, correct, total))
return
def train_and_test(train_batches: torch.utils.data.DataLoader,
validate_batches: torch.utils.data.DataLoader,
test_batches: torch.utils.data.DataLoader,
model: nn.Module,
model_path: Path,
optimizer: optimizer.SGD,
criterion: nn.CrossEntropyLoss,
epochs: int=10,
epoch_start: int=1,
load_model: bool=False) -> None:
"""Trains and Tests the Model
Args:
train_batches: batch-loaders for training
validate_batches: batch-loaders for validation
test_batches: batch-loaders for testing
model: the network to train
model_path: where to save the best model
optimizer: the gradient descent object
criterion: object to do backwards propagation
epochs: number of times to train on the data set
epoch_start: number to start the epoch count with
load_model: whether to load the model from a file
"""
if load_model and model_path.is_file():
model.load_state_dict(torch.load(model_path))
print("Starting Training")
timer.start()
model_scratch = train(epochs=epochs,
epoch_start=epoch_start,
train_batches=train_batches,
validation_batches=validate_batches,
model=model,
optimizer=optimizer,
criterion=criterion,
save_path=model_path)
timer.end()
# load the best model
model.load_state_dict(torch.load(model_path))
print("Starting Testing")
timer.start()
test(test_batches, model, criterion)
timer.end()
return
model_path = DataPathTwo(
folder_key="MODELS",
filename="model_scratch.pt")
assert model_path.folder.is_dir()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Test or Train the Naive Dog Classifier")
parser.add_argument("--test", action="store_true",
help="Only run the test")
parser.add_argument("--epochs", default=10, type=int,
help="Training epochs (default: %(default)s)")
parser.add_argument(
"--epoch-offset", default=0, type=int,
help="Offset for the output of epochs (default: %(default)s)")
parser.add_argument("--restart", action="store_true",
help="Wipe out old model.")
arguments = parser.parse_args()
if arguments.test:
test(loaders_scratch["test"], model_scratch, criterion_scratch)
else:
train_and_test(epochs=arguments.epochs,
train_batches=loaders_scratch["train"],
validate_batches=loaders_scratch["validate"],
test_batches=loaders_scratch["test"],
model=model_scratch,
optimizer=optimizer_scratch,
criterion=criterion_scratch,
epoch_start=arguments.epoch_offset,
model_path=model_path.from_folder,
load_model=not arguments.restart)
| 2.484375 | 2 |
tests/test_public_datasets.py | scaleapi/nucleus-python-client | 13 | 12796420 | from nucleus.dataset import Dataset
PANDASET_ID = "ds_bwhjbyfb8mjj0ykagxf0"
def test_get_pandaset_items(CLIENT):
dataset: Dataset = CLIENT.get_dataset(PANDASET_ID)
items = dataset.items
items_and_annotations = dataset.items_and_annotations()
target_item = items[0]
assert {_["item"].reference_id for _ in items_and_annotations} == set(
[i.reference_id for i in items]
)
ref_item = dataset.refloc(target_item.reference_id)
assert ref_item["item"] == target_item
index_item = dataset.iloc(0)
assert index_item["item"] in items
| 2.5 | 2 |
work/NCS/test_files/test_wtih_restraints.py | youdar/work | 0 | 12796421 | <gh_stars>0
from __future__ import division
import mmtbx.monomer_library.pdb_interpretation
from mmtbx import monomer_library
import mmtbx.monomer_library.server
import getpass
import sys
import os
def run(file_name):
pdb_processed_file = monomer_library.pdb_interpretation.run(args=[file_name],
assume_hydrogens_all_missing=False,
hard_minimum_nonbonded_distance=0.0,
nonbonded_distance_threshold=None,
substitute_non_crystallographic_unit_cell_if_necessary=True)
grm = pdb_processed_file.geometry_restraints_manager()
print 'done'
def set_test_folder():
"""
Change working directory to avoid littering of
phenix_sources\phenix_regression\development\ncs_constraints.py
"""
username = getpass.getuser()
if username.lower() == 'youval':
osType = sys.platform
if osType.startswith('win'):
tempdir = (r'C:\Phenix\Dev\Work\work\NCS\junk')
else:
tempdir = ('/net/cci/youval/Work/work/NCS/junk')
os.chdir(tempdir)
if __name__=='__main__':
set_test_folder()
run('full_asu.pdb') | 1.882813 | 2 |
protocol/__init__.py | KESHAmambo/IEEE_802_11ad_beamforming_simulation | 0 | 12796422 | settings = {
'log': True,
'verbosity': False,
'vcolored': True,
'time_precision': 1e-10
}
| 1.242188 | 1 |
model/MinionType.py | waniz/russian_ai_cup | 0 | 12796423 | <gh_stars>0
class MinionType:
ORC_WOODCUTTER = 0
FETISH_BLOWDART = 1
| 1.1875 | 1 |
main.py | zeek0x/common | 0 | 12796424 | import sys
from urllib import request, parse, error
from multiprocessing import Process
urls = [
'https://github.com/',
'https://twitter.com/',
'https://hub.docker.com/v2/users/'
]
def inspect_status_code(url):
try:
response = request.urlopen(url)
return response.code
except error.HTTPError as e:
return e.code
def inspect(url, user_id):
code = inspect_status_code(url+user_id)
title = parse.urlparse(url).netloc
prefix = '\033[32m' if code == 404 else '\033[31m'
suffix = '\033[0m'
result = '{}{}{}'.format(prefix, code, suffix)
print(title.ljust(16), result)
def main():
if len(sys.argv) < 2:
print('usage: python3 main.py ${USER_ID}')
exit(1)
user_id = sys.argv[1]
processes = [Process(target=inspect, args=(url, user_id)) for url in urls]
for process in processes: process.start()
for process in processes: process.join()
if __name__ == '__main__':
main()
| 2.703125 | 3 |
data_loader/data_loaders.py | Hhhhhhhhhhao/I2T2I | 0 | 12796425 | import torch
import numpy as np
from torch.utils.data import DataLoader
from torchvision import transforms
from data_loader.datasets_custom import TextImageDataset, COCOTextImageDataset
from base import BaseDataLoader
def text_image_collate_fn(data):
collate_data = {}
# Sort a data list by right caption length (descending order).
data.sort(key=lambda x: x['right_caption'].size(0), reverse=True)
collate_data['right_img_id'] = []
collate_data['class_id'] = []
collate_data['right_txt'] = []
class_ids = []
right_captions = []
right_embeds = []
right_images_32 = []
right_images_64 = []
right_images_128 = []
right_images_256 = []
collate_data['wrong_img_id'] = []
collate_data['wrong_txt'] = []
wrong_captions = []
wrong_embeds = []
wrong_images_32 = []
wrong_images_64 = []
wrong_images_128 = []
wrong_images_256 = []
for i in range(len(data)):
class_ids.append(data[i]['right_img_id'])
collate_data['class_id'].append(data[i]['right_class_id'])
collate_data['right_txt'].append(data[i]['right_txt'])
right_captions.append(data[i]['right_caption'])
right_embeds.append(data[i]['right_embed'])
right_images_32.append(data[i]['right_image_32'])
right_images_64.append(data[i]['right_image_64'])
right_images_128.append(data[i]['right_image_128'])
right_images_256.append(data[i]['right_image_256'])
collate_data['wrong_txt'].append(data[i]['wrong_txt'])
wrong_captions.append(data[i]['wrong_caption'])
wrong_embeds.append(data[i]['wrong_embed'])
wrong_images_32.append(data[i]['wrong_image_32'])
wrong_images_64.append(data[i]['wrong_image_64'])
wrong_images_128.append(data[i]['wrong_image_128'])
wrong_images_256.append(data[i]['wrong_image_256'])
# sort and get captions, lengths, images, embeds, etc.
right_caption_lengths = [len(cap) for cap in right_captions]
collate_data['right_caption_lengths'] = torch.LongTensor(right_caption_lengths)
collate_data['right_captions'] = torch.zeros(len(right_caption_lengths), max(right_caption_lengths)).long()
for i, cap in enumerate(right_captions):
end = right_caption_lengths[i]
collate_data['right_captions'][i, :end] = cap[:end]
# sort and get captions, lengths, images, embeds, etc.
wrong_captions.sort(key=lambda x: len(x), reverse=True)
wrong_caption_lengths = [len(cap) for cap in wrong_captions]
collate_data['wrong_caption_lengths'] = torch.LongTensor(wrong_caption_lengths)
collate_data['wrong_captions'] = torch.zeros(len(wrong_caption_lengths), max(wrong_caption_lengths)).long()
for i, cap in enumerate(wrong_captions):
end = wrong_caption_lengths[i]
collate_data['wrong_captions'][i, :end] = cap[:end]
collate_data['class_id'] = np.stack(class_ids)
collate_data['right_embeds'] = torch.stack(right_embeds, 0)
collate_data['right_images_32'] = torch.stack(right_images_32, 0)
collate_data['right_images_64'] = torch.stack(right_images_64, 0)
collate_data['right_images_128'] = torch.stack(right_images_128, 0)
collate_data['right_images_256'] = torch.stack(right_images_256, 0)
collate_data['wrong_embeds'] = torch.stack(wrong_embeds, 0)
collate_data['wrong_images_32'] = torch.stack(wrong_images_32, 0)
collate_data['wrong_images_64'] = torch.stack(wrong_images_64, 0)
collate_data['wrong_images_128'] = torch.stack(wrong_images_128, 0)
collate_data['wrong_images_256'] = torch.stack(wrong_images_256, 0)
return collate_data
class TextImageDataLoader(DataLoader):
def __init__(self, data_dir, dataset_name, which_set, image_size, batch_size, num_workers):
self.data_dir = data_dir
self.which_set = which_set
self.dataset_name = dataset_name
assert self.which_set in {'train', 'valid', 'test'}
self.image_size = (image_size, image_size)
self.batch_size = batch_size
self.num_workers = num_workers
# transforms.ToTensor convert PIL images in range [0, 255] to a torch in range [-1.0, 1.0]
self.transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
self.dataset = TextImageDataset(self.data_dir, self.dataset_name, self.which_set, self.transform, vocab_from_file=False)
self.n_samples = len(self.dataset)
if self.which_set == 'train' or self.which_set == 'valid':
super(TextImageDataLoader, self).__init__(
dataset=self.dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
collate_fn=text_image_collate_fn
)
else:
super(TextImageDataLoader, self).__init__(
dataset=self.dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=0,
collate_fn=text_image_collate_fn)
class COCOTextImageDataLoader(BaseDataLoader):
"""
COCO Image Caption Model Data Loader
"""
def __init__(self, data_dir, which_set, image_size, batch_size, validation_split, num_workers):
self.data_dir = data_dir
self.which_set = which_set
self.validation_split = validation_split
assert self.which_set in {'train', 'val', 'test'}
self.image_size = (image_size, image_size)
self.batch_size = batch_size
self.num_workers = num_workers
# transforms.ToTensor convert PIL images in range [0, 255] to a torch in range [-1.0, 1.0]
mean = torch.tensor([0.5, 0.5, 0.5], dtype=torch.float32)
std = torch.tensor([0.5, 0.5, 0.5], dtype=torch.float32)
if which_set == 'val' or which_set == 'test':
self.transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
])
else:
self.transform = transforms.Compose([
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
])
self.dataset = COCOTextImageDataset(self.data_dir, self.which_set, self.transform, vocab_from_file=True)
# self.n_samples = len(self.dataset)
if self.which_set == 'train':
super(COCOTextImageDataLoader, self).__init__(
dataset=self.dataset,
batch_size=self.batch_size,
shuffle=True,
validation_split=validation_split,
num_workers=self.num_workers,
collate_fn=text_image_collate_fn
)
else:
super(COCOTextImageDataLoader, self).__init__(
dataset=self.dataset,
batch_size=self.batch_size,
shuffle=False,
validation_split=0,
num_workers=self.num_workers,
collate_fn=text_image_collate_fn)
if __name__ == '__main__':
data_loader = COCOTextImageDataLoader(
data_dir='/Users/leon/Projects/I2T2I/data/coco/',
# dataset_name="birds",
which_set='val',
image_size=256,
batch_size=16,
validation_split=0.05,
num_workers=0)
print(len(data_loader.dataset.vocab))
print(len(data_loader.dataset.vocab.word2idx))
for i, data in enumerate(data_loader):
print(i)
print("right_img_id:", data['right_img_id'])
# print("class_ids:", data["class_id"])
print('right images 32 shape:', data['right_images_32'].shape)
print('right images 64 shape:', data['right_images_64'].shape)
print('right images 128 shape:', data['right_images_128'].shape)
print('right images 256 shape:', data['right_images_256'].shape)
print("right embed shape:", data['right_embeds'].shape)
print("right caption shape:", data['right_captions'].shape)
print("right caption lengths:", data['right_caption_lengths'])
print("right txt:", data["right_txt"])
print("wrong_img_id:", data['wrong_img_id'])
print('wrong images 32 shape:', data['wrong_images_32'].shape)
print('wrong images 64 shape:', data['wrong_images_64'].shape)
print('wrong images 128 shape:', data['wrong_images_128'].shape)
print('wrong images 256 shape:', data['wrong_images_256'].shape)
print("wrong embed shape:", data['wrong_embeds'].shape)
print("wrong caption shape:", data['wrong_captions'].shape)
print("wrong caption lengths:", data['wrong_caption_lengths'])
print("wrong txt:", data["wrong_txt"])
if i == 10:
print("done")
break | 2.578125 | 3 |
tests/orm/test_app_user.py | KoalicjaOtwartyKrakow/backend | 0 | 12796426 | import pytest
from sqlalchemy.exc import ProgrammingError
from sqlalchemy_continuum.utils import count_versions
from kokon.orm import Guest
from kokon.utils.db import DB
from tests.helpers import admin_session
def test_app_user():
with admin_session() as session:
session.execute("TRUNCATE guests_version RESTART IDENTITY;")
session.execute("TRUNCATE guests RESTART IDENTITY;")
session.execute("TRUNCATE transaction RESTART IDENTITY;")
with DB().acquire() as session:
# creates a guest without error and version as well
guid = "74b86069-c837-4431-a7ee-3a4aedda978b"
guest = Guest(
guid=guid,
full_name="<NAME>",
email="<EMAIL>",
phone_number="100-330-497",
people_in_group=4,
adult_male_count=0,
adult_female_count=2,
children_ages=[1, 10],
have_pets=False,
how_long_to_stay="1w",
updated_by_id="782962fc-dc11-4a33-8f08-b7da532dd40d",
)
session.add(guest)
session.commit()
session.refresh(guest)
assert guest.claimed_by_id is None
# trigger works
claimed_at = guest.claimed_at
assert claimed_at is not None
guest.adult_male_count = 1
session.commit()
with pytest.raises(ProgrammingError):
_ = guest.versions[0]
with admin_session() as session:
guest = session.query(Guest).where(Guest.guid == guid).one()
assert count_versions(guest) == 2
assert str(guest.versions[0].guid) == guid
| 2.125 | 2 |
bandc/settings.py | crccheck/atx-bandc | 0 | 12796427 | <reponame>crccheck/atx-bandc
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
from project_runpy import env
BASE_DIR = os.path.dirname(__file__)
SECRET_KEY = env.get("SECRET_KEY", "Rotom")
DEBUG = env.get("DEBUG", False)
ALLOWED_HOSTS = ["*"]
INSTALLED_APPS = (
"bandc.apps.agenda.apps.AgendaConfig",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# support
"django_extensions",
"django_object_actions",
"bootstrap_pagination",
)
MIDDLEWARE = (
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
)
ROOT_URLCONF = "bandc.urls"
WSGI_APPLICATION = "bandc.wsgi.application"
# Database
# https://docs.djangoproject.com/en/stable/ref/settings/#databases
DATABASES = {"default": dj_database_url.config(default="sqlite:///bandc.db")}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/stable/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "America/Chicago"
USE_I18N = False
USE_L10N = False
USE_TZ = True
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"bandc.context_processors.base_url",
],
"debug": DEBUG,
},
},
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = "/static/"
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
MEDIA_ROOT = os.path.join(BASE_DIR, "..", "media")
MEDIA_URL = "/media/"
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"root": {"level": os.environ.get("LOG_LEVEL", "WARNING"), "handlers": ["console"]},
"formatters": {
"dev": {
"format": "%(levelname)s %(name)s %(message)s",
# 'datefmt': '%Y-%m-%dT%H:%M:%S%z', # I want milliseconds but Python doesn't make it easy
# "class": "pythonjsonlogger.jsonlogger.JsonFormatter",
},
},
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"require_debug_true": {"()": "django.utils.log.RequireDebugTrue"},
"readable_sql": {"()": "project_runpy.ReadableSqlFilter"},
},
"handlers": {
"console": {
"level": "DEBUG",
"formatter": "dev",
"class": "project_runpy.ColorizingStreamHandler",
},
},
"loggers": {
"django.db.backends": {
"level": "DEBUG" if env.get("SQL", False) else "INFO",
"handlers": ["console"],
"filters": ["require_debug_true", "readable_sql"],
"propagate": False,
},
"sh": {"level": "WARNING", "propagate": False},
"pdfminer": {"level": "WARNING", "propagate": False},
"factory": {"level": "ERROR", "propagate": False},
},
}
| 1.976563 | 2 |
algorithms_keeper/api.py | hfz1337/algorithms-keeper | 0 | 12796428 | <filename>algorithms_keeper/api.py
from typing import Mapping, Tuple
from gidgethub.abc import UTF_8_CHARSET
from gidgethub.aiohttp import GitHubAPI as BaseGitHubAPI
from .log import STATUS_OK, inject_status_color, logger
TOKEN_ENDPOINT = "access_tokens"
class GitHubAPI(BaseGitHubAPI): # pragma: no cover
LOG_FORMAT = 'api "%(method)s %(path)s %(data)s %(version)s" => %(status)s'
async def _request(
self, method: str, url: str, headers: Mapping[str, str], body: bytes = b""
) -> Tuple[int, Mapping[str, str], bytes]:
"""This is the same method as `gidgethub.aiohttp.GitHubAPI._request` with the
addition of logging the request-response cycle. No need to cover this function.
The logged information is useful for knowing what actions the bot took.
INFO: All actions taken by the bot.
ERROR: Unknown error in the API call.
"""
async with self._session.request(
method, url, headers=headers, data=body
) as response:
# We don't want to reveal the `installation_id` from the URL.
if response.url.name != TOKEN_ENDPOINT:
inject_status_color(response.status)
data = "NONE" if body == b"" else body.decode(UTF_8_CHARSET)
loggerlevel = (
logger.info if response.status in STATUS_OK else logger.error
)
loggerlevel(
self.LOG_FORMAT,
{
"method": method,
# host is always going to be 'api.github.com'.
"path": response.url.raw_path_qs,
"version": f"{response.url.scheme.upper()}/"
f"{response.version.major}.{response.version.minor}",
"data": data,
"status": f"{response.status}:{response.reason}",
},
)
return response.status, response.headers, await response.read()
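# Usage sketch (constructor arguments follow gidgethub's aiohttp GitHubAPI;
# the requester string and token below are placeholders):
#   async with aiohttp.ClientSession() as session:
#       gh = GitHubAPI(session, "algorithms-keeper", oauth_token="<token>")
#       await gh.getitem("/rate_limit")  # request/response is logged by _request above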
| 2.390625 | 2 |
pymecompress/codecs.py | python-microscopy/pymecompress | 0 | 12796429 | <filename>pymecompress/codecs.py
"""
numcodecs compatible compression and quantization codecs.
"""
from . import bcl
import numcodecs
from numcodecs.abc import Codec
class Huffman(Codec):
codec_id='pymecompress.huffman'
def encode(self, buf):
return bcl.huffman_compress_buffer(buf)
def decode(self, buf, out=None):
return bcl.huffman_decompress_buffer(buf, out)
def get_config(self):
return {'codec_id': self.codec_id}
@classmethod
def from_config(cls, config):
return cls()
numcodecs.register_codec(Huffman)
class HuffmanQuant16(Codec):
codec_id = 'pymecompress.quant16'
def __init__(self, offset=0, scale=1):
self._offset = offset
self._scale = scale
def encode(self, buf):
return bcl.huffman_compress_quant_buffer(buf, self._offset, self._scale)
def decode(self, buf, out=None):
ret = bcl.huffman_decompress_buffer(buf, None)
ret = (ret*ret)/self._scale + self._offset
if out is None:
out = ret
else:
out[:] = ret
return out
def get_config(self):
return {'codec_id': self.codec_id,
'offset': self._offset, 'scale' : self._scale}
@classmethod
def from_config(cls, config):
return cls(offset=config.get('offset', 0), scale=config.get('scale', 1))
numcodecs.register_codec(HuffmanQuant16)
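# Usage sketch (assumes bcl accepts a contiguous uint8 numpy buffer, with numpy imported as np):
#   codec = Huffman()
#   compressed = codec.encode(np.frombuffer(b"abracadabra", dtype=np.uint8))
#   restored = codec.decode(compressed)
# HuffmanQuant16 is lossy: encode() quantises with the configured offset/scale
# before Huffman coding, so decode() returns an approximation of the input.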
| 2.578125 | 3 |
payflowpro/__init__.py | pelotoncycle/python-payflowpro | 18 | 12796430 | VERSION = (0, 3, 'pre',) | 1.257813 | 1 |
test.py | narumiruna/pytorch-cpp-extension-example | 1 | 12796431 | <filename>test.py
import torch
from cppexample import normalize, gaussian
def main():
x = torch.tensor([1, 2, 3], dtype=torch.float)
y = normalize(x)
print(y)
x = gaussian(x, 0.0, 1.0, 1.0)
print(x)
if __name__ == '__main__':
main()
| 2.640625 | 3 |
randaugment.py | Hayoung93/UDA | 0 | 12796432 | import torch
import torch.nn as nn
from torchvision import transforms as ttf
class RandAugment(nn.Module):
def __init__(self, N, M):
super().__init__()
"""
rotate
shear x
shear y
translate y
translate x
autoContrast
sharpness
identity
contrast
color
brightness
equalize
solarize
posterize
"""
self.N = N
self.M = M
self.aug_list = [Rotate, ShearX, ShearY, TranslateX, TranslateY, AutoContrast,
Sharpness, Identity, Contrast, Color, Brightness, Equalize,
Solarize, Posterize]
def forward(self, img):
self.aug_index = torch.randperm(len(self.aug_list))[:self.N]
self.augmentations = nn.ModuleList([])
for aug_id in self.aug_index:
self.augmentations.append(self.aug_list[aug_id](self.M))
self.augmentations = nn.Sequential(*self.augmentations)
return self.augmentations(img)
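# A minimal usage sketch (a uint8 image tensor is assumed, since Equalize and
# Posterize require integer images); this helper is not called anywhere by default.
def _example_randaugment():
    augmenter = RandAugment(N=2, M=9)
    image = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)
    return augmenter(image)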
class Rotate(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M
def forward(self, img):
return ttf.functional.rotate(img, self.angle)
class ShearX(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M - 180
def forward(self, img):
return ttf.functional.affine(img, 0, [0, 0], 1, [self.angle, 0])
class ShearY(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M - 180
def forward(self, img):
return ttf.functional.affine(img, 0, [0, 0], 1, [0, self.angle])
class TranslateX(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
try:
max_size = img.size()[0]
except TypeError:
# PIL images expose `size` as a tuple attribute, not a callable
max_size = img.size[0]
return ttf.functional.affine(img, 0, [(max_size - 1) / 10 * self.M, 0], 1, [0, 0])
class TranslateY(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
try:
max_size = img.size()[1]
except TypeError:
# PIL images expose `size` as a tuple attribute, not a callable
max_size = img.size[1]
return ttf.functional.affine(img, 0, [0, (max_size - 1) / 10 * self.M], 1, [0, 0])
class AutoContrast(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.autocontrast(img)
class Sharpness(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_sharpness(img, self.M / 5.)
class Identity(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return img
class Contrast(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_contrast(img, self.M / 5.)
class Color(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_saturation(img, self.M / 5.)
class Brightness(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_brightness(img, self.M / 5.)
class Equalize(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.equalize(img)
class Solarize(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.solarize(img, (10 - self.M) * 25.5)
class Posterize(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.posterize(img, round((10 - self.M) / 10 * 8)) | 2.390625 | 2 |
sipy/test.py | riikkano/protopaja2018 | 0 | 12796433 | <reponame>riikkano/protopaja2018<gh_stars>0
# Run in the console: py -m unittest test.py
import unittest
from lib.mittaus import *
from lib.classes import load
from main import *
# Tests the functions from the main program
class TestMain(unittest.TestCase):
def test_openLoads(self):
# Test goes here
pass
def test_openPhases(self):
# Test goes here
pass
def test_openMonthMax(self):
# Test goes here
pass
# Tests whether getConsAll() returns an object
def test_getConsAll(self):
result = getConsAll()
val = isinstance(result, object)
self.assertTrue(val)
# Tests the functions of the "mittaus" package
class TestMittaus(unittest.TestCase):
def test_val_to_volt(self):
result_1 = val_to_volt(4095)
result_2 = val_to_volt(300)
result_3 = val_to_volt(2000)
self.assertEqual(result_1, 1.1)
self.assertEqual(result_2, 0.0806)
self.assertEqual(result_3, 0.5372)
def test_adc_read(self):
sensorPin='P14'
result = adc_read(sensorPin)
bol = isinstance(result, int)
self.assertTrue(bol)
self.assertGreaterEqual(result, 0)
def test_adc_save(self):
# Test goes here
pass
# Tests the methods of the "load" class
class Testload(unittest.TestCase):
def setUp(self):
self.kuorma_1=load(name="Lattialämmitys", ID=12345, sensorPin="P11", relayPin=2, maximumCurrent=10, phase=1, priority=0)
self.kuorma_2=load("Kiuas",12346,"P12",2,10,1,0)
def tearDown(self):
# Test goes here
pass
def test_changeRelayPin(self):
# Test goes here
pass
def test_resetHour(self):
# A better test goes here
result_1 = self.kuorma_1.resetHour()
result_2 = self.kuorma_2.resetHour()
self.assertEqual(result_1, 1)
self.assertEqual(result_2, 1)
def test_getName(self):
result_1 = self.kuorma_1.getName()
result_2 = self.kuorma_2.getName()
self.assertEqual(result_1, self.kuorma_1._load__name)
self.assertEqual(result_2, self.kuorma_2._load__name)
def test_getCons(self):
# A better test goes here
result_1 = self.kuorma_1.getCons()
result_2 = self.kuorma_2.getCons()
self.assertGreaterEqual(result_1, 0)
self.assertGreaterEqual(result_2, 0)
def test_info(self):
# Test goes here
pass
if __name__ == "__main__":
unittest.main()
| 2.78125 | 3 |
ikats/manager/timeseries_mgr_.py | IKATS/ikats_api | 0 | 12796434 | # -*- coding: utf-8 -*-
"""
Copyright 2019 CS Systèmes d'Information
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from ikats.client.datamodel_client import DatamodelClient
from ikats.client.datamodel_stub import DatamodelStub
from ikats.client.opentsdb_client import OpenTSDBClient
from ikats.client.opentsdb_stub import OpenTSDBStub
from ikats.exceptions import (IkatsConflictError, IkatsException,
IkatsNotFoundError)
from ikats.lib import (MDType, check_is_fid_valid, check_is_valid_epoch,
check_type)
from ikats.manager.generic_mgr_ import IkatsGenericApiEndPoint
from ikats.objects import Timeseries
NON_INHERITABLE_PATTERN = re.compile("^qual(.)*|ikats(.)*|funcId")
class IkatsTimeseriesMgr(IkatsGenericApiEndPoint):
"""
Ikats EndPoint specific to Timeseries management
"""
def __init__(self, *args, **kwargs):
super(IkatsTimeseriesMgr, self).__init__(*args, **kwargs)
if self.api.emulate:
self.tsdb_client = OpenTSDBStub(session=self.api.session)
self.dm_client = DatamodelStub(session=self.api.session)
else:
self.tsdb_client = OpenTSDBClient(session=self.api.session)
self.dm_client = DatamodelClient(session=self.api.session)
def new(self, fid=None, data=None):
"""
Create an empty local Timeseries (if fid not provided)
If fid is set, the identifier will be created to database
:param fid: Identifier to create (if provided)
:param data: List of data points as numpy array or python 2D-list
:type fid: str
:type data: list or np.array
:returns: the Timeseries object
:rtype: Timeseries
:raises IkatsConflictError: if *fid* already present in database (use `get` instead of `new`)
"""
if fid is None:
ts = Timeseries(api=self.api)
else:
ts = self._create_ref(fid=fid)
ts.data = data
return ts
def get(self, fid=None, tsuid=None):
"""
Returns an existing Timeseries object by providing either its FID or TSUID (only one shall be provided)
:param fid: FID of the Timeseries
:param tsuid: TSUID of the Timeseries
:type fid: str
:type tsuid: str
:returns: The Timeseries object
:rtype: Timeseries
:raises ValueError: if both *fid* and *tsuid* are set (or none of them)
:raises IkatsNotFoundError: if the identifier was not found in database
"""
if bool(fid) == bool(tsuid):
raise ValueError("fid and tsuid are mutually exclusive")
if fid is not None:
tsuid = self.fid2tsuid(fid=fid, raise_exception=True)
return Timeseries(api=self.api, tsuid=tsuid, fid=fid)
def save(self, ts, parent=None, generate_metadata=True, raise_exception=True):
"""
Import timeseries data points to database or update an existing timeseries with new points
if *generate_metadata* is set or if no TSUID is present in *ts* object,
the *ikats_start_date*, *ikats_end_date* and *qual_nb_points* will be
overwritten by the first point date, last point date and number of points in *ts.data*
*parent* is the original timeseries where metadata shall be taken from
(except intrinsic ones, eg. *qual_nb_points*)
If the timeseries is a new one (object has no tsuid defined), the computation of the metadata is forced
Returns a boolean status of the action (True means "OK", False means "errors occurred")
:param ts: Timeseries object containing information about what to create
:param parent: (optional) Timeseries object of inheritance parent
:param generate_metadata: Generate metadata (set to False when doing partial import) (Default: True)
:param raise_exception: Indicates if exceptions shall be raised (True, default) or not (False)
:type ts: Timeseries
:type parent: Timeseries
:type generate_metadata: bool
:type raise_exception: bool
:returns: the status of the action
:rtype: bool
:raises TypeError: if *ts* is not a valid Timeseries object
"""
# Input checks
check_type(ts, Timeseries, "ts", raise_exception=True)
check_type(parent, [Timeseries, None], "parent", raise_exception=True)
check_type(generate_metadata, bool, "generate_metadata", raise_exception=True)
check_is_fid_valid(ts.fid, raise_exception=True)
try:
# First, we shall create the TSUID reference (if not provided)
if ts.tsuid is None:
ts.tsuid = self._create_ref(ts.fid).tsuid
# If the TS is fresh, we force the creation of the metadata
generate_metadata = True
# Add points to this TSUID
start_date, end_date, nb_points = self.tsdb_client.add_points(tsuid=ts.tsuid, data=ts.data)
if generate_metadata:
# ikats_start_date
self.dm_client.metadata_update(tsuid=ts.tsuid, name='ikats_start_date', value=start_date,
data_type=MDType.DATE, force_create=True)
ts.metadata.set(name='ikats_start_date', value=start_date, dtype=MDType.DATE)
# ikats_end_date
self.dm_client.metadata_update(tsuid=ts.tsuid, name='ikats_end_date', value=end_date,
data_type=MDType.DATE, force_create=True)
ts.metadata.set(name='ikats_end_date', value=end_date, dtype=MDType.DATE)
# qual_nb_points
self.dm_client.metadata_update(tsuid=ts.tsuid, name='qual_nb_points', value=nb_points,
data_type=MDType.NUMBER, force_create=True)
ts.metadata.set(name='qual_nb_points', value=nb_points, dtype=MDType.NUMBER)
# Inherit from parent when it is defined
if parent is not None:
self.inherit(ts=ts, parent=parent)
except IkatsException:
if raise_exception:
raise
return False
return True
def delete(self, ts, raise_exception=True):
"""
Delete the data corresponding to a *ts* object and all associated metadata
Note that if timeseries belongs to a dataset it will not be removed
Returns a boolean status of the action (True means "OK", False means "errors occurred")
:param ts: tsuid of the timeseries or Timeseries Object to remove
:param raise_exception: (optional) Indicates if IKATS exceptions shall be raised (True, default) or not (False)
:type ts: str or Timeseries
:type raise_exception: bool
:returns: the status of the action
:rtype: bool
:raises TypeError: if *ts* is not a str nor a Timeseries
:raises IkatsNotFoundError: if timeseries is not found on server
:raises IkatsConflictError: if timeseries belongs to -at least- one dataset
"""
check_type(value=ts, allowed_types=[str, Timeseries], var_name="ts", raise_exception=True)
tsuid = ts
if isinstance(ts, Timeseries):
if ts.tsuid is not None:
tsuid = ts.tsuid
elif ts.fid is not None:
try:
tsuid = self.dm_client.get_tsuid_from_fid(fid=ts.fid)
except IkatsException:
if raise_exception:
raise
return False
else:
raise ValueError("Timeseries object shall have set at least tsuid or fid")
return self.dm_client.ts_delete(tsuid=tsuid, raise_exception=raise_exception)
def list(self):
"""
Get the list of all Timeseries from database
.. note::
This action may take a while
:returns: the list of Timeseries object
:rtype: list
"""
return [Timeseries(tsuid=x["tsuid"], fid=x["funcId"], api=self.api) for x in
self.dm_client.get_ts_list()]
def fetch(self, ts, sd=None, ed=None):
"""
Retrieve the data corresponding to a Timeseries object as a numpy array
.. note::
if omitted, *sd* (start date) and *ed* (end date) will be retrieved from metadata
if you want a fixed windowed range, set *sd* and *ed* manually (but be aware that the TS may be
not completely gathered)
:param ts: Timeseries object
:param sd: (optional) starting date (timestamp in ms from epoch)
:param ed: (optional) ending date (timestamp in ms from epoch)
:type ts: Timeseries
:type sd: int or None
:type ed: int or None
:returns: The data points
:rtype: np.array
:raises TypeError: if *ts* is not a Timeseries object
:raises TypeError: if *sd* is not an int
:raises TypeError: if *ed* is not an int
:raises IkatsNotFoundError: if TS data points couldn't be retrieved properly
"""
check_type(value=ts, allowed_types=Timeseries, var_name="ts", raise_exception=True)
check_type(value=sd, allowed_types=[int, None], var_name="sd", raise_exception=True)
check_type(value=ed, allowed_types=[int, None], var_name="ed", raise_exception=True)
if sd is None:
sd = ts.metadata.get(name="ikats_start_date")
check_is_valid_epoch(value=sd, raise_exception=True)
if ed is None:
ed = ts.metadata.get(name="ikats_end_date")
check_is_valid_epoch(value=ed, raise_exception=True)
try:
data_points = self.tsdb_client.get_ts_by_tsuid(tsuid=ts.tsuid, sd=sd, ed=ed)
# Return the points
return data_points
except ValueError:
raise IkatsNotFoundError("TS data points couldn't be retrieved properly")
def inherit(self, ts, parent):
"""
Make a timeseries inherit of parent's metadata according to a pattern (not all metadata inherited)
:param ts: TS object in IKATS (which will inherit)
:param parent: TS object in IKATS of inheritance parent
:type ts: Timeseries
:param parent: Timeseries
"""
try:
result = self.dm_client.metadata_get_typed([parent.tsuid])[parent.tsuid]
for meta_name in result:
# Flag metadata as "not deleted"
result[meta_name]["deleted"] = False
if not NON_INHERITABLE_PATTERN.match(meta_name):
self.dm_client.metadata_create(tsuid=ts.tsuid, name=meta_name, value=result[meta_name]["value"],
data_type=MDType(result[meta_name]["dtype"]),
force_update=True)
except(ValueError, TypeError, SystemError) as exception:
self.api.session.log.warning(
"Can't get metadata of parent TS (%s), nothing will be inherited; \nreason: %s", parent, exception)
def find_from_meta(self, constraint=None):
"""
From a metadata constraint provided as a parameter, the method gets a TS list matching these constraints
Example of constraint:
| {
| frequency: [1, 2],
| flight_phase: 8
| }
will find the TS having the following metadata:
| (frequency == 1 OR frequency == 2)
| AND
| flight_phase == 8
:param constraint: constraint definition
:type constraint: dict
:returns: list of TSUID matching the constraints
:rtype: dict
:raises TypeError: if *constraint* is not a dict
"""
return self.dm_client.get_ts_from_metadata(constraint=constraint)
def tsuid2fid(self, tsuid, raise_exception=True):
"""
Retrieve the functional ID associated to the tsuid param.
:param tsuid: one tsuid value
:param raise_exception: Allow to specify if the action shall assert if not found or not
:type tsuid: str
:type raise_exception: bool
:returns: retrieved functional identifier value
:rtype: str
:raises TypeError: if tsuid is not a defined str
:raises ValueError: no functional ID matching the tsuid
:raises ServerError: http answer with status : 500 <= status < 600
"""
try:
return self.dm_client.get_func_id_from_tsuid(tsuid=tsuid)
except IkatsException:
if raise_exception:
raise
return None
def fid2tsuid(self, fid, raise_exception=True):
"""
Retrieve the TSUID associated to the functional ID param.
:param fid: the functional Identifier
:param raise_exception: Allow to specify if the action shall assert if not found or not
:type fid: str
:type raise_exception: bool
:returns: retrieved TSUID value or None if not found
:rtype: str
:raises TypeError: if fid is not str
:raises IkatsNotFoundError: no match
"""
check_is_fid_valid(fid=fid)
# Check if fid already associated to an existing tsuid
try:
return self.dm_client.get_tsuid_from_fid(fid=fid)
except IkatsException:
if raise_exception:
raise
return None
def _create_ref(self, fid):
"""
Create a reference of timeseries in temporal data database and associate it to fid
in temporal database for future use.
Shall be used before create method in case of parallel creation of data (import data via spark for example)
:param fid: Functional Identifier of the TS in Ikats
:type fid: str
:returns: A prepared Timeseries object
:rtype: Timeseries
:raises IkatsConflictError: if FID already present in database (use `get` instead of `new`)
"""
check_is_fid_valid(fid, raise_exception=True)
try:
# Check if fid already associated to an existing tsuid
tsuid = self.dm_client.get_tsuid_from_fid(fid=fid)
# if fid already exists in database, raise a conflict exception
raise IkatsConflictError("%s already associated to an existing tsuid: %s" % (fid, tsuid))
except IkatsNotFoundError:
# Creation of a new tsuid
metric, tags = self.tsdb_client.gen_metric_tags()
tsuid = self.tsdb_client.assign_metric(metric=metric, tags=tags)
# finally importing tsuid/fid pair in non temporal database
self.dm_client.import_fid(tsuid=tsuid, fid=fid)
return Timeseries(tsuid=tsuid, fid=fid, api=self.api)
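# Sketch of the inheritance filter defined at module level: quality metrics and
# ikats-internal metadata are never copied from a parent, while user metadata is.
_non_inheritable_examples = [name for name in ("qual_nb_points", "ikats_start_date", "funcId", "flight_phase")
                             if NON_INHERITABLE_PATTERN.match(name)]
# _non_inheritable_examples == ["qual_nb_points", "ikats_start_date", "funcId"]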
| 1.78125 | 2 |
roc.py | dmitryduev/deep-asteroids | 8 | 12796435 | import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import tensorflow as tf
from keras.models import model_from_json
import json
from sklearn.metrics import roc_curve, auc, confusion_matrix
import numpy as np
import pandas as pd
from copy import deepcopy
import itertools
from utils import load_data
# import matplotlib
# matplotlib.use('agg')
import matplotlib.pyplot as plt
def load_model_helper(path, model_base_name):
# return load_model(path)
with open(os.path.join(path, f'{model_base_name}.architecture.json'), 'r') as json_file:
loaded_model_json = json_file.read()
m = model_from_json(loaded_model_json)
m.load_weights(os.path.join(path, f'{model_base_name}.weights.h5'))
return m
def thres(v, thr: float = 0.5):
v_ = np.array(deepcopy(v))
v_[v_ >= thr] = 1
v_[v_ < thr] = 0
return v_
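# Example sketch: scores at or above the threshold map to 1, the rest to 0.
_thres_demo = thres([0.2, 0.5, 0.9])  # -> array([0., 1., 1.])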
if __name__ == '__main__':
tf.keras.backend.clear_session()
# path_base = '/Users/dmitryduev/_caltech/python/deep-asteroids/'
path_base = './'
with open(os.path.join(path_base, 'service/code/config.json')) as f:
config = json.load(f)
# models = config['models']
models = config['models_201901']
model_names = list(models.keys())
path_models = os.path.join(path_base, 'service/models')
c_families = {'rb': '5b96af9c0354c9000b0aea36',
'sl': '5b99b2c6aec3c500103a14de',
'kd': '5be0ae7958830a0018821794',
'os': '5c05bbdc826480000a95c0bf'}
# c_families = {'rb': '5b96af9c0354c9000b0aea36',
# 'sl': '5b99b2c6aec3c500103a14de',
# 'kd': '5be0ae7958830a0018821794'}
# c_families = {'rb': '5b96af9c0354c9000b0aea36'}
path_data = './data'
# mpl colors:
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd',
# u'#8c564b', u'#e377c2', u'#7f7f7f', u'#bcbd22', u'#17becf']
# line styles:
line_styles = ['-', '--', ':']
# thresholds
score_thresholds = [0.99, 0.9, 0.5, 0.1, 0.01]
# ROC
fig = plt.figure(figsize=(14, 5))
fig.subplots_adjust(bottom=0.09, left=0.05, right=0.70, top=0.98, wspace=0.2, hspace=0.2)
lw = 1.6
# ROCs
ax = fig.add_subplot(1, 2, 1)
# zoomed ROCs
ax2 = fig.add_subplot(1, 2, 2)
ax.plot([0, 1], [0, 1], color='#333333', lw=lw, linestyle='--')
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('False Positive Rate (Contamination)')
ax.set_ylabel('True Positive Rate (Sensitivity)')
# ax.legend(loc="lower right")
# ax.legend(loc="best")
ax.grid(True)
ax2.set_xlim([0.0, .2])
ax2.set_ylim([0.8, 1.0])
ax2.set_xlabel('False Positive Rate (Contamination)')
ax2.set_ylabel('True Positive Rate (Sensitivity)')
# ax.legend(loc="lower right")
# ax2.legend(loc="best")
ax2.grid(True)
# Confusion matrices
fig2 = plt.figure()
fig2.subplots_adjust(bottom=0.06, left=0.01, right=1.0, top=0.93, wspace=0.0, hspace=0.12)
cn = 0
for cfi, c_family in enumerate(c_families):
project_id = c_families[c_family]
print(c_family, project_id)
# load data
x_train, y_train, x_test, y_test, classes = load_data(path=path_data,
project_id=project_id,
binary=True,
grayscale=True,
resize=(144, 144),
test_size=0.1,
verbose=True,
random_state=42)
mn = [m_ for m_ in model_names if c_family in m_]
n_mn = len(mn)
for ii, model_name in enumerate(mn):
print(f'loading model {model_name}: {models[model_name]}')
m = load_model_helper(path_models, models[model_name])
y = m.predict(x_test, batch_size=32, verbose=True)
# for thr in (0.5, 0.9):
for thr in (0.5,):
labels_pred = thres(y, thr=thr)
confusion_matr = confusion_matrix(y_test, labels_pred)
confusion_matr_normalized = confusion_matr.astype('float') / confusion_matr.sum(axis=1)[:, np.newaxis]
print(f'Threshold: {thr}')
print('Confusion matrix:')
print(confusion_matr)
print('Normalized confusion matrix:')
print(confusion_matr_normalized)
fpr, tpr, thresholds = roc_curve(y_test, y)
roc_auc = auc(fpr, tpr)
ax.plot(fpr, tpr, line_styles[ii], color=colors[cfi], lw=lw)
ax2.plot(fpr, tpr, line_styles[ii], color=colors[cfi], lw=lw,
label=f'{model_name} curve (area = {roc_auc:.5f})')
# plot thresholds
for it, thr in enumerate(score_thresholds):
x_ = np.interp(thr, thresholds[::-1], fpr)
y_ = np.interp(thr, thresholds[::-1], tpr)
# print(thr, x_, y_)
if cfi == 0 and ii == 0:
ax.plot(x_, y_, '.', markersize=8, color=colors[-(it + 1)], label=f'Threshold: {1-thr:.2f}')
else:
ax.plot(x_, y_, '.', markersize=8, color=colors[-(it + 1)])
ax2.plot(x_, y_, 'o', markersize=8, color=colors[-(it + 1)])
# plot confusion matrices
ax_ = fig2.add_subplot(3, 2 * len(c_families), ii * 8 + cfi * 2 + 1)
ax2_ = fig2.add_subplot(3, 2 * len(c_families), ii * 8 + cfi * 2 + 2)
ax_.imshow(confusion_matr, interpolation='nearest', cmap=plt.cm.Blues)
ax2_.imshow(confusion_matr_normalized, interpolation='nearest', cmap=plt.cm.Blues)
tick_marks = np.arange(2)
# ax_.set_xticks(tick_marks, tick_marks)
# ax_.set_yticks(tick_marks, tick_marks)
# ax2_.set_xticks(tick_marks, tick_marks)
# ax2_.set_yticks(tick_marks, tick_marks)
#
# ax_.xaxis.set_visible(False)
# ax_.yaxis.set_visible(False)
# ax2_.xaxis.set_visible(False)
# ax2_.yaxis.set_visible(False)
ax_.axis('off')
ax2_.axis('off')
thresh = confusion_matr.max() / 2.
thresh_norm = confusion_matr_normalized.max() / 2.
for i, j in itertools.product(range(confusion_matr.shape[0]), range(confusion_matr.shape[1])):
ax_.text(j, i, format(confusion_matr[i, j], 'd'),
horizontalalignment="center",
color="white" if confusion_matr[i, j] > thresh else "black")
ax2_.text(j, i, format(confusion_matr_normalized[i, j], '.2f'),
horizontalalignment="center",
color="white" if confusion_matr_normalized[i, j] > thresh_norm else "black")
# if ii == 0:
# break
ax.legend(loc='lower right')
ax2.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.savefig('./roc_rb_sl_kd.png', dpi=300)
    fig2.savefig('./cm_rb_sl_kd.png', dpi=300)
plt.show()
| 1.898438 | 2 |
test_field.py | pwcberry/footy-simulator-python | 0 | 12796436 | <gh_stars>0
import unittest
from field import *
from data import Team, Skills
from status import BallStatus, FieldZone, LateralDirection, Possession
class TestField(unittest.TestCase):
def setUp(self):
self.team_a = Team(
# name
"AAA",
# forwards
Skills(0.5, 0.5, 0.5),
# mid_field
Skills(0.5, 0.5, 0.5),
# backs
Skills(0.5, 0.5, 0.5),
# ruck
Skills(0.5, 0.5, 0.5),
)
self.team_b = Team("BBB",
Skills(0.5, 0.5, 0.5),
Skills(0.5, 0.5, 0.5),
Skills(0.5, 0.5, 0.5),
Skills(0.5, 0.5, 0.5),
)
def test_init(self):
f = Field(self.team_a, self.team_b)
self.assertEqual(f.position, Position(FIELD_CENTER_X, FIELD_CENTER_Y))
self.assertEqual(f.ball_status, BallStatus.BOUNCE)
self.assertEqual(f.possession, Possession.IN_CONTENTION)
self.assertEqual(f.teams[0], self.team_a)
self.assertEqual(f.teams[1], self.team_b)
def test_set_position(self):
f = Field(self.team_a, self.team_b)
f.set_position(Position(4, 3))
self.assertEqual(f.position.x, 4)
self.assertEqual(f.position.y, 3)
def test_set_position_x_less_than_minimum(self):
f = Field(self.team_a, self.team_b)
f.set_position(Position(-1, 3))
self.assertEqual(f.position.x, FIELD_MIN_X)
def test_set_position_x_greater_than_maximum(self):
f = Field(self.team_a, self.team_b)
f.set_position(Position(10, 3))
self.assertEqual(f.position.x, FIELD_MAX_X)
    def test_set_position_y_less_than_minimum(self):
f = Field(self.team_a, self.team_b)
f.set_position(Position(3, 0))
self.assertEqual(f.position.y, FIELD_MIN_Y)
    def test_set_position_y_greater_than_maximum(self):
f = Field(self.team_a, self.team_b)
f.set_position(Position(3, 6))
self.assertEqual(f.position.y, FIELD_MAX_Y)
def test_centre_ball(self):
f = Field(self.team_a, self.team_b)
f.centre_ball()
self.assertEqual(f.position, Position(FIELD_CENTER_X, FIELD_CENTER_Y))
self.assertEqual(f.ball_status, BallStatus.BOUNCE)
self.assertEqual(f.possession, Possession.IN_CONTENTION)
def test_get_field_zone_at_bounce(self):
f = Field(self.team_a, self.team_b)
zone = f.get_field_zone()
self.assertEqual(zone, FieldZone.RUCK)
def test_get_field_zone_when_moving(self):
f = Field(self.team_a, self.team_b)
f.ball_status = BallStatus.MOVING
f.set_position(Position(1, 1))
zone = f.get_field_zone()
self.assertEqual(zone, FieldZone.FORWARDS)
f.set_position(Position(3, 2))
zone = f.get_field_zone()
self.assertEqual(zone, FieldZone.FORWARDS)
f.set_position(Position(4, 3))
zone = f.get_field_zone()
self.assertEqual(zone, FieldZone.MID_FIELD)
f.set_position(Position(6, 4))
zone = f.get_field_zone()
self.assertEqual(zone, FieldZone.MID_FIELD)
f.set_position(Position(7, 5))
zone = f.get_field_zone()
self.assertEqual(zone, FieldZone.BACKS)
f.set_position(Position(9, 4))
zone = f.get_field_zone()
self.assertEqual(zone, FieldZone.BACKS)
def test_get_field_zone_when_ball_is_thrown_in(self):
f = Field(self.team_a, self.team_b)
f.ball_status = BallStatus.THROW_IN
f.set_position(Position(6, 1))
zone = f.get_field_zone()
self.assertEqual(zone, FieldZone.RUCK)
def test_field_status_returns_possession_and_ball_status(self):
f = Field(self.team_a, self.team_b)
fs = f.field_status
self.assertEqual(fs.possession, Possession.IN_CONTENTION)
self.assertEqual(fs.ball_status, BallStatus.BOUNCE)
# Move forwards
def test_move_forward_when_possession_is_home_team_and_in_field_center(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.HOME_TEAM
f.move_forward()
self.assertEqual(f.position.x, FIELD_CENTER_X - 1)
self.assertEqual(f.position.y, FIELD_CENTER_Y)
def test_move_forward_when_possession_is_home_team_and_at_forward_limit(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.HOME_TEAM
f.set_position(Position(FIELD_MIN_X, FIELD_CENTER_Y))
f.move_forward()
self.assertEqual(f.position.x, FIELD_MIN_X)
self.assertEqual(f.position.y, FIELD_CENTER_Y)
def test_move_forward_when_possession_is_home_team_and_in_back_field(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.HOME_TEAM
f.set_position(Position(FIELD_MAX_X, FIELD_CENTER_Y))
f.move_forward()
self.assertEqual(f.position.x, FIELD_MAX_X - 1)
self.assertEqual(f.position.y, FIELD_CENTER_Y)
def test_move_forward_when_possession_is_away_team_and_in_field_center(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.AWAY_TEAM
f.move_forward()
self.assertEqual(f.position.x, FIELD_CENTER_X + 1)
self.assertEqual(f.position.y, FIELD_CENTER_Y)
def test_move_forward_when_possession_is_away_team_and_at_forward_limit(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.AWAY_TEAM
f.set_position(Position(FIELD_MAX_X, FIELD_CENTER_Y))
f.move_forward()
self.assertEqual(f.position.x, FIELD_MAX_X)
self.assertEqual(f.position.y, FIELD_CENTER_Y)
def test_move_forward_when_possession_is_away_team_and_in_back_field(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.AWAY_TEAM
f.set_position(Position(FIELD_MIN_X, FIELD_CENTER_Y))
f.move_forward()
self.assertEqual(f.position.x, FIELD_MIN_X + 1)
self.assertEqual(f.position.y, FIELD_CENTER_Y)
## Move backwards
def test_move_backward_when_possession_is_home_team_and_in_field_center(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.HOME_TEAM
f.move_backward()
self.assertEqual(f.position.x, FIELD_CENTER_X + 1)
self.assertEqual(f.position.y, FIELD_CENTER_Y)
def test_move_backward_when_possession_is_home_team_and_at_back_limit(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.HOME_TEAM
f.set_position(Position(FIELD_MAX_X, FIELD_CENTER_Y))
f.move_backward()
self.assertEqual(f.position.x, FIELD_MAX_X)
self.assertEqual(f.position.y, FIELD_CENTER_Y)
def test_move_backward_when_possession_is_home_team_and_in_forward_field(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.HOME_TEAM
f.set_position(Position(FIELD_MIN_X, FIELD_CENTER_Y))
f.move_backward()
self.assertEqual(f.position.x, FIELD_MIN_X + 1)
self.assertEqual(f.position.y, FIELD_CENTER_Y)
def test_move_backward_when_possession_is_away_team_and_in_field_center(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.AWAY_TEAM
f.move_backward()
self.assertEqual(f.position.x, FIELD_CENTER_X - 1)
self.assertEqual(f.position.y, FIELD_CENTER_Y)
def test_move_backward_when_possession_is_away_team_and_at_back_limit(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.AWAY_TEAM
f.set_position(Position(FIELD_MIN_X, FIELD_CENTER_Y))
f.move_backward()
self.assertEqual(f.position.x, FIELD_MIN_X)
self.assertEqual(f.position.y, FIELD_CENTER_Y)
def test_move_backward_when_possession_is_away_team_and_in_forward_field(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.AWAY_TEAM
f.set_position(Position(FIELD_MAX_X, FIELD_CENTER_Y))
f.move_backward()
self.assertEqual(f.position.x, FIELD_MAX_X - 1)
self.assertEqual(f.position.y, FIELD_CENTER_Y)
# Move laterally - HOME_TEAM
def test_move_laterally_when_possession_is_home_team_and_in_field_center_and_move_left(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.HOME_TEAM
f.move_laterally(LateralDirection.LEFT)
self.assertEqual(f.position.x, FIELD_CENTER_X)
self.assertEqual(f.position.y, FIELD_CENTER_Y + 1)
def test_move_laterally_when_possession_is_home_team_and_in_field_center_and_move_right(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.HOME_TEAM
f.move_laterally(LateralDirection.RIGHT)
self.assertEqual(f.position.x, FIELD_CENTER_X)
self.assertEqual(f.position.y, FIELD_CENTER_Y - 1)
def test_move_laterally_when_possession_is_home_team_and_at_left_side_and_move_left(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.HOME_TEAM
f.set_position(Position(FIELD_CENTER_X, FIELD_MAX_Y))
f.move_laterally(LateralDirection.LEFT)
self.assertEqual(f.position.x, FIELD_CENTER_X)
self.assertEqual(f.position.y, FIELD_MAX_Y)
def test_move_laterally_when_possession_is_home_team_and_at_right_side_and_move_right(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.HOME_TEAM
f.set_position(Position(FIELD_CENTER_X, FIELD_MIN_Y))
f.move_laterally(LateralDirection.RIGHT)
self.assertEqual(f.position.x, FIELD_CENTER_X)
self.assertEqual(f.position.y, FIELD_MIN_Y)
# Move laterally - AWAY_TEAM
def test_move_laterally_when_possession_is_away_team_and_in_field_center_and_move_left(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.AWAY_TEAM
f.move_laterally(LateralDirection.LEFT)
self.assertEqual(f.position.x, FIELD_CENTER_X)
self.assertEqual(f.position.y, FIELD_CENTER_Y - 1)
def test_move_laterally_when_possession_is_away_team_and_in_field_center_and_move_right(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.AWAY_TEAM
f.move_laterally(LateralDirection.RIGHT)
self.assertEqual(f.position.x, FIELD_CENTER_X)
self.assertEqual(f.position.y, FIELD_CENTER_Y + 1)
def test_move_laterally_when_possession_is_away_team_and_at_left_side_and_move_left(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.AWAY_TEAM
f.set_position(Position(FIELD_CENTER_X, FIELD_MIN_Y))
f.move_laterally(LateralDirection.LEFT)
self.assertEqual(f.position.x, FIELD_CENTER_X)
self.assertEqual(f.position.y, FIELD_MIN_Y)
def test_move_laterally_when_possession_is_away_team_and_at_right_side_and_move_right(self):
f = Field(self.team_a, self.team_b)
f.possession = Possession.AWAY_TEAM
f.set_position(Position(FIELD_CENTER_X, FIELD_MAX_Y))
f.move_laterally(LateralDirection.RIGHT)
self.assertEqual(f.position.x, FIELD_CENTER_X)
self.assertEqual(f.position.y, FIELD_MAX_Y)
if __name__ == "__main__":
unittest.main()
| 2.96875 | 3 |
trustpayments/models/subscription_version.py | TrustPayments/python-sdk | 2 | 12796437 | # coding: utf-8
import pprint
import six
from enum import Enum
class SubscriptionVersion:
swagger_types = {
'activated_on': 'datetime',
'billing_currency': 'str',
'component_configurations': 'list[SubscriptionComponentConfiguration]',
'created_on': 'datetime',
'expected_last_period_end': 'datetime',
'failed_on': 'datetime',
'id': 'int',
'language': 'str',
'linked_space_id': 'int',
'planned_purge_date': 'datetime',
'planned_termination_date': 'datetime',
'product_version': 'SubscriptionProductVersion',
'selected_components': 'list[SubscriptionProductComponent]',
'state': 'SubscriptionVersionState',
'subscription': 'Subscription',
'terminated_on': 'datetime',
'terminating_on': 'datetime',
'termination_issued_on': 'datetime',
'version': 'int',
}
attribute_map = {
        'activated_on': 'activatedOn',
        'billing_currency': 'billingCurrency',
        'component_configurations': 'componentConfigurations',
        'created_on': 'createdOn',
        'expected_last_period_end': 'expectedLastPeriodEnd',
        'failed_on': 'failedOn',
        'id': 'id',
        'language': 'language',
        'linked_space_id': 'linkedSpaceId',
        'planned_purge_date': 'plannedPurgeDate',
        'planned_termination_date': 'plannedTerminationDate',
        'product_version': 'productVersion',
        'selected_components': 'selectedComponents',
        'state': 'state',
        'subscription': 'subscription',
        'terminated_on': 'terminatedOn',
        'terminating_on': 'terminatingOn',
        'termination_issued_on': 'terminationIssuedOn',
        'version': 'version',
}
_activated_on = None
_billing_currency = None
_component_configurations = None
_created_on = None
_expected_last_period_end = None
_failed_on = None
_id = None
_language = None
_linked_space_id = None
_planned_purge_date = None
_planned_termination_date = None
_product_version = None
_selected_components = None
_state = None
_subscription = None
_terminated_on = None
_terminating_on = None
_termination_issued_on = None
_version = None
def __init__(self, **kwargs):
self.discriminator = None
self.activated_on = kwargs.get('activated_on', None)
self.billing_currency = kwargs.get('billing_currency', None)
self.component_configurations = kwargs.get('component_configurations', None)
self.created_on = kwargs.get('created_on', None)
self.expected_last_period_end = kwargs.get('expected_last_period_end', None)
self.failed_on = kwargs.get('failed_on', None)
self.id = kwargs.get('id', None)
self.language = kwargs.get('language', None)
self.linked_space_id = kwargs.get('linked_space_id', None)
self.planned_purge_date = kwargs.get('planned_purge_date', None)
self.planned_termination_date = kwargs.get('planned_termination_date', None)
self.product_version = kwargs.get('product_version', None)
self.selected_components = kwargs.get('selected_components', None)
self.state = kwargs.get('state', None)
self.subscription = kwargs.get('subscription', None)
self.terminated_on = kwargs.get('terminated_on', None)
self.terminating_on = kwargs.get('terminating_on', None)
self.termination_issued_on = kwargs.get('termination_issued_on', None)
self.version = kwargs.get('version', None)
@property
def activated_on(self):
"""Gets the activated_on of this SubscriptionVersion.
:return: The activated_on of this SubscriptionVersion.
:rtype: datetime
"""
return self._activated_on
@activated_on.setter
def activated_on(self, activated_on):
"""Sets the activated_on of this SubscriptionVersion.
:param activated_on: The activated_on of this SubscriptionVersion.
:type: datetime
"""
self._activated_on = activated_on
@property
def billing_currency(self):
"""Gets the billing_currency of this SubscriptionVersion.
The subscriber is charged in the billing currency. The billing currency has to be one of the enabled currencies on the subscription product.
:return: The billing_currency of this SubscriptionVersion.
:rtype: str
"""
return self._billing_currency
@billing_currency.setter
def billing_currency(self, billing_currency):
"""Sets the billing_currency of this SubscriptionVersion.
The subscriber is charged in the billing currency. The billing currency has to be one of the enabled currencies on the subscription product.
:param billing_currency: The billing_currency of this SubscriptionVersion.
:type: str
"""
self._billing_currency = billing_currency
@property
def component_configurations(self):
"""Gets the component_configurations of this SubscriptionVersion.
:return: The component_configurations of this SubscriptionVersion.
:rtype: list[SubscriptionComponentConfiguration]
"""
return self._component_configurations
@component_configurations.setter
def component_configurations(self, component_configurations):
"""Sets the component_configurations of this SubscriptionVersion.
:param component_configurations: The component_configurations of this SubscriptionVersion.
:type: list[SubscriptionComponentConfiguration]
"""
self._component_configurations = component_configurations
@property
def created_on(self):
"""Gets the created_on of this SubscriptionVersion.
:return: The created_on of this SubscriptionVersion.
:rtype: datetime
"""
return self._created_on
@created_on.setter
def created_on(self, created_on):
"""Sets the created_on of this SubscriptionVersion.
:param created_on: The created_on of this SubscriptionVersion.
:type: datetime
"""
self._created_on = created_on
@property
def expected_last_period_end(self):
"""Gets the expected_last_period_end of this SubscriptionVersion.
The expected last period end is the date on which the projected end date of the last period is. This is only a projection and as such the actual date may be different.
:return: The expected_last_period_end of this SubscriptionVersion.
:rtype: datetime
"""
return self._expected_last_period_end
@expected_last_period_end.setter
def expected_last_period_end(self, expected_last_period_end):
"""Sets the expected_last_period_end of this SubscriptionVersion.
The expected last period end is the date on which the projected end date of the last period is. This is only a projection and as such the actual date may be different.
:param expected_last_period_end: The expected_last_period_end of this SubscriptionVersion.
:type: datetime
"""
self._expected_last_period_end = expected_last_period_end
@property
def failed_on(self):
"""Gets the failed_on of this SubscriptionVersion.
:return: The failed_on of this SubscriptionVersion.
:rtype: datetime
"""
return self._failed_on
@failed_on.setter
def failed_on(self, failed_on):
"""Sets the failed_on of this SubscriptionVersion.
:param failed_on: The failed_on of this SubscriptionVersion.
:type: datetime
"""
self._failed_on = failed_on
@property
def id(self):
"""Gets the id of this SubscriptionVersion.
The ID is the primary key of the entity. The ID identifies the entity uniquely.
:return: The id of this SubscriptionVersion.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this SubscriptionVersion.
The ID is the primary key of the entity. The ID identifies the entity uniquely.
:param id: The id of this SubscriptionVersion.
:type: int
"""
self._id = id
@property
def language(self):
"""Gets the language of this SubscriptionVersion.
:return: The language of this SubscriptionVersion.
:rtype: str
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this SubscriptionVersion.
:param language: The language of this SubscriptionVersion.
:type: str
"""
self._language = language
@property
def linked_space_id(self):
"""Gets the linked_space_id of this SubscriptionVersion.
The linked space id holds the ID of the space to which the entity belongs to.
:return: The linked_space_id of this SubscriptionVersion.
:rtype: int
"""
return self._linked_space_id
@linked_space_id.setter
def linked_space_id(self, linked_space_id):
"""Sets the linked_space_id of this SubscriptionVersion.
The linked space id holds the ID of the space to which the entity belongs to.
:param linked_space_id: The linked_space_id of this SubscriptionVersion.
:type: int
"""
self._linked_space_id = linked_space_id
@property
def planned_purge_date(self):
"""Gets the planned_purge_date of this SubscriptionVersion.
The planned purge date indicates when the entity is permanently removed. When the date is null the entity is not planned to be removed.
:return: The planned_purge_date of this SubscriptionVersion.
:rtype: datetime
"""
return self._planned_purge_date
@planned_purge_date.setter
def planned_purge_date(self, planned_purge_date):
"""Sets the planned_purge_date of this SubscriptionVersion.
The planned purge date indicates when the entity is permanently removed. When the date is null the entity is not planned to be removed.
:param planned_purge_date: The planned_purge_date of this SubscriptionVersion.
:type: datetime
"""
self._planned_purge_date = planned_purge_date
@property
def planned_termination_date(self):
"""Gets the planned_termination_date of this SubscriptionVersion.
:return: The planned_termination_date of this SubscriptionVersion.
:rtype: datetime
"""
return self._planned_termination_date
@planned_termination_date.setter
def planned_termination_date(self, planned_termination_date):
"""Sets the planned_termination_date of this SubscriptionVersion.
:param planned_termination_date: The planned_termination_date of this SubscriptionVersion.
:type: datetime
"""
self._planned_termination_date = planned_termination_date
@property
def product_version(self):
"""Gets the product_version of this SubscriptionVersion.
:return: The product_version of this SubscriptionVersion.
:rtype: SubscriptionProductVersion
"""
return self._product_version
@product_version.setter
def product_version(self, product_version):
"""Sets the product_version of this SubscriptionVersion.
:param product_version: The product_version of this SubscriptionVersion.
:type: SubscriptionProductVersion
"""
self._product_version = product_version
@property
def selected_components(self):
"""Gets the selected_components of this SubscriptionVersion.
:return: The selected_components of this SubscriptionVersion.
:rtype: list[SubscriptionProductComponent]
"""
return self._selected_components
@selected_components.setter
def selected_components(self, selected_components):
"""Sets the selected_components of this SubscriptionVersion.
:param selected_components: The selected_components of this SubscriptionVersion.
:type: list[SubscriptionProductComponent]
"""
self._selected_components = selected_components
@property
def state(self):
"""Gets the state of this SubscriptionVersion.
:return: The state of this SubscriptionVersion.
:rtype: SubscriptionVersionState
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this SubscriptionVersion.
:param state: The state of this SubscriptionVersion.
:type: SubscriptionVersionState
"""
self._state = state
@property
def subscription(self):
"""Gets the subscription of this SubscriptionVersion.
:return: The subscription of this SubscriptionVersion.
:rtype: Subscription
"""
return self._subscription
@subscription.setter
def subscription(self, subscription):
"""Sets the subscription of this SubscriptionVersion.
:param subscription: The subscription of this SubscriptionVersion.
:type: Subscription
"""
self._subscription = subscription
@property
def terminated_on(self):
"""Gets the terminated_on of this SubscriptionVersion.
:return: The terminated_on of this SubscriptionVersion.
:rtype: datetime
"""
return self._terminated_on
@terminated_on.setter
def terminated_on(self, terminated_on):
"""Sets the terminated_on of this SubscriptionVersion.
:param terminated_on: The terminated_on of this SubscriptionVersion.
:type: datetime
"""
self._terminated_on = terminated_on
@property
def terminating_on(self):
"""Gets the terminating_on of this SubscriptionVersion.
:return: The terminating_on of this SubscriptionVersion.
:rtype: datetime
"""
return self._terminating_on
@terminating_on.setter
def terminating_on(self, terminating_on):
"""Sets the terminating_on of this SubscriptionVersion.
:param terminating_on: The terminating_on of this SubscriptionVersion.
:type: datetime
"""
self._terminating_on = terminating_on
@property
def termination_issued_on(self):
"""Gets the termination_issued_on of this SubscriptionVersion.
:return: The termination_issued_on of this SubscriptionVersion.
:rtype: datetime
"""
return self._termination_issued_on
@termination_issued_on.setter
def termination_issued_on(self, termination_issued_on):
"""Sets the termination_issued_on of this SubscriptionVersion.
:param termination_issued_on: The termination_issued_on of this SubscriptionVersion.
:type: datetime
"""
self._termination_issued_on = termination_issued_on
@property
def version(self):
"""Gets the version of this SubscriptionVersion.
The version number indicates the version of the entity. The version is incremented whenever the entity is changed.
:return: The version of this SubscriptionVersion.
:rtype: int
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this SubscriptionVersion.
The version number indicates the version of the entity. The version is incremented whenever the entity is changed.
:param version: The version of this SubscriptionVersion.
:type: int
"""
self._version = version
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, Enum):
result[attr] = value.value
else:
result[attr] = value
if issubclass(SubscriptionVersion, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, SubscriptionVersion):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 1.695313 | 2 |
Janeiro/008-Digitos-Pares.py | brenodt/Desafio-365-dias-programando | 0 | 12796438 | """Exercício 8:
Write a function that takes two integers (a and b) and returns a list containing all the integers between them whose digits are all even (a and b included)."""
def digitos_pares(valor_inicial: int = 0, valor_final: int = 1000):
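    """Return the list of integers in [valor_inicial, valor_final] whose digits are all even."""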
saida = []
    # using (valor_final + 1) because range() does not include the end value by definition
    for numero in range(valor_inicial, valor_final + 1):
        digitos_str = str(numero)
        check = True  # stays True only if every digit turns out to be even
        for digito in digitos_str:
            if int(digito) % 2 != 0:  # use the modulo operator to check whether the digit is even
                check = False
                break  # after the first odd digit, leave the inner loop
        if check:  # if all digits are even, add the number to the list
saida.append(numero)
return saida
inicio = 0
fim = 30
print(digitos_pares(inicio, fim))
| 3.96875 | 4 |
trefoil/examples/csv_pivot.py | icpac-igad/trefoil | 9 | 12796439 | """
Example to demonstrate creating a pivot table from the output of zonal stats CLI
"""
import time
import pandas
# Return a pipe-delimited combination of value from every column up through zone
def get_key(row):
key_parts = []
for col in row.keys():
if col == 'zone':
return '|'.join(key_parts)
key_parts.append(str(row[col]))
start = time.time()
infilename = '/tmp/test.csv'
df = pandas.read_csv(infilename)
df['key'] = df.apply(lambda x: get_key(x), axis=1)
sub_df = df[['key', 'zone', 'mean']]
pivot = sub_df.pivot('zone', columns='key')
# Need to manually create the CSV instead of letting pandas do it, due to composite header
# we don't want
with open('/tmp/pivot.csv', 'w') as outfile:
header = ','.join( ['zone'] + pivot.columns.levels[1].tolist())
csv_data = pivot.to_csv(None, index=True, header=False)
outfile.write(header + '\n' + csv_data)
print('Elapsed: {0:.2f}'.format(time.time() - start)) | 3.296875 | 3 |
day18/test/test_main.py | JoseTomasTocino/AdventOfCode2020 | 0 | 12796440 | <filename>day18/test/test_main.py
import logging
import os.path
from day18.code.main import evaluate_expression
logger = logging.getLogger(__name__)
local_path = os.path.abspath(os.path.dirname(__file__))
sample_input = None
def test_sample_input(caplog):
# caplog.set_level(logging.INFO)
assert evaluate_expression("1 + 2 * 3 + 4 * 5 + 6") == 71
assert evaluate_expression("1 + (2 * 3) + (4 * (5 + 6))") == 51
assert evaluate_expression("2 * 3 + (4 * 5)") == 26
assert evaluate_expression("5 + (8 * 3 + 9 + 3 * 4 * 3)") == 437
assert evaluate_expression("5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))") == 12240
assert evaluate_expression("((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2") == 13632
def test_sample_input_with_advanced_priorities(caplog):
# caplog.set_level(logging.INFO)
assert evaluate_expression("1 + 2 * 3 + 4 * 5 + 6", use_advanced_precedence=True) == 231
assert evaluate_expression("1 + (2 * 3) + (4 * (5 + 6))", use_advanced_precedence=True) == 51
assert evaluate_expression("2 * 3 + (4 * 5)", use_advanced_precedence=True) == 46
assert evaluate_expression("5 + (8 * 3 + 9 + 3 * 4 * 3)", use_advanced_precedence=True) == 1445
assert evaluate_expression("5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))", use_advanced_precedence=True) == 669060
assert evaluate_expression("((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2", use_advanced_precedence=True) == 23340
def test_big_input(caplog):
# caplog.set_level(logging.INFO)
with open(os.path.join(local_path, "input"), "r") as f:
content = f.read()
assert sum(evaluate_expression(x) for x in content.split("\n") if x) == 4696493914530
assert sum(evaluate_expression(x, use_advanced_precedence=True) for x in content.split("\n") if x) == 362880372308125
| 2.609375 | 3 |
210.py | wilbertgeng/LeetCode_exercise | 0 | 12796441 | <filename>210.py
"""210. Course Schedule II"""
import collections


class Solution(object):
def findOrder(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: List[int]
"""
## Practice:
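        # Kahn's algorithm (topological sort): repeatedly take a course whose remaining
        # prerequisite count has dropped to zero. Runs in O(V + E) time and space.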
graph = collections.defaultdict(list)
num_pre = [0]*numCourses
for after, prev in prerequisites:
graph[prev].append(after)
num_pre[after] += 1
coursesCanBeTaken = []
for i in range(numCourses):
if num_pre[i] == 0:
coursesCanBeTaken.append(i)
res = []
while coursesCanBeTaken:
course = coursesCanBeTaken.pop()
res.append(course)
for c in graph[course]:
num_pre[c] -= 1
if num_pre[c] == 0:
coursesCanBeTaken.append(c)
return res if len(res) == numCourses else []
## bfs
graph = [[] for _ in range(numCourses)]
num_pre = [0]*numCourses
for after, pre in prerequisites:
graph[pre].append(after)
num_pre[after] += 1
coursesCanBeTaken = [i for i in range(numCourses) if num_pre[i] == 0]
res = []
while coursesCanBeTaken:
courseTaken = coursesCanBeTaken.pop(0)
res.append(courseTaken)
for j in graph[courseTaken]:
num_pre[j] -= 1
if num_pre[j] == 0:
coursesCanBeTaken.append(j)
return res if len(res) == numCourses else []
## DFS
graph = [[] for _ in range(numCourses)]
visit = [0 for _ in range(numCourses)]
for a, b in prerequisites:
graph[a].append(b)
result = []
def dfs(i):
if visit[i] == 1:
return True
if visit[i] == -1:
return False
visit[i] = -1
for j in graph[i]:
if not dfs(j):
return False
result.append(i)
visit[i] = 1
return True
for i in range(numCourses):
if not dfs(i):
return []
return result
| 3.546875 | 4 |
fastapi_sso/sso/kakao.py | thdwoqor/fastapi-sso | 0 | 12796442 | <reponame>thdwoqor/fastapi-sso
import os
from typing import Dict
from fastapi_sso.sso.base import OpenID, SSOBase
class KakaoSSO(SSOBase):
provider = "kakao"
scope = ["openid"]
version = "v2"
async def get_discovery_document(self) -> Dict[str, str]:
return {
"authorization_endpoint": f"https://kauth.kakao.com/oauth/authorize?client_id={self.client_secret}&response_type=code&redirect_uri={self.redirect_uri}",
"token_endpoint": f"https://kauth.kakao.com/oauth/token",
"userinfo_endpoint": f"https://kapi.kakao.com/{self.version}/user/me",
}
@classmethod
async def openid_from_response(cls, response: dict) -> OpenID:
return OpenID(display_name=response["properties"]["nickname"], provider=cls.provider)
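

# Usage sketch (assumes the standard fastapi_sso SSOBase constructor and helpers,
# which this provider inherits; the credential values below are placeholders):
#
#   sso = KakaoSSO(client_id="KAKAO_REST_API_KEY", client_secret="...",
#                  redirect_uri="http://localhost:8000/auth/callback")
#   login_url = await sso.get_login_redirect()       # send the user here to log in
#   user = await sso.verify_and_process(request)     # call inside the callback route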
| 2.53125 | 3 |
aio_background/job.py | anna-money/aio-background | 7 | 12796443 | import abc
import asyncio
from typing import Collection
class Job(abc.ABC):
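    """Handle to a running background job: reports whether it is running and can close it."""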
__slots__ = ()
@property
@abc.abstractmethod
def is_running(self) -> bool:
...
@abc.abstractmethod
async def close(self, *, timeout: float = 0.5) -> bool:
...
class SingleTaskJob(Job):
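    """Job backed by a single asyncio Task; closing cancels the task and waits briefly for it to finish."""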
__slots__ = ("_task",)
def __init__(self, task: asyncio.Task[None]):
self._task = task
@property
def is_running(self) -> bool:
return not self._task.done()
async def close(self, *, timeout: float = 0.5) -> bool:
if self._task.done():
return True
self._task.cancel()
await asyncio.wait({self._task}, timeout=timeout)
return self._task.done()
class CombinedJob(Job):
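    """Job that aggregates several jobs; it is considered running (or closed) only when all of them are."""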
__slots__ = ("_jobs",)
def __init__(self, jobs: Collection[Job]):
self._jobs = jobs
@property
def is_running(self) -> bool:
return all(job.is_running for job in self._jobs)
async def close(self, *, timeout: float = 0.5) -> bool:
tasks = [asyncio.create_task(job.close(timeout=timeout)) for job in self._jobs]
closed = True
for task in tasks:
closed &= await task
return closed
| 2.734375 | 3 |
ulmo.py | simusid/ulmo | 0 | 12796444 | # provide status of all jobs
import ulmodb
dbname = "ulmodb.db"
db = ulmodb.UlmoDB(dbname)
| 1.25 | 1 |
docs/code/snippet_nmf_fro.py | askerdb/nimfa | 325 | 12796445 | import numpy as np
import nimfa
V = np.random.rand(40, 100)
nmf = nimfa.Nmf(V, seed="nndsvd", rank=10, max_iter=12, update='euclidean',
objective='fro')
nmf_fit = nmf()
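
# Inspect the factorization via nimfa's fit accessors
# (with V of shape 40x100 and rank 10, W is 40x10 and H is 10x100):
W = nmf_fit.basis()
H = nmf_fit.coef()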
| 1.992188 | 2 |
ether/qparser.py | alexin-ivan/ether | 0 | 12796446 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
##############################################################################
from PyQt4 import QtCore
import logging
##############################################################################
class QParser(QtCore.QThread):
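    """QThread wrapper that runs the callable `f` on a background thread, logging start and stop."""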
def __init__(self, f, parent=None):
super(QParser, self).__init__(parent)
self.f = f
self.logger = logging.getLogger('QParser')
def run(self):
self.logger.debug('started')
self.f()
self.logger.debug('stoped')
| 2.265625 | 2 |
04_chat/client.py | pymug/ARJ_SpoonfeedingSockets_APR2021 | 0 | 12796447 | <filename>04_chat/client.py<gh_stars>0
'''
Modified by <NAME>
<EMAIL>
'''
import socket, threading
def handle_messages(connection: socket.socket):
'''
Receive messages sent by the server and display them to user
'''
while True:
try:
msg = connection.recv(1024)
            # An empty message usually means the server has closed the connection,
            # so close this end and stop listening.
            # Otherwise, decode the message and display it to the user.
if msg:
print(msg.decode())
else:
connection.close()
break
except Exception as e:
print(f'Error handling message from server: {e}')
connection.close()
break
def client() -> None:
'''
Main process that start client connection to the server
and handle it's input messages
'''
SERVER_ADDRESS = '127.0.0.1'
SERVER_PORT = 12000
try:
# Instantiate socket and start connection with server
socket_instance = socket.socket()
socket_instance.connect((SERVER_ADDRESS, SERVER_PORT))
# Create a thread in order to handle messages sent by server
threading.Thread(target=handle_messages, args=[socket_instance]).start()
print('Connected to chat!')
        # Read the user's input until they quit the chat, then close the connection
while True:
msg = input('> ')
if msg == 'quit':
break
            # Encode the message to UTF-8 bytes before sending
socket_instance.send(msg.encode())
# Close connection with the server
socket_instance.close()
except Exception as e:
print(f'Error connecting to server socket {e}')
socket_instance.close()
if __name__ == "__main__":
client() | 3.328125 | 3 |
bin/config-get.py | chrisbrierley/jaspy-manager | 0 | 12796448 | #!/usr/bin/env python
import os
import json
import sys
import argparse
def _find_config_file():
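    """Search upwards (by prefixing '../') for 'etc/minicondas.json' and return the first path found."""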
config = 'etc/minicondas.json'
while not os.path.isfile(config):
config = '../{}'.format(config)
if len(config) > 70:
raise Exception('Cannot locate config file "etc/minicondas.json".')
return config
def _get(py_version, miniconda_version, attribute):
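    """Return `attribute` (url, md5 or short_id) for the given Python/Miniconda versions.

    A `miniconda_version` of 'latest' is resolved to the highest version listed for that Python version.
    """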
config = _find_config_file()
with open(config) as reader:
data = json.load(reader)
if miniconda_version == 'latest':
_all_versions = [i.split('-')[1] for i in data['minicondas'][py_version].keys()]
m_start = 'm' + py_version.replace('py', '')[0]
_av_ints = sorted([[int(i) for i in item.split('.')] for item in _all_versions])
_all_versions = ['.'.join([str(item) for item in items]) for items in _av_ints]
miniconda_version = m_start + '-' + _all_versions[-1]
try:
attr = data['minicondas'][py_version][miniconda_version][attribute]
    except KeyError:
        print('Could not find {} attribute for python version: "{}"'.format(attribute, py_version))
        sys.exit(1)

    return attr
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("py_version", type=str, help="Python version")
parser.add_argument("attribute", type=str, choices=['url', 'md5', 'short_id'],
help="Attribute")
parser.add_argument('-m', '--miniconda-version', default='latest',
help='Add Miniconda version (or use "latest").',
type=str)
args = parser.parse_args()
print(_get(args.py_version, args.miniconda_version, args.attribute))
| 2.734375 | 3 |
tokenizer.py | datares/recipe-gpt | 2 | 12796449 | from transformers import GPT2Tokenizer
tokenizer = GPT2Tokenizer.from_pretrained('gpt2', bos_token='<|startoftext|>', eos_token='<|endoftext|>', pad_token='<|pad|>')
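# Reuse the EOS token for padding (this overrides the '<|pad|>' token registered above)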
tokenizer.pad_token = tokenizer.eos_token
| 2.25 | 2 |
ete_component/EteComponent.py | marco-mariotti/treedex | 0 | 12796450 | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class EteComponent(Component):
"""An EteComponent component.
Keyword arguments:
- id (string; required):
The ID used to identify this component in Dash callbacks.
- activeClades (list of dicts; optional):
Active clade selections (nodes not saved with name).
- activeLeaves (list of dicts; optional):
Active leaf selections (nodes not saved with name).
- activeNodes (list of dicts; optional):
Active node selections (nodes not saved with name).
- height (string; default "100%"):
iframe height. Default: 100%.
- hover (dict; optional):
Hovered node.
- path (string; default ""):
URL from where ETE's server is running.
- saved (dict; optional):
Saved selections.
`saved` is a dict with strings as keys and values of type dict
with keys:
- color (string; optional)
- name (string; optional)
- selectCommand (string; optional)
- treeid (number; required):
Integer that defines a tree.
- url (string; required):
URL from where ETE's server is running.
- width (string; default "100%"):
iframe width. Default: 100%."""
@_explicitize_args
def __init__(self, id=Component.REQUIRED, url=Component.REQUIRED, path=Component.UNDEFINED, treeid=Component.REQUIRED, width=Component.UNDEFINED, height=Component.UNDEFINED, hover=Component.UNDEFINED, activeNodes=Component.UNDEFINED, activeClades=Component.UNDEFINED, activeLeaves=Component.UNDEFINED, saved=Component.UNDEFINED, **kwargs):
self._prop_names = ['id', 'activeClades', 'activeLeaves', 'activeNodes', 'height', 'hover', 'path', 'saved', 'treeid', 'url', 'width']
self._type = 'EteComponent'
self._namespace = 'ete_component'
self._valid_wildcard_attributes = []
self.available_properties = ['id', 'activeClades', 'activeLeaves', 'activeNodes', 'height', 'hover', 'path', 'saved', 'treeid', 'url', 'width']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in ['id', 'url', 'treeid']:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(EteComponent, self).__init__(**args)
| 2.046875 | 2 |