{
"source": "9bstudios/mecco_passify",
"score": 2
}
#### File: mecco_passify/lxserv/passify_ManagerApplyDiscard.py
```python
import lx, lxu.command, modo, passify, traceback
class commandClass(lxu.command.BasicCommand):
def __init__(self):
lxu.command.BasicCommand.__init__(self)
self.dyna_Add('mode', lx.symbol.sTYPE_STRING)
self.basic_SetFlags(0, lx.symbol.fCMDARG_QUERY)
def CMD_EXE(self, msg, flags):
mode = self.dyna_String(0)
if mode == passify.APPLY:
passify.safe_edit_apply()
try:
lx.eval('!passify.ManagerAutoAdd 0')
except:
pass
if mode == passify.DISCARD:
passify.safe_edit_discard()
try:
lx.eval('!passify.ManagerAutoAdd 0')
except:
pass
notifier = passify.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_DATATYPE)
def basic_Execute(self, msg, flags):
try:
self.CMD_EXE(msg, flags)
except Exception:
lx.out(traceback.format_exc())
def basic_Enable(self,msg):
try:
return passify.is_enabled('edit.apply')
except Exception:
lx.out(traceback.format_exc())
def arg_UIValueHints(self, index):
return Channel_Notifiers()
lx.bless(commandClass, passify.CMD_MANAGER_APPLY_DISCARD)
class Channel_Notifiers(lxu.command.BasicHints):
def __init__(self):
self._notifiers = [('notifier.editAction','')]
```
#### File: mecco_passify/lxserv/passify_ManagerChannelsRemove.py
```python
import lx, modo, lxu.command, traceback, passify
class myGreatCommand(lxu.command.BasicCommand):
def __init__(self):
lxu.command.BasicCommand.__init__(self)
def cmd_Flags (self):
return lx.symbol.fCMD_MODEL | lx.symbol.fCMD_UNDO
def CMD_EXE(self, msg, flags):
lx.eval('group.edit rem chan pass')
notifier = passify.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_DATATYPE)
def basic_Execute(self, msg, flags):
try:
self.CMD_EXE(msg, flags)
except Exception:
lx.out(traceback.format_exc())
def arg_UIValueHints(self, index):
return Notifiers()
def basic_Enable(self,msg):
group = None
try:
group = lx.eval('group.current group:? type:pass')
except:
return False
if group:
return True
return False
class Notifiers(lxu.command.BasicHints):
def __init__(self):
self._notifiers = [("select.event", "item +l"),("select.event", "channel +l")]
lx.bless(myGreatCommand, passify.CMD_MANAGER_CHANNELSREMOVE)
```
#### File: mecco_passify/lxserv/passify_ManagerCombinePassGroups.py
```python
import modo, lx, lxu, traceback, passify
class CMD(lxu.command.BasicCommand):
def basic_Execute(self, msg, flags):
try:
scene = modo.Scene()
selected_groups = scene.selectedByType(lx.symbol.sITYPE_GROUP)
if selected_groups:
selected_pass_groups = [g for g in selected_groups if g.type == 'render']
if selected_pass_groups:
passify.create_master_pass_group(selected_pass_groups)
else:
modo.dialogs.alert(passify.message('error'), passify.message('select_a_pass_group'))
else:
modo.dialogs.alert(passify.message('error'), passify.message('select_a_pass_group'))
except Exception:
passify.util.debug(traceback.format_exc())
notifier = passify.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_DATATYPE)
def basic_Enable(self,msg):
try:
group = lx.eval('group.current group:? type:pass')
except:
return False
if group:
return True
return False
class Notifiers(lxu.command.BasicHints):
def __init__(self):
self._notifiers = [('notifier.layerAutoAdd',''),('notifier.editAction','')]
lx.bless(CMD, passify.CMD_MANAGER_COMBINE_PASS_GROUPS)
```
#### File: mecco_passify/lxserv/passify_ManagerSelectGroupChannels.py
```python
import lx, lxu.command, passify
class commandClass(lxu.command.BasicCommand):
def basic_Execute(self, msg, flags):
lx.eval('select.drop channel')
try:
lx.eval('group.scan mode:sel type:chan grpType:pass')
lx.eval('tool.set channel.haul on')
except:
pass
def arg_UIValueHints(self, index):
return Notifiers()
def basic_Enable(self,msg):
try:
group = lx.eval('group.current group:? type:pass')
except:
return False
if group:
return True
return False
class Notifiers(lxu.command.BasicHints):
def __init__(self):
self._notifiers = [('notifier.layerAutoAdd',''),('notifier.editAction','')]
lx.bless(commandClass, passify.CMD_MANAGER_HAUL_GROUP_CHANNELS)
```
#### File: mecco_passify/lxserv/passify_SelectionListener.py
```python
import lx, lxifc, lxu, passify
class MySelectionListener(lxifc.SelectionListener):
def __init__ (self):
self.COM_object = lx.object.Unknown (self)
self.lis_svc = lx.service.Listener ()
self.lis_svc.AddListener (self.COM_object)
sel_svc = lx.service.Selection ()
self.cinetype = lxu.lxID4 ('CINE')
def __del__ (self):
self.lis_svc.RemoveListener (self.COM_object)
def notify (self):
notifier = passify.Notifier()
notifier.Notify (lx.symbol.fCMDNOTIFY_DATATYPE)
# notifier.Notify (lx.symbol.fCMDNOTIFY_LABEL)
# notifier.Notify (lx.symbol.fCMDNOTIFY_VALUE)
''' Listener Method Overrides '''
def selevent_Current (self, typ):
if typ == self.cinetype:
self.notify ()
def selevent_Add (self, typ, subtType):
if typ == self.cinetype:
self.notify ()
def selevent_Remove (self, typ, subtType):
if typ == self.cinetype:
self.notify ()
MySelectionListener ()
```
#### File: mecco_passify/lxserv/passify_TogglerRemoveItems.py
```python
import lx, lxu.command, traceback, passify, lxifc
class cmd_remove_from_layer(lxu.command.BasicCommand):
def __init__(self):
lxu.command.BasicCommand.__init__(self)
def cmd_Flags (self):
return lx.symbol.fCMD_MODEL | lx.symbol.fCMD_UNDO
def CMD_EXE(self, msg, flags):
passify.toggler.remove_selected()
notifier = passify.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_DATATYPE)
def basic_Execute(self, msg, flags):
try:
self.CMD_EXE(msg, flags)
except Exception:
lx.out(traceback.format_exc())
def arg_UIValueHints(self, index):
return Notifiers()
def basic_Enable(self,msg):
try:
if passify.get_selected_and_maskable() and passify.fetch_by_tag(passify.TOGGLER_PGRP,type_='renderPassGroups'):
return True
except:
return False
return False
class Notifiers(lxu.command.BasicHints):
def __init__(self):
self._notifiers = [
("select.event", "item +l")
]
lx.bless(cmd_remove_from_layer, passify.CMD_TOGGLER_REMOVE)
```
#### File: mecco_passify/passify/add_items.py
```python
import lx, modo
from util import *
from var import *
def add_items(items_dict):
# debug("add_items", True)
for i in fetch_tagged():
if i.getTags()[TAG] in items_dict:
items_dict[i.getTags()[TAG]]["item"] = i
# debug("fetched tagged items", True)
for k, v in {k:v for k, v in items_dict.iteritems() if "item" not in v}.iteritems():
items_dict[k]["item"] = modo.Scene().addItem(v[TYPE])
items_dict[k]["item"].name = v[NAME]
tag = buildTag(v[TAGS]) if TAGS in v else TAG_DEFAULT
items_dict[k]["item"].setTag(TAG, tag)
if GTYP in v:
items_dict[k]["item"].setTag('GTYP',v[GTYP])
if CHANNELS in v:
for channel, value in v[CHANNELS].iteritems():
items_dict[k]["item"].channel(channel).set(value)
# debug("created all items", True)
for k, v in {k:v for k, v in items_dict.iteritems() if PARENT in v}.iteritems():
items_dict[k]["item"].setParent(fetch_by_tag(v[PARENT]))
for k, v in {k:v for k, v in items_dict.iteritems() if REORDER in v}.iteritems():
reorder(items_dict[k]["item"],v[REORDER])
# debug("parented items", True)
for k, v in {k:v for k, v in items_dict.iteritems() if ITEMGRAPHS in v}.iteritems():
# debug("adding item graph links for " + k, True)
links = v[ITEMGRAPHS] if hasattr(v[ITEMGRAPHS], '__iter__') else [v[ITEMGRAPHS]]
for i in links:
# debug("linking %s to %s via %s" % (items_dict[k]["item"].id, fetch_by_tag(i[1]).id, i[0]), True)
itemGraph = lx.object.ItemGraph(modo.Scene().GraphLookup(i[0]))
itemGraph.AddLink(items_dict[k]["item"],fetch_by_tag(i[1]))
# debug("item graphs added", True)
for k, v in {k:v for k, v in items_dict.iteritems() if GROUPCHANNELS in v}.iteritems():
for channel_tuple in v[GROUPCHANNELS]:
channel = fetch_by_tag(channel_tuple[0]).channel(channel_tuple[1])
if channel not in items_dict[k]["item"].groupChannels:
items_dict[k]["item"].addChannel(channel)
# debug("group channels added", True)
for k, v in {k:v for k, v in items_dict.iteritems() if CHANNELWRITE in v}.iteritems():
for channel_tuple in v[CHANNELWRITE]:
fetch_by_tag(channel_tuple[0]).channel(channel_tuple[1]).set(channel_tuple[2], action=v["item"].name)
safe_edit_apply()
# debug("channels written to passes", True)
# debug("end add_items", True)
return items_dict
```
{
"source": "9bstudios/mecco_renderMonkey",
"score": 2
}
#### File: mecco_renderMonkey/lxserv/rm_pauseToggle.py
```python
import lx
import lxu.command
import traceback
class pauseToggle(lxu.command.BasicCommand):
def __init__(self):
lxu.command.BasicCommand.__init__(self)
self.PAUSED = False
self.dyna_Add('isPaused', lx.symbol.sTYPE_BOOLEAN)
self.basic_SetFlags(0, lx.symbol.fCMDARG_QUERY)
def cmd_Flags (self):
return lx.symbol.fCMD_MODEL | lx.symbol.fCMD_UNDO
def CMD_EXE(self, msg, flags):
pass
def basic_Execute(self, msg, flags):
try:
self.CMD_EXE(msg, flags)
except Exception:
lx.out(traceback.format_exc())
def basic_Enable(self,msg):
return True
def cmd_Query(self, index, vaQuery):
lx.out(self.dyna_Int(0, 0))
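        # Note: this stub only logs the value. A full query implementation
        # would normally also write the result into vaQuery, e.g. (sketch,
        # following the pattern used in replay_gistAccount.py below):
        #   va = lx.object.ValueArray()
        #   va.set(vaQuery)
        #   va.AddInt(self.dyna_Int(0, 0))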
return lx.result.OK
lx.bless(pauseToggle, "mecco.renderMonkey.paused")
```
#### File: mecco_renderMonkey/lxserv/rm_range.py
```python
import monkey, modo, lx, lxu, traceback
CMD_NAME = 'renderMonkey.range'
class CMD(lxu.command.BasicCommand):
def __init__(self):
lxu.command.BasicCommand.__init__(self)
self.startPath = None
self.dyna_Add('range', lx.symbol.sTYPE_STRING)
def basic_Execute(self, msg, flags):
try:
frames_string = self.dyna_String(0)
frames_list = monkey.util.frames_from_string(frames_string)
if frames_list:
monkey.render.render_frames(frames_list)
else:
                modo.dialogs.alert(
                    "Invalid Frame Range",
                    'No frame range recognized in "{}".'.format(frames_string),
                    dtype='error'
                )
return lx.symbol.e_FAILED
except:
monkey.util.debug(traceback.format_exc())
            return lx.symbol.e_FAILED
lx.bless(CMD, CMD_NAME)
```
{
"source": "9bstudios/mecco_replay",
"score": 3
}
#### File: mecco_replay/lxserv/replay_argEditFCL.py
```python
import lx, modo, replay
"""A simple example of a blessed MODO command using the commander module.
https://github.com/adamohern/commander for details"""
class CommandClass(replay.commander.CommanderClass):
"""Inserts a comment-only command object into the `Macro().commands` list."""
def commander_arguments(self):
return [
{
'name': 'query',
'datatype': 'string',
'values_list_type': 'fcl',
'values_list': self.list_commands,
'flags': ['query']
}, {
'name': 'asString',
'label': "As String",
'datatype': 'boolean',
'default': 'false',
'flags': ['optional']
}
]
def commander_notifiers(self):
return [("replay.notifier", "")]
def remove_duplicates(self, seq):
"""Removes duplicate list items while maintaining list order."""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
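        # Example: remove_duplicates([3, 1, 3, 2, 1]) -> [3, 1, 2]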
def list_commands(self):
nodes = replay.Macro().selected_commands
asString = self.commander_args().get('asString', False)
# Collect args of selected commands
args = []
for node in nodes:
command_obj = node.attributes()
for arg in node.args:
if not command_obj.arg(arg.index).is_hidden(True):
args.append(arg.argName)
# Collect selected args
for arg in replay.Macro().selected_args:
command_obj = arg.parent.attributes()
if not command_obj.arg(arg.index).is_hidden(True):
args.append(arg.argName)
commands_list = []
for arg in self.remove_duplicates(args):
commands_list.append('replay.argEdit%s %s ?' % ("AsString" if asString else "", arg))
return commands_list
lx.bless(CommandClass, 'replay.argEditFCL')
```
#### File: mecco_replay/lxserv/replay_fileOpenExternal.py
```python
import lx, modo, replay
from os.path import basename
import replay.commander as commander
from replay import message as message
"""A simple example of a blessed MODO command using the commander module.
https://github.com/adamohern/commander for details"""
class CommandClass(replay.commander.CommanderClass):
"""Opens the currently-open Macro in the filesystem text editor."""
def commander_execute(self, msg, flags):
# Stop recording
lx.eval('replay.record stop')
file_path = replay.Macro().file_path
if file_path is None:
lx.eval('replay.saveAs')
try:
file_path = replay.Macro().file_path
lx.eval('!!file.open {%s}' % file_path)
lx.eval('replay.fileClose')
except:
modo.dialogs.alert(
message("MECCO_REPLAY", "OPEN_FILE_FAIL"),
message("MECCO_REPLAY", "OPEN_FILE_FAIL_MSG", basename(file_path))
)
def basic_Enable(self, msg):
if not replay.Macro().file_path:
return False
return True
lx.bless(CommandClass, 'replay.fileOpenExternal')
```
#### File: mecco_replay/lxserv/replay_fileRunFromFolderPop.py
```python
import lx, modo, replay, os, traceback
from replay import message as message
"""A simple example of a blessed MODO command using the commander module.
https://github.com/adamohern/commander for details"""
def is_valid_script(path):
if os.path.isfile(path) and os.path.splitext(path)[1].lower() in ['.py', '.lxm', '.pl']:
return True
return False
class CommandClass(replay.commander.CommanderClass):
"""Lists recently-opened macro files stored in a custom user value."""
def commander_arguments(self):
return [
{
'name': 'path',
'datatype': 'string'
}, {
'name': 'query',
'label': self.label,
'datatype': 'string',
'values_list_type': 'popup',
'values_list': self.list_scripts,
'flags': ['query']
}
]
def commander_notifiers(self):
return [("replay.notifier", "")]
def commander_execute(self, msg, flags):
directory = lx.eval('query platformservice alias ? {%s}' % self.commander_arg_value(0))
path = self.commander_arg_value(1)
if not path:
notifier = replay.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_CHANGE_ALL)
return
try:
lx.eval('replay.runScript {%s}' % path)
except:
traceback.print_exc()
def list_scripts(self):
path = lx.eval('query platformservice alias ? {%s}' % self.commander_arg_value(0))
commands_list = []
for sub in [f for f in os.listdir(path) if is_valid_script(os.path.join(path, f))]:
commands_list.append((os.path.join(path, sub), os.path.basename(sub)))
commands_list.append(('', '(%s)' % message("MECCO_REPLAY", "REFRESH")))
return commands_list
def label(self):
return "Run from " + os.path.basename(lx.eval('query platformservice alias ? {%s}' % self.commander_arg_value(0)))
lx.bless(CommandClass, 'replay.fileRunFromFolderPop')
```
#### File: mecco_replay/lxserv/replay_fileSaveAs.py
```python
import lx, modo, replay
from replay import message as message
"""A simple example of a blessed MODO command using the commander module.
https://github.com/adamohern/commander for details"""
class CommandClass(replay.commander.CommanderClass):
"""Saves the current Macro() object to the destination stored in its
`file_path` property. If `file_path` is `None`, prompt for a destination. Unlike
`replay.fileExport`, this command only supports saving to the LXM format."""
_path = lx.eval('query platformservice alias ? {scripts:untitled}')
def commander_arguments(self):
return [
{
'name': 'path',
'datatype': 'string',
'flags': ['optional']
}
]
def commander_execute(self, msg, flags):
# Stop recording
lx.eval('replay.record stop')
macro = replay.Macro()
file_path = None
file_format = macro.file_format
# If there is no associated file path try to get from command line or prompt the user for new destination
if file_path is None:
# Try to get the path from the command line:
file_path = self.commander_arg_value(0)
file_format = "lxm"
# Prompt the user
if not file_path:
file_path = modo.dialogs.customFile(
dtype = 'fileSave',
title = message("MECCO_REPLAY", "SAVE_DIALOG_TITLE"),
names = ('LXM',),
unames = ('LXM file',),
ext=('LXM',),
path = self._path
)
if file_path is None:
return
self.__class__._path = file_path
# And save it for the next time
macro.file_path = file_path
macro.render(file_format, file_path)
lx.eval('!!replay.fileClose')
lx.eval('replay.fileOpen {%s}' % file_path)
# Add to recently-opened
lx.eval('replay.fileOpenAddRecent {%s}' % file_path)
def basic_Enable(self, msg):
if replay.Macro().is_empty:
return False
return True
lx.bless(CommandClass, 'replay.fileSaveAs')
```
#### File: mecco_replay/lxserv/replay_gistAccount.py
```python
import base64
import lx, modo
import replay
from replay import message as message
"""A simple example of a blessed MODO command using the commander module.
https://github.com/adamohern/commander for details"""
class GistAccountClass(replay.commander.CommanderClass):
"""Editor for command argument values. Accepts an argName and a value query.
Designed specifically for use with `replay.argEditFCL`."""
def commander_arguments(self):
return [
{
'name': 'username',
'label': 'User Name',
'datatype': 'string',
'flags': ['query']
}, {
'name': 'password',
'label': 'Password',
'datatype': 'string',
'flags': ['query']
}
]
def cmd_DialogInit(self):
self.attr_SetString(1, "")
def commander_execute(self, msg, flags):
"""Fires whenever the value is updated in the form. Stores changes in the
proper place."""
username = self.commander_args()['username']
password = self.commander_args()['password']
        # Temporarily using base64 instead of encryption.
        # Still need to figure out which library to use for real encryption.
        # Assumption: the call below was redacted in this extract; restored as
        # base64.b64encode based on the `import base64` at the top of the file.
        password = base64.b64encode(password)
lx.eval("user.value mecco_replay_gist_username {%s}" % username)
lx.eval("user.value mecco_replay_gist_password {%s}" % password)
def cmd_Query(self, index, vaQuery):
"""Fires whenever the value is displayed in the form. Should return the value(s)
to be displayed in the edit field. If multiple values are provided, MODO will
display "mixed..." in the edit field for batch editing."""
# Create the ValueArray object
va = lx.object.ValueArray()
va.set(vaQuery)
if index == 0:
va.AddString(lx.eval("user.value mecco_replay_gist_username ?"))
elif index == 1:
password = lx.eval("user.value mecco_replay_gist_password ?")
va.AddString('*' * len(password))
return lx.result.OK
lx.bless(GistAccountClass, 'replay.gistAccount')
```
#### File: mecco_replay/lxserv/replay_lineSelect.py
```python
import lx, lxifc, modo, replay
from replay import message as message
"""A simple example of a blessed MODO command using the commander module.
https://github.com/adamohern/commander for details"""
class CommandClass(replay.commander.CommanderClass):
"""Selects the first command or block node in the Macro."""
def commander_arguments(self):
return [
{
'name': 'path',
'datatype': 'string',
'default': '0',
'flags':[]
}, {
'name': 'add',
'label': "Add To Selection",
'datatype': 'boolean',
'default': 'false',
'flags': ['optional']
}
]
def commander_execute(self, msg, flags):
path = self.commander_args()['path']
path = [int(idx) for idx in path.split(';')]
add = self.commander_args()['add']
macro = replay.Macro()
if not add:
macro.clear_selection()
try:
macro.node_for_path(path).selected = True
except:
pass
macro.rebuild_view()
macro.unsaved_changes = True
notifier = replay.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_CHANGE_ALL)
lx.bless(CommandClass, 'replay.lineSelect')
```
#### File: mecco_replay/lxserv/replay_mapToKey.py
```python
import lx, modo, replay
from replay import message as message
"""A simple example of a blessed MODO command using the commander module.
https://github.com/adamohern/commander for details"""
class CommandClass(replay.commander.CommanderClass):
"""Maps the currently-open Macro to a key."""
def commander_execute(self, msg, flags):
# Stop recording
lx.eval('replay.record stop')
macro = replay.Macro()
if macro.file_path:
lx.eval('cmds.mapKey command:{replay.runScript {%s}}' % macro.file_path)
else:
default_path = lx.eval('query platformservice alias ? {scripts:}')
# Get the path from the user, if not given as argument:
file_path = modo.dialogs.customFile(
dtype = 'fileOpen',
title = message("MECCO_REPLAY", "KEY_MAPPING_SCRIPT"),
names = macro.import_format_names,
unames = macro.import_format_unames,
patterns = macro.import_format_patterns,
path = default_path
)
if file_path is None:
return
lx.eval('cmds.mapKey command:{@{%s}}' % file_path)
lx.bless(CommandClass, 'replay.mapToKey')
```
#### File: mecco_replay/lxserv/replay_selToBlock.py
```python
import lx, lxifc, modo, replay
"""A simple example of a blessed MODO command using the commander module.
https://github.com/adamohern/commander for details"""
class CommandClass(replay.commander.CommanderClass):
"""Deletes the currently-selected command from the `Macro()` object."""
def commander_execute(self, msg, flags):
macro = replay.Macro()
# collect list of selected paths
paths = list()
for line in macro.selected_descendants:
paths.append(line.path)
target = macro.primary
if target is None:
return
# Register Undo object performing operation and apply it
undo_svc = lx.service.Undo()
if undo_svc.State() != lx.symbol.iUNDO_INVALID:
undo_svc.Apply(UndoToBlock(paths, target.path, ""))
def basic_Enable(self, msg):
if lx.eval('replay.record query:?'):
return False
if len(replay.Macro().selected_descendants) == 0:
return False
for command in replay.Macro().selected_descendants:
if not command.can_add_to_block():
return False
return True
class UndoToBlock(lxifc.Undo):
def __init__(self, paths, target_path, name):
self.m_paths = paths
self.m_paths.sort()
self.m_target_path = target_path
self.m_name = name
def finalize_command(self, macro):
"""Does common command finalizing operations"""
macro.rebuild_view()
replay.Macro().unsaved_changes = True
notifier = replay.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_CHANGE_ALL)
def undo_Forward(self):
macro = replay.Macro()
nodes = list()
        # Collecting nodes, since paths will be invalidated during the move
for path in self.m_paths:
nodes.append(macro.node_for_path(path))
target_node = macro.add_block(name = self.m_name, comment=[], meta = [], suppress=False, path = self.m_target_path)
# Storing new paths after creating target_node for undo
del self.m_paths[:]
for node in nodes:
self.m_paths.append(node.path)
idx = 0
for node in nodes:
node.path = target_node.path + [idx]
idx += 1
self.m_target_path = target_node.path
self.finalize_command(macro)
def undo_Reverse(self):
macro = replay.Macro()
target_node = macro.node_for_path(self.m_target_path)
nodes = list()
        # Moving each child out shifts the remaining indices, so always take
        # the first child.
for path in self.m_paths:
child = target_node.children[0]
nodes.append(child)
child.path = path
self.m_target_path = target_node.path
target_node.delete()
# Restoring initial paths for redo
del self.m_paths[:]
for node in nodes:
self.m_paths.append(node.path)
self.finalize_command(macro)
lx.bless(CommandClass, 'replay.selToBlock')
```
#### File: replay/lumberjack/Color.py
```python
class Color(object):
def __init__(self):
self._internal_rgb = []
self._special = None
# Markup for rich text
def markup(self):
"""Returns the markup string for use in treeview cells."""
if self._special:
return '\x03({}:{})'.format('c', self._special)
if self._internal_rgb:
return '\x03({}:{})'.format('c', self.bitwise_rgb())
return ''
# Bitwise
def bitwise_rgb(self):
"""Returns the bitwise RGB string for the Color object's current internal RGB."""
r, g, b = [int(n * 255) for n in self._internal_rgb]
return str(0x01000000 | ((r << 16) | (g << 8 | b)))
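        # Worked example: internal RGB (1.0, 0.5, 0.0) gives r, g, b = 255, 127, 0,
        # so this returns str(0x01000000 | (255 << 16) | (127 << 8) | 0) == '33521408'.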
# Set Explicit Color
def set_with_8bit(self, r, g, b):
"""Sets internal RGB with three int values between 0-255."""
        # Float division: in Python 2, integer division would floor every
        # channel to 0.
        self._internal_rgb = [(n / 255.0) for n in (r, g, b)]
def set_with_float(self, r, g, b):
"""Sets internal RGB with three decimal values 0.0-1.0"""
self._internal_rgb = [r, g, b]
def set_with_hex(self, h):
"""Sets internal RGB using a 16-bit hex code string, e.g. "#ffffff"""
h = h.strip()
if h[0] == '#':
h = h[1:]
r, g, b = h[:2], h[2:4], h[4:]
        r, g, b = [int(n, 16) for n in (r, g, b)]
        # Store as 0.0-1.0 floats to stay consistent with the rest of the
        # class (bitwise_rgb multiplies by 255).
        self._internal_rgb = [n / 255.0 for n in (r, g, b)]
# Set Special Color
def special_by_int():
doc = """Certain specific color codes are built-in to MODO for common UI
conventions, such as 4113 for grayed out text. Should be a string. If
unsure, leave this alone."""
        def fget(self):
            return self._special
        def fset(self, value):
            # Store in _special so that markup() picks the value up.
            self._special = value
            self._internal_rgb = []
return locals()
special_by_int = property(**special_by_int())
def special_by_name(self, name):
"""Sets special MODO colors by human-readable names."""
if name in ['gray', 'grey']:
# 4113 is a special color for grayed-out text in MODO
self._special = 4113
elif name == 'default':
self._special = None
elif name == 'black':
self._special = None
self._internal_rgb = [0,0,0]
```
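For reference, a minimal usage sketch of the `Color` class above (the `\x03(c:...)` escape is MODO's rich-text color markup for treeview cells):
```python
# A minimal usage sketch, assuming the Color class defined above.
c = Color()
c.set_with_float(1.0, 0.5, 0.0)
print(c.markup())            # '\x03(c:33521408)'
c.special_by_name('gray')
print(c.markup())            # '\x03(c:4113)'
```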
#### File: replay/lumberjack/Font.py
```python
class Font(object):
"""Special class for storing and retrieving font flags for use in treeviews."""
_font = None
def markup(self):
"""Returns the markup string for use in treeview cells."""
if self._font:
return '\x03({}:{})'.format('f', self._font)
return ''
def set_bold(self):
self._font = 'FONT_BOLD'
def set_italic(self):
self._font = 'FONT_ITALIC'
def set_normal(self):
self._font = 'FONT_NORMAL'
def set_default(self):
self._font = 'FONT_DEFAULT'
```
#### File: mecco_replay/replay/MacroCommandArg.py
```python
import lx
import re
import lumberjack
class MacroCommandArg(lumberjack.TreeNode):
'''
Contains everything pertaining to a single command argument in the macro.
Each MacroCommand object will create one MacroCommandArg child for each
argument.
Args:
parent (MacroCommand): parent MacroCommand instance
arg_index (int): argument position within command
\**kwargs: varkwargs
Returns:
MacroCommandArg: command argument
'''
def __init__(self, parent, arg_index, **kwargs):
super(self.__class__, self).__init__(state=lumberjack.fTREE_VIEW_ITEM_ATTR, **kwargs)
# We have to manually pass these in from the parent because the `parent`
# parameter won't be operational until the object has finished `__init__()`.
self._parent = parent
self._arg_index = arg_index
# Argument metadata placeholders
self._argUsername = None
self._argType = None
self._argTypeName = None
self._argDesc = None
self._argExample = None
# `command` field displays the actual argument value
self.columns['command'] = lumberjack.TreeValue()
self.columns['command'].input_region = 'MacroCommandArg'
self.columns['command'].value = None
# `enable` field is empty for arguments
self.columns['enable'] = lumberjack.TreeValue()
self.columns['enable'].input_region = None
self.columns['enable'].display_value = ''
# `prefix` field is empty for arguments
self.columns['prefix'] = lumberjack.TreeValue()
self.columns['prefix'].input_region = None
self.columns['prefix'].display_value = ''
# `name` field contains the argument name as a `value`,
# and the argument's username (nice name) as a `display_value`
self.columns['name'] = lumberjack.TreeValue()
self.columns['name'].input_region = 'MacroCommandArg'
self.columns['name'].color.special_by_name('gray')
# Query argument metadata
        self.retrieve_arg_meta()
# If a command string (it's actually a list of strings) has been passed in, parse it:
if bool(kwargs.get('arg_string')) and \
all(isinstance(elem, basestring) for elem in kwargs.get('arg_string')):
self.parse_string(kwargs.get('arg_string'))
def can_change_suppress(self):
'''
Whether or not the supression of this argument can be changed
Args:
None
Returns:
bool: False
        .. todo:
            - What is the point of having functions that have no logic and are
              not properties? ``def __init__(self): self.can_change_suppress = False`` Done.
            - Arman: It is non-trivial in the other node class, MacroBaseCommand, and
              the caller doesn't know the exact type of the object when calling (see
              the lineSuppress command implementation). Generally, having functions
              instead of variables is more flexible: even if there is no logic at the
              moment, it can easily be added later. For example, can_change_color has
              no logic in any node class, but logic could easily be added if needed.
'''
return False
def can_change_color(self):
'''
Whether or not the color of this argument's GUI widget can be changed
Args:
None
Returns:
bool: False
'''
return False
def can_add_command(self):
'''
Whether or not a command can be added to this argument
Args:
None
Returns:
bool: False
'''
return False
def can_add_to_block(self):
'''
Whether or not this argument can be added to a block
Args:
None
Returns:
bool: False
'''
return False
def can_copy(self):
'''
Whether or not this argument can be copied
Args:
None
Returns:
bool: False
'''
return False
def can_insert_after(self):
'''
Whether or not this argument can be inserted after another
Args:
None
Returns:
bool: False
'''
return False
def can_delete(self):
'''
Whether or not this argument can be deleted
Args:
None
Returns:
bool: False
'''
return False
def can_change_name(self):
'''
Whether or not this argument's name can be changed
Args:
None
Returns:
bool: False
'''
return False
def value():
doc = '''
dict: local context
The value property is really a proxy for the command cell value.
        If you set it to a string, it will try to convert it to the
appropriate datatype based on argType.
'''
def fget(self):
return self.columns['command'].value
def fset(self, value):
self.columns['command'].value = self.convert_string_to_value(value)
if self.columns['command'].value is None:
self.columns['name'].color.special_by_name('gray')
else:
self.columns['name'].color.special_by_name('default')
return locals()
value = property(**value())
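    # Note on the pattern above: each property is defined as a function that
    # returns locals() (a dict of fget/fset/doc), which is then unpacked into
    # property(). This is an old Python 2 idiom equivalent to stacked
    # @property / @value.setter decorators.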
def display_prefix():
        doc = '''
        dict: local context
        Gets and sets the display of the command prefix for this argument
'''
def fget(self):
return self.columns['prefix'].display_value
def fset(self, value):
self.columns['prefix'].display_value = value
return locals()
display_prefix = property(**display_prefix())
def asString():
doc = '''
dict: local context
Gets and sets whether or not the argument value is displayed as a string
'''
def fget(self):
return (self.columns['prefix'].display_value == "%")
def fset(self, value):
self.columns['prefix'].display_value = "%" if value else ""
return locals()
asString = property(**asString())
def argName():
doc = '''
dict: local context
Gets and sets the name for this argument's name cell value
'''
def fget(self):
return self.columns['name'].value
def fset(self, value):
self.columns['name'].value = value
return locals()
argName = property(**argName())
def argUsername():
doc = '''
dict: local context
Gets and sets the display_value cell, which controls display of the user name
'''
def fget(self):
return self._argUsername
def fset(self, value):
# Since the `display_value` getter will return color and font markup,
# we need to store the username in both the `display_value` for the `name`
# column, and also in an internal `_argUsername` variable.
self._argUsername = value
self.columns['name'].display_value = value
return locals()
argUsername = property(**argUsername())
def argType():
doc = '''
dict: local context
Gets and sets the argType property
'''
def fget(self):
return self._argType
def fset(self, value):
self._argType = value
return locals()
argType = property(**argType())
def argTypeName():
doc = '''
dict: local context
Gets and sets the argTypeName property
'''
def fget(self):
return self._argTypeName
def fset(self, value):
self._argTypeName = value
return locals()
argTypeName = property(**argTypeName())
def argDesc():
doc = '''
dict: local context
Gets and sets the argDesc property
'''
def fget(self):
return self._argDesc
def fset(self, value):
self._argDesc = value
return locals()
argDesc = property(**argDesc())
def argExample():
doc = '''
dict: local context
Gets and sets the argExample property
'''
def fget(self):
return self._argExample
def fset(self, value):
self._argExample = value
return locals()
argExample = property(**argExample())
def canEval(self):
'''
        Whether or not the argument can be evaluated
Args:
None
Returns:
bool: False
'''
return False
    def retrieve_arg_meta(self):
'''
Retrieve a list of arguments and datatypes from modo's commandservice.
See http://sdk.luxology.com/wiki/Commandservice#command.argNames
Args:
None
Returns:
None
Example:
[
{
'argName': 'argname',
'argUsername': 'Argument Name',
            'argType': 0, # 0 for generic objects, 1 for integers, 2 for floats and 3 for strings
            'argTypeName': 'boolean',
            'argDesc': 'What the argument does.',
            'argExample': 'Example if available.',
            'argValue': 'Value of the argument.'
}
]
'''
base_command = self.parent.command
arg_index = self._arg_index
        if not base_command:
            raise Exception("Invalid parent command.")
        # Names of the arguments for the current command.
        if not lx.evalN("query commandservice command.argNames ? {%s}" % base_command):
            raise Exception("Parent command has no args. Why do I exist? (Big Questions In Life)")
# Unlike other metadata, we store these two directly inside the value objects for the columns.
values_list = lx.evalN("query commandservice command.argNames ? {%s}" % base_command)
self.argName = values_list[arg_index]
values_list = lx.evalN("query commandservice command.argUsernames ? {%s}" % base_command)
self.argUsername = "%s \x03(c:4113)(%s)" % (values_list[arg_index], self.argName)
# These are the ones I care about for now. If there are others later, we can add them.
query_terms = [
'argTypes',
'argTypeNames',
'argDescs',
'argExamples'
]
# The list of query_terms is arbitrary. I'm just grabbing everything I think is important.
for term in query_terms:
# Remove the last character from the term to make it singular (argNames becomes argName)
property_name = term[:-1]
# Get the full list of values (for all args)
# Note the use of `lx.evalN` as opposed to the normal `lx.eval`. We need to be certain
# that we always receive a list in response, even if the list length is 1.
values_list = lx.evalN('query commandservice command.%s ? {%s}' % (term, base_command))
# Run the query.
setattr(self, property_name, values_list[arg_index])
def parse_string(self, command_string):
'''
        Parse a single modo argument string into its constituent parts and store
        it for display in the treeview.
Args:
command_string (str): modo argument string
Returns:
None
Raises:
ValueError
'''
        # Get the argument value and, if given, its name:
        full_argument = re.search(r'(\S+):(\S+)', command_string)
        if full_argument:
            arg_name = full_argument.group(1)
            # Check if the name of the argument is correct:
            arg_names = [self.args[i]['argNames'] for i in range(len(self.args))]
            if arg_name in arg_names:
                arg_number = arg_names.index(arg_name)
            else:
                raise ValueError("Wrong argument name.")
            arg_value = full_argument.group(2)
        else:
            arg_value = command_string
            # No name prefix given: assume the value is positional and belongs
            # to this argument's own index.
            arg_number = self._arg_index
        # Clean the argument value of "", '' and {} wraps:
        if arg_value[0] == '"' or arg_value[0] == "'" or arg_value[0] == '{':
            arg_value = arg_value[1:-1]
        # Set the value of the argument:
        self._args[arg_number]['argValues'] = arg_value
def convert_string_to_value(self, arg_value):
'''
Convenience method for converting strings to argument values
Args:
arg_value (object): argument value
Returns:
str or None
'''
if arg_value is None:
return None
return str(arg_value)
```
{
"source": "9bstudios/mecco_solo",
"score": 2
}
#### File: mecco_solo/solo/util.py
```python
import lx, lxu
from Notifier import Notifier
from MyOnIdleVisitor import MyOnIdleVisitor
HIDDEN_GROUP_NAME = "solo_hidden"
def queue_idle_visitor(todo_function, *args, **kwargs):
visitor = MyOnIdleVisitor(todo_function, *args, **kwargs)
if visitor.arm():
pfm_svc = lx.service.Platform()
pfm_svc.DoWhenUserIsIdle(visitor, lx.symbol.fUSERIDLE_CMD_STACK_EMPTY)
def get_selection_mode():
"""Returns the current selection mode as any of the following strings:
vertex;edge;polygon;item;pivot;center;ptag
"""
modes = 'vertex;edge;polygon;item;pivot;center;ptag'
for mode in modes.split(';'):
if lx.eval('select.typeFrom %s;%s ?' % (mode, modes)):
return mode
return False
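# Example: returns 'polygon' while in polygon mode; returns False if no
# mode matches.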
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
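# Usage sketch: a class using `__metaclass__ = Singleton` (Python 2 syntax,
# as in SceneStatuses below) always yields the same instance:
#   a = SceneStatuses()
#   b = SceneStatuses()
#   assert a is b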
class SceneStatuses(object):
__metaclass__ = Singleton
def __init__(self):
self.active_scenes = set()
def any_solo_active_scene(self):
return len(self.active_scenes) != 0
def current_scene_is_solo_active(self):
scene = lx.object.Scene(lxu.select.SceneSelection().current())
return scene.__peekobj__() in self.active_scenes
def set_current_scene_active(self, flag):
scene = lx.object.Scene(lxu.select.SceneSelection().current())
if flag:
self.active_scenes.add(scene.__peekobj__())
else:
self.active_scenes.remove(scene.__peekobj__())
def handle_scene_delete(self, scene):
scene = lx.object.Scene(scene)
try:
self.active_scenes.remove(scene.__peekobj__())
except:
pass
def handle_scene_create(self, scene):
scene = lx.object.Scene(scene)
try:
group = scene.ItemLookup(HIDDEN_GROUP_NAME)
self.active_scenes.add(scene.__peekobj__())
except:
pass
```
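The `MyOnIdleVisitor` class imported above is not included in this extract. A minimal sketch of what it might look like, assuming it wraps `lxifc.Visitor` with a one-shot `arm()` guard so a callable is deferred until MODO is idle; the real implementation may differ:
```python
import lxifc

class MyOnIdleVisitor(lxifc.Visitor):
    """Defers a callable until MODO is idle; re-armed on each queueing."""
    def __init__(self, todo_function, *args, **kwargs):
        self._todo = todo_function
        self._args = args
        self._kwargs = kwargs
        self._armed = False

    def arm(self):
        # Return True only the first time, so the caller queues us once.
        if self._armed:
            return False
        self._armed = True
        return True

    def vis_Evaluate(self):
        # Called by MODO's platform service once the user is idle.
        self._armed = False
        self._todo(*self._args, **self._kwargs)
```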
{
"source": "9bstudios/mecco_tagger",
"score": 2
}
#### File: mecco_tagger/lxserv/tagger_pTag_quickSelect_Popup.py
```python
import lx, lxifc, lxu.command, tagger
CMD_NAME = tagger.CMD_PTAG_QUICK_SELECT_POPUP
class CommandClass(tagger.CommanderClass):
#_commander_default_values = []
def commander_arguments(self):
return [
{
'name': tagger.TAGTYPE,
'label': tagger.LABEL_TAGTYPE,
'datatype': 'string',
'default': tagger.MATERIAL,
'values_list_type': 'popup',
'values_list': tagger.POPUPS_TAGTYPES,
'flags': []
}, {
'name': tagger.TAG,
'label': self.tag_label,
'datatype': 'string',
'default': '',
'values_list_type': 'popup',
'values_list': self.list_tags,
'flags': ['query'],
}
]
def commander_execute(self, msg, flags):
if not self.commander_arg_value(1):
return
tagType = self.commander_arg_value(0, tagger.MATERIAL)
tag = self.commander_arg_value(1)
args = tagger.build_arg_string({
tagger.TAGTYPE: tagType,
tagger.TAG: tag
})
lx.eval(tagger.CMD_SELECT_ALL_BY_DIALOG + args)
notifier = tagger.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_DATATYPE)
def tag_label(self):
tagType = self.commander_arg_value(0, tagger.MATERIAL)
label = tagger.convert_to_tagType_label(tagType)
return "%s %s" % (tagger.LABEL_SELECT_TAG, label)
def list_tags(self):
tagType = self.commander_arg_value(0, tagger.MATERIAL)
i_POLYTAG = tagger.convert_to_iPOLYTAG(tagType)
tags = tagger.scene.all_tags_by_type(i_POLYTAG)
return tags
def commander_notifiers(self):
return [('notifier.editAction',''), ("select.event", "item +ldt"), ("tagger.notifier", "")]
lx.bless(CommandClass, CMD_NAME)
```
#### File: mecco_tagger/lxserv/tagger_pTagReplace.py
```python
import lx, lxu.command, lxifc, traceback, modo, tagger
CMD_NAME = tagger.CMD_PTAG_REPLACE
DEFAULTS = [tagger.MATERIAL, '', '']
def selected_tag(tagType):
active_layers = tagger.items.get_active_layers()
polys = []
if active_layers:
for layer in active_layers:
polys.extend(layer.geometry.polygons.selected)
    if polys:
        return polys[0].tags()[tagType]
    # No polygon selection (including the no-active-layers case): fall back
    # to the default.
    return DEFAULTS[1]
class CommandClass(tagger.CommanderClass):
#_commander_default_values = []
def commander_arguments(self):
return [
{
'name': tagger.TAGTYPE,
'label': tagger.LABEL_TAGTYPE,
'datatype': 'string',
'default': tagger.MATERIAL,
'values_list_type': 'popup',
'values_list': tagger.POPUPS_TAGTYPES,
'flags': [],
}, {
'name': tagger.REPLACETAG,
'label': tagger.LABEL_REPLACE_TAG,
'datatype': 'string',
'default': selected_tag(tagger.MATERIAL),
'flags': [],
'values_list_type': 'sPresetText',
'values_list': tagger.scene.all_tags
}, {
'name': tagger.WITHTAG,
'label': tagger.LABEL_WITH_TAG,
'datatype': 'string',
'default': "",
'flags': ['optional'],
'values_list_type': 'sPresetText',
'values_list': tagger.scene.all_tags
}, {
'name': tagger.IGNORE_CASE,
'label': tagger.LABEL_IGNORE_CASE,
'datatype': 'boolean',
'value': False,
'flags': ['optional']
}, {
'name': tagger.REGEXP,
'label': tagger.LABEL_REGEXP,
'datatype': 'boolean',
'value': False,
'flags': ['optional']
}
]
def commander_execute(self, msg, flags):
tagType = self.commander_arg_value(0)
replaceTag = self.commander_arg_value(1)
withTag = self.commander_arg_value(2)
ignoreCase = self.commander_arg_value(3)
regexp = self.commander_arg_value(4)
if not withTag:
withTag = None
hitcount = tagger.scene.replace_tag(tagType, replaceTag, withTag, ignoreCase, regexp)
if hitcount == 0:
try:
modo.dialogs.alert(
tagger.DIALOGS_TAG_NOT_FOUND[0],
tagger.DIALOGS_TAG_NOT_FOUND[1] % (tagType, replaceTag)
)
except:
pass
elif hitcount >= 1:
try:
modo.dialogs.alert(
tagger.DIALOGS_TAG_REPLACED[0],
tagger.DIALOGS_TAG_REPLACED[1] % (hitcount, tagType, replaceTag)
)
except:
pass
notifier = tagger.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_DATATYPE)
lx.bless(CommandClass, CMD_NAME)
```
#### File: mecco_tagger/lxserv/tagger_setMaterial_existing_Popup.py
```python
import lx, lxifc, lxu.command, tagger
CMD_NAME = tagger.CMD_SET_EXISTING_POPUP
def tagsHack():
tags = tagger.items.get_all_masked_tags()
timer = tagger.DebugTimer()
hackedTags = []
if not tags:
hackedTags.append((None, tagger.LABEL_NONE))
for tag in sorted(tags):
tag_internal = tagger.TAGTYPE_SEP.join((tag[0], tag[1]))
tag_user = "%s (%s)" % (tag[1], tag[0])
hackedTags.append((tag_internal, tag_user))
timer.end()
return hackedTags
class CommandClass(tagger.CommanderClass):
#_commander_default_values = []
def commander_arguments(self):
return [
{
'name': tagger.TAG,
'label': tagger.LABEL_TAG_WITH_MASKED,
'datatype': 'string',
'default': '',
'values_list_type': 'popup',
'values_list': tagsHack,
'flags': ['query'],
}, {
'name': tagger.SCOPE,
'label': tagger.LABEL_SCOPE,
'datatype': 'string',
'default': tagger.SCOPE_SELECTED,
'flags': ['optional'],
'values_list_type': 'sPresetText',
'values_list': tagger.POPUPS_SCOPE
}
]
def commander_execute(self, msg, flags):
if not self.commander_arg_value(0):
return
tag = self.commander_arg_value(0).split(tagger.TAGTYPE_SEP)
connected = self.commander_arg_value(1)
args = tagger.build_arg_string({
tagger.TAGTYPE: tag[0],
tagger.TAG: tag[1],
tagger.SCOPE: connected
})
lx.eval(tagger.CMD_PTAG_SET + args)
notifier = tagger.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_DATATYPE)
def commander_notifiers(self):
return [('notifier.editAction',''), ("select.event", "item +ldt"), ("tagger.notifier", "")]
lx.bless(CommandClass, CMD_NAME)
```
#### File: mecco_tagger/lxserv/tagger_setMaterial_items.py
```python
import lx, lxu, modo, tagger, traceback
NAME_CMD = tagger.CMD_SET_ITEM
class CommandClass(tagger.CommanderClass):
#_commander_default_values = []
def commander_arguments(self):
return [
{
'name': tagger.PRESET,
'label': tagger.LABEL_PRESET,
'datatype': 'string',
'default': tagger.RANDOM,
'values_list_type': 'popup',
'values_list': tagger.presets.presets_popup(),
'flags': ['optional']
}, {
'name': tagger.WITH_EXISTING,
'label': tagger.LABEL_WITH_EXISTING,
'datatype': 'string',
'default': tagger.KEEP,
'values_list_type': 'popup',
'values_list': tagger.POPUPS_WITH_EXISTING,
'flags': ['optional']
}
]
def commander_execute(self, msg, flags):
preset = self.commander_arg_value(0)
withExisting = self.commander_arg_value(1)
if preset == tagger.RANDOM:
preset = None
if not withExisting:
withExisting = tagger.KEEP
items = tagger.items.get_selected_and_maskable()
for item in items:
existing_masks = tagger.shadertree.get_masks(item)
if existing_masks and withExisting == 'use':
return
elif existing_masks and withExisting == 'remove':
tagger.shadertree.seek_and_destroy(item)
elif existing_masks and withExisting == 'consolidate':
tagger.shadertree.consolidate(item)
mask = tagger.shadertree.build_material( item, preset = preset )
tagger.shadertree.move_to_base_shader(mask)
notifier = tagger.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_DATATYPE)
lx.bless(CommandClass, NAME_CMD)
```
#### File: mecco_tagger/lxserv/tagger_setMaterial_itemsRemove.py
```python
import lx, lxu, modo, tagger, traceback
NAME_CMD = tagger.CMD_SET_ITEM_REMOVE
class CommandClass(tagger.CommanderClass):
#_commander_default_values = []
def commander_execute(self, msg, flags):
items = tagger.items.get_selected_and_maskable()
tagger.shadertree.seek_and_destroy(items)
notifier = tagger.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_DATATYPE)
lx.bless(CommandClass, NAME_CMD)
```
#### File: mecco_tagger/lxserv/tagger_shaderTree_consolidateByColor.py
```python
import lx, modo, lxifc, lxu.command, tagger
CMD_NAME = tagger.CMD_SHADERTREE_CONSOLIDATE_BY_COLOR
def color_convert(color):
    # Convert 0.0-1.0 float channels to the 0-255 range expected by the web
    # color lookup (255, not 256, so that 1.0 maps to 255).
    return [i * 255 for i in color]
class CommandClass(tagger.CommanderClass):
#_commander_default_values = []
def commander_execute(self, msg, flags):
all_masks = modo.Scene().items('mask')
target_masks = []
unique_colors = set()
consolidation_masks = []
for mask in all_masks:
if mask.parent.id != modo.Scene().renderItem.id:
continue
if mask.channel(lx.symbol.sICHAN_MASK_PTYP).get() not in ('Material', ''):
continue
if len(mask.children()) != 1:
continue
material = mask.children()[0]
if material.type != 'advancedMaterial':
continue
target_masks.append({"mask_item": mask})
target_masks[-1]["material_item"] = material
target_masks[-1]["color"] = material.channel('diffCol').get()
target_masks[-1]["pTag"] = target_masks[-1]["mask_item"].channel(lx.symbol.sICHAN_MASK_PTAG).get()
unique_colors.add(target_masks[-1]["color"])
for c in unique_colors:
consolidation_masks.append({"color": c})
consolidation_masks[-1]["colorname"] = tagger.colors.ColorNames.findNearestWebColorName(color_convert(c))
consolidation_masks[-1]["hitlist"] = [m for m in target_masks if m["color"] == c]
for c in consolidation_masks:
c["pTag"] = c["colorname"]
all_existing_tags = tagger.scene.all_tags_by_type(lx.symbol.i_POLYTAG_MATERIAL)
n = 0
while c["pTag"] in all_existing_tags:
n += 1
c["pTag"] = "_".join((c["colorname"], str(n)))
c["consolidation_mask"] = tagger.shadertree.build_material(pTag = c["pTag"])
c["consolidation_mask"].children()[0].channel('diffCol').set(c["color"])
# The material.reassign command expects no textureLayers to be selected.
to_restore = [i for i in modo.Scene().selected if i.superType == 'textureLayer']
for textureLayer in to_restore:
textureLayer.deselect()
for hit in c["hitlist"]:
tagger.safe_removeItems([hit["mask_item"]], True)
lx.eval('!material.reassign {%s} %s' % (hit["pTag"], c["pTag"]))
for textureLayer in to_restore:
try:
textureLayer.select()
except:
pass
lx.bless(CommandClass, CMD_NAME)
```
#### File: mecco_tagger/lxserv/tagger_shaderTree_maskUnmasked.py
```python
import lx, lxu.command, lxifc, traceback, modo, tagger
CMD_NAME = tagger.CMD_SHADERTREE_MASK_UNMASKED
class CommandClass(tagger.CommanderClass):
#_commander_default_values = []
def commander_arguments(self):
return [
{
'name': tagger.TAGTYPE,
'label': tagger.LABEL_TAGTYPE,
'datatype': 'string',
'default': tagger.MATERIAL,
'values_list_type': 'popup',
'values_list': tagger.POPUPS_TAGTYPES,
'flags': [],
}
]
def commander_execute(self, msg, flags):
tagType = self.commander_arg_value(0)
i_POLYTAG = tagger.convert_to_iPOLYTAG(tagType)
counter = 0
for pTag in tagger.scene.all_tags_by_type(i_POLYTAG):
if not tagger.shadertree.get_masks( pTags = { pTag: i_POLYTAG }):
new_mask = tagger.shadertree.build_material(i_POLYTAG = i_POLYTAG, pTag = pTag)
counter += 1
try:
modo.dialogs.alert(
tagger.DIALOGS_MASKED_TAGS_COUNT[0],
tagger.DIALOGS_MASKED_TAGS_COUNT[1] % counter
)
except:
pass
notifier = tagger.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_DATATYPE)
lx.bless(CommandClass, CMD_NAME)
```
#### File: mecco_tagger/tagger/scene.py
```python
import modo, lx
import util
import re
from debug import *
from var import *
def add_pTag_to_recent(pTag, tagType):
old_tag = modo.Scene().sceneItem.getTags().get(SCENE_TAG_RECENT)
if old_tag:
tags_list = old_tag.split(TAG_SEP)
else:
tags_list = []
tags_list = [TAGTYPE_SEP.join((tagType, pTag))] + tags_list
    # removes duplicates while maintaining list order
tags_list = [ii for n,ii in enumerate(tags_list) if ii not in tags_list[:n]]
if len(tags_list) > SCENE_TAG_RECENT_MAX:
tags_list = tags_list[:SCENE_TAG_RECENT_MAX]
modo.Scene().sceneItem.setTag(SCENE_TAG_RECENT, TAG_SEP.join(tags_list))
def get_recent_pTags():
tags = modo.Scene().sceneItem.getTags().get(SCENE_TAG_RECENT)
if tags:
tags = tags.split(TAG_SEP)
tags = [tuple(i.split(TAGTYPE_SEP)) for i in tags]
return tags
def all_tags_by_type(i_POLYTAG):
timer = DebugTimer()
tags = set()
for m in modo.Scene().meshes:
n = m.geometry.internalMesh.PTagCount(i_POLYTAG)
for i in xrange(n):
tags.add(m.geometry.internalMesh.PTagByIndex(i_POLYTAG, i))
timer.end()
return sorted(list(tags))
def all_tags():
timer = DebugTimer()
tags = set()
for m in modo.Scene().meshes:
for i_POLYTAG in (lx.symbol.i_POLYTAG_MATERIAL, lx.symbol.i_POLYTAG_PICK, lx.symbol.i_POLYTAG_PART):
n = m.geometry.internalMesh.PTagCount(i_POLYTAG)
for i in range(n):
tags.add(m.geometry.internalMesh.PTagByIndex(i_POLYTAG, i))
timer.end()
return sorted(list(tags))
def compareRegexp(pattern, string, ignoreCase, regexp):
    if ignoreCase:
        if regexp:
            return re.match(pattern, string, re.IGNORECASE) is not None
        else:
            return pattern.lower() == string.lower()
    else:
        if regexp:
            return re.match(pattern, string) is not None
        else:
            return pattern == string
# Replace all leftmost occurrences of 'search' by 'replace'. Function is case sensitive
def replaceStringCase(string, search, replace):
return string.replace(search, replace)
# Replace all leftmost occurrences of regexp pattern 'search' by 'replace'. Function is case sensitive
def replaceRegexpCase(string, search, replace):
pattern = re.compile(search)
return pattern.sub(replace, string)
# Replace all leftmost occurrences of 'search' by 'replace'. Function ignores case
def replaceStringIgnoreCase(string, search, replace):
# There is no standard Python function for this. Have to implement it.
idx = 0
while idx < len(string):
pos = string.lower().find(search.lower(), idx)
if pos == -1:
break
string = string[:pos] + replace + string[pos + len(search):]
idx = pos + len(replace)
return string
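# Example: replaceStringIgnoreCase("Red RED red", "red", "blue")
# returns "blue blue blue".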
# Replace all leftmost occurrences of regexp pattern 'search' by 'replace'. Function ignores case
def replaceRegexpIgnoreCase(string, search, replace):
pattern = re.compile(search, re.IGNORECASE)
return pattern.sub(replace, string)
def meshes_with_pTag(pTag, i_POLYTAG):
meshes = set()
for m in modo.Scene().meshes:
tags = set()
n = m.geometry.internalMesh.PTagCount(i_POLYTAG)
for i in range(n):
tags.add(m.geometry.internalMesh.PTagByIndex(i_POLYTAG, i))
if pTag in tags:
meshes.add(m)
return list(meshes)
def meshes_with_pTag_Regexp(pTag, i_POLYTAG, ignoreCase, regexp):
meshes = set()
for m in modo.Scene().meshes:
tags = set()
n = m.geometry.internalMesh.PTagCount(i_POLYTAG)
for i in range(n):
tags.add(m.geometry.internalMesh.PTagByIndex(i_POLYTAG, i))
for tag in tags:
if compareRegexp(pTag, tag, ignoreCase, regexp):
meshes.add(m)
break
return list(meshes)
def replace_tag(tagType, replaceTag, withTag, ignoreCase, regexp):
i_POLYTAG = util.convert_to_iPOLYTAG(tagType)
meshes = meshes_with_pTag_Regexp(replaceTag, i_POLYTAG, ignoreCase, regexp)
hitcount = 0
for mesh in meshes:
with mesh.geometry as geo:
hitlist = set()
for poly in geo.polygons:
if tagType in [MATERIAL, PART]:
if compareRegexp(replaceTag, poly.getTag(i_POLYTAG), ignoreCase, regexp):
hitlist.add(poly)
hitcount += 1
elif tagType == PICK:
if not poly.getTag(i_POLYTAG):
continue
pickTags = set(poly.getTag(i_POLYTAG).split(";"))
for tag in pickTags:
if compareRegexp(replaceTag, tag, ignoreCase, regexp):
hitlist.add(poly)
hitcount += 1
break
# Building replace function based of ignoreCase and regexp flags
if ignoreCase:
if regexp:
replace = replaceRegexpIgnoreCase
else:
replace = replaceStringIgnoreCase
else:
if regexp:
replace = replaceRegexpCase
else:
replace = replaceStringCase
with mesh.geometry as geo:
for poly in hitlist:
if tagType in [MATERIAL, PART]:
poly.setTag(i_POLYTAG, replace(poly.getTag(i_POLYTAG), replaceTag, withTag))
elif tagType == PICK:
pickTags = set(poly.getTag(i_POLYTAG).split(";"))
if withTag:
                        newTags = map(lambda tag: replace(tag, replaceTag, withTag) if compareRegexp(replaceTag, tag, ignoreCase, regexp) else tag, pickTags)
else:
newTags = list()
for tag in pickTags:
                            if not compareRegexp(replaceTag, tag, ignoreCase, regexp):
newTags.append(tag)
poly.setTag(i_POLYTAG, ";".join(newTags))
return hitcount
```
#### File: mecco_tagger/tagger/TaggerPresetPaths.py
```python
class TaggerPresetPaths(object):
_preset_paths = []
def __init__(self):
pass
@classmethod
def add_path(cls, path):
"""An array of preset paths that will be appended to the Tagger material
preset popup."""
cls._preset_paths.append(path)
def paths():
doc = "Returns the list of registered search paths."
def fget(self):
return self._preset_paths
return locals()
paths = property(**paths())
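    # Usage sketch (hypothetical path):
    #   TaggerPresetPaths.add_path('kit_mecco_tagger:presets')
    #   TaggerPresetPaths().paths  # -> ['kit_mecco_tagger:presets']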
```
{
"source": "9bstudios/mecco_Zen",
"score": 2
}
#### File: mecco_Zen_10/lxserv/zen_GroupAdd.py
```python
import lx, lxifc, lxu.command, modo
from zen import CommanderClass
CMD_NAME = 'zen.groupAdd_Popup'
NEW = ('new', '(new group)')
class CommandClass(CommanderClass):
def commander_arguments(self):
return [
{
'name': 'group',
'label': 'Add to Group',
'datatype': 'string',
'default': '',
'values_list_type': 'popup',
'values_list': self.list_groups,
'flags': ['query'],
}
]
def commander_execute(self, msg, flags):
if not self.commander_arg_value(0):
return
group_id = self.commander_arg_value(0)
if group_id == NEW[0]:
lx.eval('?group.create {} mode:selItems')
return
else:
group = modo.Scene().item(group_id)
itemGraph = lx.object.ItemGraph(modo.Scene().GraphLookup('itemGroups'))
        for item in [i for i in modo.Scene().selected if i.superType is not None]:
itemGraph.AddLink(group,item)
modo.dialogs.alert("Added Items to Group", "Added %s selected items to '%s'." % (len(modo.Scene().selected), group.name))
def list_groups(self):
groups_list = [NEW]
groups_list += sorted([(i.id, i.name) for i in modo.Scene().iterItemsFast('group')], key=lambda x: x[1])
return groups_list
lx.bless(CommandClass, CMD_NAME)
```
#### File: mecco_Zen_10/lxserv/zen_labeledPopover.py
```python
import lx, lxifc, lxu.command
class CommandClass(lxu.command.BasicCommand):
def __init__(self):
lxu.command.BasicCommand.__init__(self)
self.dyna_Add('cmd', lx.symbol.sTYPE_STRING)
self.dyna_Add('label', lx.symbol.sTYPE_STRING)
self.dyna_Add('recommended', lx.symbol.sTYPE_STRING)
def basic_ButtonName(self):
return "%s \x03(c:25132927)(Recommended: %s)" % (self.dyna_String(1), self.dyna_String(2))
def cmd_Execute(self,flags):
lx.eval("%s" % self.dyna_String(0))
lx.bless(CommandClass, "zen.labeledPopover")
class CommandClass(lxu.command.BasicCommand):
def __init__(self):
lxu.command.BasicCommand.__init__(self)
self.dyna_Add('cmd', lx.symbol.sTYPE_STRING)
self.dyna_Add('label', lx.symbol.sTYPE_STRING)
def basic_ButtonName(self):
return 'Map "%s" to key...' % self.dyna_String(1)
def cmd_Execute(self,flags):
lx.eval('cmds.mapKey {} "%s" .global (stateless) .anywhere' % self.dyna_String(0))
lx.bless(CommandClass, "zen.labeledMapKey")
```
#### File: mecco_Zen_10/lxserv/zen_mapping_FCL.py
```python
import lx, lxifc, lxu.command
FORMS = [
{
"label":"Zen Toolbox",
"recommended": "V",
"cmd":"attr.formPopover {zen_toolbox_full}"
}, {
"label":"Zen Palettes List",
"recommended": "G",
"cmd":"attr.formPopover {zen_palettesPopover:sheet}"
}, {
"label":"Recent Tools",
"recommended": "ctrl-R",
"cmd":"attr.formPopover {55281439258:sheet}"
}, {
"label":"Workplane Pie",
"recommended": "alt-W",
"cmd":"attr.formPopover {ZenPie_Workplane:sheet}"
}, {
"label":"Snapping Pie",
"recommended": "alt-X",
"cmd":"attr.formPopover {ZenPie_Snapping:sheet}"
}, {
"label":"Falloff Pie",
"recommended": "alt-F",
"cmd":"attr.formPopover {ZenPie_Falloff:sheet}"
}, {
"label":"ActionCtr Pie",
"recommended": "alt-A",
"cmd":"attr.formPopover {ZenPie_ActionCtr:sheet}"
}, {
"label":"Layout Frames Pie",
"recommended": "ctrl-shift-Space",
"cmd":"zen.framesPie"
}
]
def list_commands():
fcl = []
for n, form in enumerate(sorted(FORMS, key=lambda k: k['label']) ):
fcl.append("zen.labeledPopover {%s} {%s} {%s}" % (form["cmd"], form["label"], form["recommended"]))
fcl.append("zen.labeledMapKey {%s} {%s}" % (form["cmd"], form["label"]))
if n < len(FORMS)-1:
fcl.append('- ')
return fcl
class CommandListClass(lxifc.UIValueHints):
def __init__(self, items):
self._items = items
def uiv_Flags(self):
return lx.symbol.fVALHINT_FORM_COMMAND_LIST
def uiv_FormCommandListCount(self):
return len(self._items)
def uiv_FormCommandListByIndex(self,index):
return self._items[index]
class CommandClass(lxu.command.BasicCommand):
def __init__(self):
lxu.command.BasicCommand.__init__(self)
self.dyna_Add('query', lx.symbol.sTYPE_INTEGER)
self.basic_SetFlags(0, lx.symbol.fCMDARG_QUERY)
def arg_UIValueHints(self, index):
if index == 0:
return CommandListClass(list_commands())
def cmd_Execute(self,flags):
pass
def cmd_Query(self,index,vaQuery):
pass
lx.bless(CommandClass, "zen.mapping_FCL")
```
#### File: mecco_Zen_10/lxserv/zen_miniToolboxVisibility.py
```python
import lx, lxifc, lxu
import zen
CMD_NAME = 'zen.miniToolboxVisibility'
def viewport_is_visible(tag, direction, restore_tag):
return lx.eval('viewport.hide ? tag %s %s %s' % (tag, direction, restore_tag))
def safely_hide_viewport(tag, direction, restore_tag):
if viewport_is_visible(tag, direction, restore_tag):
lx.eval('viewport.hide false tag %s %s %s' % (tag, direction, restore_tag))
def safely_show_viewport(tag, direction, restore_tag):
if not viewport_is_visible(tag, direction, restore_tag):
lx.eval('viewport.hide true tag %s %s %s' % (tag, direction, restore_tag))
class CommandClass(zen.CommanderClass):
def commander_arguments(self):
return [
{
'name': 'visible',
'datatype': 'boolean',
'flags': ['query']
}
]
def commander_execute(self, msg, flags):
enable = self.commander_arg_value(0)
side = 'right' if lx.eval("user.value right_handed_toolboxes ?") else 'left'
if enable:
safely_show_viewport('zen6_toolboxes_%s_tag' % side, side, 'zen6_toolboxes_%s_restore' % side)
else:
safely_hide_viewport('zen6_toolboxes_%s_tag' % side, side, 'zen6_toolboxes_%s_restore' % side)
notifier = zen.Notifier()
notifier.Notify(lx.symbol.fCMDNOTIFY_CHANGE_ALL)
def commander_query(self, index):
if index == 0:
side = 'right' if lx.eval("user.value right_handed_toolboxes ?") else 'left'
return viewport_is_visible('zen6_toolboxes_%s_tag' % side, side, 'zen6_toolboxes_%s_restore' % side)
def commander_notifiers(self):
return [("zen.notifier", "")]
lx.bless(CommandClass, CMD_NAME)
```
#### File: mecco_Zen_10/lxserv/zen_startup.py
```python
import lx, modo, os, re
from zen import CommanderClass
CMD_NAME = 'zen.startup'
class myGreatCommand(CommanderClass):
def commander_execute(self, msg, flags):
if not lx.eval('query scriptsysservice userValue.isDefined ? zen_version'):
lx.eval('user.defNew zen_version string')
lx.eval('user.value zen_version {}')
zen_version_from_config = lx.eval("user.value zen_version ?")
kit_folder = lx.eval("query platformservice alias ? {kit_mecco_zen:}")
index_file = os.path.join(kit_folder, "index.cfg")
# xml.etree is not included in MODO install, so we need a hack
# index_xml = xml.etree.ElementTree.parse(index_file).getroot()
# zen_version_installed = index_xml.attrib["version"]
# Regex is hardly ideal for this. But it works in the absence of an XML parser.
with open(index_file, 'r') as index_file_data:
xml_as_string = index_file_data.read().replace('\n', '')
r = r'<[ ]*configuration[^>]*version[ =]*[\"\']([^\"\']*)[\"\']'
m = re.search(r, xml_as_string)
zen_version_installed = m.group(1)
if not zen_version_from_config:
lx.eval('zen.mapDefaultHotkeys')
elif zen_version_from_config != zen_version_installed:
modo.dialogs.alert(
"New Zen Version",
"IMPORTANT: New version of Zen detected. Reset MODO prefs using:\nSystem > Reset Preferences"
)
lx.eval("user.value zen_version %s" % zen_version_installed)
lx.bless(myGreatCommand, CMD_NAME)
```
#### File: mecco_Zen_10/lxserv/zen_switchViewAndToolbox.py
```python
import lx, lxifc, lxu
import zen
CMD_NAME = 'zen.switchViewAndToolbox'
class CommandClass(zen.CommanderClass):
def commander_arguments(self):
return [
{
'name': 'viewportTag',
'datatype': 'string'
}, {
'name': 'toolboxName',
'datatype': 'string'
}, {
'name': 'isLatest',
'datatype': 'boolean',
'flags': ['query', 'optional']
}
]
def commander_execute(self, msg, flags):
try:
lx.eval('viewport.tabWithTag %s' % self.commander_arg_value(0))
lx.eval('zen.toolboxSelector %s' % self.commander_arg_value(1))
lx.eval("user.value zen_latest_viewport %s" % self.commander_arg_value(0))
except:
pass
def commander_query(self, index):
if index == 2:
return self.commander_arg_value(0) == lx.eval("user.value zen_latest_viewport ?")
lx.bless(CommandClass, CMD_NAME)
``` |
{
"source": "9budddy/PythonCS100",
"score": 4
} |
#### File: org.budddy/ExerciseOne/BinaryString.py
```python
def is_binstr(s):
for char in s:
if char != "1" and char != "0":
print("This is not a binary string: " + s)
return
print("This is a binary string: " + s)
is_binstr("101010111")
is_binstr("15101410")
is_binstr("99428192")
is_binstr("000000000")
is_binstr("11111111")
```
#### File: org.budddy/ExerciseOne/BinaryToDecimal.py
```python
def binstr_expan(s):
for char in s:
if char != "1" and char != "0":
print("This is not a binary string: " + s)
return
length = len(s) - 1
currentLength = length
totalConvert = 0
while currentLength >= 0:
exponent = length - currentLength
binConvert = int(s[currentLength]) * 2 ** exponent
totalConvert += binConvert
currentLength -= 1
print(totalConvert)
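# Worked example: "10001" -> 1*2**4 + 0*2**3 + 0*2**2 + 0*2**1 + 1*2**0 = 17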
binstr_expan("10001")
binstr_expan("10001111")
binstr_expan("11")
binstr_expan("95210")
``` |
{
"source": "9cat/dydx-v3-python",
"score": 3
} |
#### File: dydx-v3-python/tests/test_constants.py
```python
from dydx3.constants import SYNTHETIC_ASSET_MAP, SYNTHETIC_ASSET_ID_MAP, ASSET_RESOLUTION, COLLATERAL_ASSET
class TestConstants():
def test_constants_have_regular_structure(self):
for market, asset in SYNTHETIC_ASSET_MAP.items():
            market_parts = market.split('-')
            assert len(market_parts) == 2
            base_token, quote_token = market_parts
            assert base_token == asset
            assert quote_token == 'USD'
assert list(SYNTHETIC_ASSET_MAP.values()) == list(SYNTHETIC_ASSET_ID_MAP.keys())
assets = [x for x in ASSET_RESOLUTION.keys() if x != COLLATERAL_ASSET]
assert assets == list(SYNTHETIC_ASSET_MAP.values())
``` |
{
"source": "9DemonFox/YEDDE_for_python3.7",
"score": 2
} |
#### File: 9DemonFox/YEDDE_for_python3.7/YEDDA_Admin.py
```python
from tkinter import *
from tkinter.ttk import *#Frame, Button, Label, Style, Scrollbar
import tkinter.filedialog as tkFileDialog
import tkinter.font as tkFont
import re
from collections import deque
import pickle
import os.path
import platform
from utils.recommend import *
from utils.metric4ann import *
from utils.compareAnn import *
import tkinter.messagebox as tkMessageBox
class Example(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
self.Version = "YEDDA-V1.0 Administrator"
self.OS = platform.system().lower()
self.parent = parent
self.fileName = ""
# default GUI display parameter
self.textColumn = 3
self.initUI()
def initUI(self):
self.parent.title(self.Version)
self.pack(fill=BOTH, expand=True)
for idx in range(0,self.textColumn):
if idx == 1:
self.columnconfigure(idx, weight =10)
else:
self.columnconfigure(idx, weight =1)
# for idx in range(0,2):
# self.rowconfigure(idx, weight =1)
the_font=('TkDefaultFont', 18, )
style0 = Style()
style0.configure(".", font=the_font, )
width_size = 30
abtn = Button(self, text="Multi-Annotator Analysis", command=self.multiFiles, width = width_size)
abtn.grid(row=0, column=1)
recButton = Button(self, text="Pairwise Comparison", command=self.compareTwoFiles, width = width_size)
recButton.grid(row=1, column=1)
cbtn = Button(self, text="Quit", command=self.quit, width = width_size)
cbtn.grid(row=2, column=1)
def ChildWindow(self, input_list, result_matrix):
file_list = []
for dir_name in input_list:
if ".ann" in dir_name:
dir_name = dir_name[:-4]
if "/" in dir_name:
file_list.append(dir_name.split('/')[-1])
else:
file_list.append(dir_name)
#Create menu
self.popup = Menu(self.parent, tearoff=0)
self.popup.add_command(label="Next", command=self.selection)
self.popup.add_separator()
def do_popup(event):
# display the popup menu
try:
self.popup.selection = self.tree.set(self.tree.identify_row(event.y))
self.popup.post(event.x_root, event.y_root)
finally:
# make sure to release the grab (Tk 8.0a1 only)
self.popup.grab_release()
#Create Treeview
win2 = Toplevel(self.parent)
new_element_header=file_list
treeScroll = Scrollbar(win2)
treeScroll.pack(side=RIGHT, fill=Y)
title_string = "F:Entity/Chunk"
self.tree = Treeview(win2, columns=[title_string]+file_list, show="headings")
self.tree.heading(title_string, text=title_string, anchor=CENTER)
self.tree.column(title_string, stretch=YES, minwidth=50, width=100, anchor=CENTER)
for each_file in file_list:
self.tree.heading(each_file, text=each_file, anchor=CENTER)
self.tree.column(each_file, stretch=YES, minwidth=50, width=100, anchor=CENTER)
for idx in range(len(file_list)):
self.tree.insert("" , 'end', text=file_list[idx], values=[file_list[idx]]+result_matrix[idx], tags = ('chart',))
the_font=('TkDefaultFont', 18, )
self.tree.tag_configure('chart', font=the_font)
style = Style()
style.configure(".", font=the_font, )
style.configure("Treeview", )
style.configure("Treeview.Heading",font=the_font, ) #<----
self.tree.pack(side=TOP, fill=BOTH)
# self.tree.grid()
self.tree.bind("<Button-3>", do_popup)
win2.minsize(30,30)
def selection(self):
print(self.popup.selection)
def multiFiles(self):
ftypes = [('ann files', '.ann')]
filez = tkFileDialog.askopenfilenames(parent=self.parent, filetypes = ftypes, title='Choose a file')
if len(filez) < 2:
tkMessageBox.showinfo("Monitor Error", "Selected less than two files!\n\nPlease select at least two files!")
else:
result_matrix = generate_report_from_list(filez)
self.ChildWindow(filez, result_matrix)
def compareTwoFiles(self):
ftypes = [('ann files', '.ann')]
filez = tkFileDialog.askopenfilenames(parent=self.parent, filetypes = ftypes, title='Choose a file')
if len(filez) != 2:
tkMessageBox.showinfo("Compare Error", "Please select exactly two files!")
else:
f = tkFileDialog.asksaveasfile(mode='w', defaultextension=".tex")
write_result = compareBoundary(filez[0],filez[1],f)
if write_result:
tkMessageBox.showinfo("Latex Generate", "Latex file generated successfully!\n\nSaved to "+ f.name)
# import os
# os.system("pdflatex "+ f.name)
else:
tkMessageBox.showinfo("Latex Error", "Latex generated Error, two files don't have same sentence number!")
f.close()
def main():
print("SUTDAnnotator launched!")
print(("OS:%s")%(platform.system()))
root = Tk()
root.geometry("400x100")
app = Example(root)
root.mainloop()
if __name__ == '__main__':
main()
``` |
{
"source": "9dev/django-fb-rank",
"score": 2
} |
#### File: fb_rank/templatetags/fb_rank.py
```python
from django import template
from .. import utils
register = template.Library()
@register.simple_tag
def get_rank(obj):
return utils.get_rank(obj)
``` |
{
"source": "9dev/django-flags",
"score": 2
} |
#### File: django-flags/flags/admin.py
```python
from django.contrib import admin
from .models import Approve, Flag
class FlagAdmin(admin.ModelAdmin):
list_display = ('content_object', 'creator', 'creation_date')
actions = ['delete_selected_flagged_objects', 'approve']
def approve(self, request, queryset):
for flag in queryset:
Approve.objects.create(content_object=flag.content_object, creator=request.user)
self.message_user(request, "Successfully approved selected objects.")
def delete_selected_flagged_objects(self, request, queryset):
for flag in queryset:
flag.content_object.delete()
flag.delete()
self.message_user(request, "Successfully deleted selected flagged objects.")
class ApproveAdmin(admin.ModelAdmin):
list_display = ('content_object', 'creator', 'creation_date')
admin.site.register(Flag, FlagAdmin)
admin.site.register(Approve, ApproveAdmin)
``` |
{
"source": "9dev/django-ip-system",
"score": 2
} |
#### File: django-ip-system/fts/_base.py
```python
from selenium import webdriver
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core.urlresolvers import reverse
CHROMEDRIVER_PATH = '/usr/bin/chromedriver'
class BaseTestCase(StaticLiveServerTestCase):
fixtures = ['base.json']
def setUp(self):
self.browser = webdriver.Chrome(CHROMEDRIVER_PATH)
def tearDown(self):
self.browser.close()
def get(self, url=None, name=None, *args, **kwargs):
if name:
url = reverse(name, *args, **kwargs)
self.browser.get('{}{}'.format(self.live_server_url, url))
def get_by_id(self, selector):
return self.browser.find_element_by_id(selector)
def set_field(self, field_id, value):
field = self.browser.find_element_by_id(field_id)
field.clear()
field.send_keys(value)
def submit(self):
form = self.browser.find_element_by_tag_name('form')
form.submit()
def get_full_url(self, url):
return '{}{}'.format(self.live_server_url, url)
```
#### File: django-ip-system/ip_system/models.py
```python
from django.db import models
from .utils import get_ip_from_request
class Ip(models.Model):
address = models.GenericIPAddressField(unique=True, db_index=True)
@classmethod
def get_or_create(cls, request):
raw_ip = get_ip_from_request(request)
if not raw_ip:
return None
obj, _ = cls.objects.get_or_create(address=raw_ip)
return obj
def __str__(self):
return self.address.__str__()
``` |
{
"source": "9dev/django-metrics",
"score": 2
} |
#### File: demo/main/views.py
```python
from django.http import HttpResponse
def dummy(request):
return HttpResponse('Go to /metrics')
```
#### File: django-metrics/metrics/metrics.py
```python
class BaseMetric(object):
name = None
def get_name(self):
return self.name
class ValueMetric(BaseMetric):
value = None
def get_value(self):
return self.value
class LineChartMetric(BaseMetric):
x = []
y = []
xlabel = 'X Label'
ylabel = 'Y Label'
def get_values(self):
return zip(self.x, self.y)
def get_points(self):
return ['[{},{}]'.format(x, y) for x, y in self.get_values()]
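# Example (illustrative): with x = [1, 2] and y = [3, 4], get_points() yields
# ['[1,3]', '[2,4]'], strings ready to embed in a JavaScript chart config.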
```
#### File: django-metrics/metrics/views.py
```python
import importlib
from inspect import isclass
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render
from .metrics import LineChartMetric, ValueMetric
metrics = importlib.import_module(settings.METRICS_MODULE)
value_metrics = []
linechart_metrics = []
# collect metrics
for name, attr in metrics.__dict__.items():
if isclass(attr):
if issubclass(attr, ValueMetric) and attr != ValueMetric:
value_metrics.append(attr())
elif issubclass(attr, LineChartMetric) and attr != LineChartMetric:
linechart_metrics.append(attr())
@staff_member_required
def metrics(request):  # rebinds the module-level name 'metrics'; the collection loop above already ran at import time
context = {
'value_metrics': value_metrics,
'linechart_metrics': linechart_metrics,
}
return render(request, 'metrics/metrics.html', context)
``` |
{
"source": "9dev/django-repsystem",
"score": 2
} |
#### File: django-repsystem/repsystem/models.py
```python
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
class Level(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100, unique=True)
required_rep = models.IntegerField(unique=True)
def __str__(self):
return '{} ({})'.format(self.id, self.name)
class Reputation(models.Model):
score = models.PositiveIntegerField(default=0)
level = models.ForeignKey(Level, default=1)
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, primary_key=True)
def __str__(self):
return str(self.score)
class Action(models.Model):
name = models.CharField(max_length=100, unique=True)
message = models.CharField(max_length=1000)
value = models.IntegerField()
def __str__(self):
return self.name
class History(models.Model):
action = models.ForeignKey(Action)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
creation_date = models.DateTimeField(auto_now_add=True)
@receiver(post_save, sender=Reputation)
def post_save_reputation(sender, instance, created, **kwargs):
if not created:
reputation = Reputation.objects.get(pk=instance.pk)
levels = Level.objects.filter(required_rep__lte=reputation.score)
if levels.count() > 0:
new_level = levels.order_by('-required_rep')[0]
if instance.level != new_level:
Reputation.objects.filter(pk=instance.pk).update(level=new_level)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def post_save_user(sender, instance, created, **kwargs):
if created:
Reputation.objects.create(user=instance)
``` |
{
"source": "9eck0/PyTUI",
"score": 3
} |
#### File: lib/EventSystem/KeyPress.py
```python
if __name__ == "__main__":
# MAJOR +1 represents an added function.
__MAJOR = 1
# MINOR +1 represents a change in existing function(s) within the current MAJOR.
__MINOR = 0
__info = """This file contains the module 'KeyEvent', used to integrate key press events.
To use this module in another project, include this file inside the project's directory."""
print("========================================================")
print("KeyPress.py version ", __MAJOR, ".", __MINOR, sep='', end='\n\n')
print(__info)
print("========================================================\n")
input("Press enter to continue...")
#========================Imports========================
# Used by:
import os
from lib.Utils import *
from lib.EventSystem import *
#========================Common Functions========================
# no function
#========================KeyPressEvent classes: KeyPressEventArgs, _KeyPressEventListener, KeyPressEventHandler========================
class KeyPressEventArgs(EventArgs):
def __init__(self, key, key2=b'\x00', **kwargs):
EventArgs.__init__(self, **kwargs)
self.Key = key
self.Key2 = key2
if key2 == b'\x00':
self.isSpecialKey = False
else:
self.isSpecialKey = True
class KeyPressEventListener(EventListener):
def __init__(self, *subscribers):
EventListener.__init__(self, *subscribers)
def notify(self, sender, key, key2=b'\x00'):
"""
Notifies all subscribers about a key press.
Args:
sender:
key:
key2:
"""
EventListener.notify(self, sender, KeyPressEventArgs(key, key2))
class KeyPressEventHandler(EventHandler):
def __init__(self, *subscribers):
if os.name == 'nt':
self._getch = KeyPressEventHandler.__WindowsKeyPress()
else: # fallback method. Most likely os.name == 'posix'
self._getch = KeyPressEventHandler.__UnixKeyPress()
EventHandler.__init__(self, KeyPressEventListener(*subscribers))
def readkey(self, decode=False):
"""
Updates methods and functions subscribed to this event handler.
Any subscriber must implement the exact parameters: subscriber(sender, args: KeyPressEventArgs)
where parameter 'args' contains the string character mapped from the pressed key.
Args:
decode: Whether to decode the key code into the corresponding character.
"""
if os.name == 'nt':
# _getch() in Windows returns a set of two user inputs in latin1 encoding
keycodes = []
            # We need to call _getch() twice per user key input in order to catch combination keys (e.g. Delete key).
            for _ in range(2):
                keycodes.append(self._getch())
                if keycodes[0] != KeyCodes.Null and keycodes[0] != KeyCodes.FunctionPrefix:
                    # If the first key code is not a prefix to a combination key, it is a normal ASCII character.
                    # In this instance, default the second key code to null and stop reading key input.
                    keycodes.insert(1, KeyCodes.Null)
                    break
# Option to decode the key. Bad idea if wanting to detect function keys such as 'Esc'.
if decode:
# Updates the _KeyPressEventListener
self.Listener.notify(self, KeyCodes.tostring(keycodes[0], keycodes[1]))
elif keycodes[1] == b'\x00':
# A key which can be represented as a single Unicode character
self.Listener.notify(self, keycodes[0])
else:
# A special function key that is represented as a combination of two Unicode characters
self.Listener.notify(self, keycodes[0], keycodes[1])
else:
keycode = self._getch()
# Option to decode the key. Bad idea if wanting to detect function keys such as 'Esc'.
if decode:
keycode = keycode.decode('latin1')
# Updates the _KeyPressEventListener
self.Listener.notify(self, keycode)
class __UnixKeyPress:
"""
Credits:
http://code.activestate.com/recipes/134892/
"""
def __init__(self):
try:
import tty, sys
except ImportError as e:
WriteShell("An error occurred while importing module '", e.name,
"' for KeyPressEventHandler initialization. Does this system lack the required module?",
sep='', stderr=True, Color='error', flush=True)
def __call__(self):
try:
import sys, tty, termios
stdin_file = sys.stdin.fileno()
tty_attr = termios.tcgetattr(stdin_file)
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
except ImportError as impE:
WriteShell("An error occurred while importing module '", impE.name,
"' when calling KeyPressEventHandler. Does this system lack the required modules?",
sep='', stderr=True, Color='error', flush=True)
finally:
                termios.tcsetattr(stdin_file, termios.TCSADRAIN, tty_attr)
return ch
class __WindowsKeyPress:
"""
Credits:
http://code.activestate.com/recipes/134892/
"""
def __init__(self):
try:
import msvcrt
except ImportError as e:
WriteShell("An error occurred while importing module '", e.name,
"' for KeyPressEventHandler initialization. Does this system lack the required module?",
sep='', stderr=True, Color='error', flush=True)
def __call__(self):
try:
import msvcrt
return msvcrt.getch()
except ImportError as impE:
WriteShell("An error occurred while importing module '", impE.name,
"' when calling KeyPressEventHandler. Does this system lack the required module?",
sep='', stderr=True, Color='error', flush=True)
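# Usage sketch (an assumption for illustration, not part of the original module):
# a subscriber must match the documented signature subscriber(sender, args), where
# args is a KeyPressEventArgs.
#
#     def on_key(sender, args):
#         if args.isSpecialKey:
#             print("special key:", KeyCodes.tostring(args.Key, args.Key2))
#         else:
#             print("key:", args.Key)
#
#     handler = KeyPressEventHandler(on_key)
#     handler.readkey()  # blocks for one key press, then notifies on_key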
class KeyCodes:
"""
This class contains bytecodes of common unicode characters.
"""
# For special function keys, Python's msvcrt module will return this prefix, followed by the function key's bytecode
FunctionPrefix = b'\xe0'
Null = b'\x00'
Backspace = b'\x08'
BackspaceChar = b'\x7f' # For legacy purposes; deprecated
Escape = b'\x1b'
Enter = b'\n' # Ctrl + Enter/Return or Ctrl + J
Return = b'\r' # Enter/Return or Ctrl + M
Tab = b'\t' # Tab or Ctrl + I
CtrlZ = b'\x1a' # Undo
CtrlX = b'\x18' # Cut
CtrlC = b'\x03' # Copy
CtrlV = b'\x16' # Paste
CtrlB = b'\x02' # Embolden
CtrlN = b'\x0e' # New Item
CtrlM = Return
CtrlA = b'\x01' # Select All
CtrlS = b'\x13' # Save Item
CtrlD = b'\x04'
CtrlF = b'\x06' # Find
CtrlG = b'\x07'
CtrlH = b'\x08'
CtrlJ = Enter
CtrlK = b'\x0b'
CtrlL = b'\x0c'
CtrlQ = b'\x11' # Quit
CtrlW = b'\x17'
CtrlE = b'\x05' # Center
CtrlR = b'\x12'
CtrlT = b'\x14'
CtrlY = b'\x19' # Redo
CtrlU = b'\x15' # Underline
CtrlI = Tab
CtrlO = b'\x0f' # Open Item
CtrlP = b'\x10' # Print
Zero = b'0'
One = b'1'
Two = b'2'
Three = b'3'
Four = b'4'
Five = b'5'
Six = b'6'
Seven = b'7'
Eight = b'8'
Nine = b'9'
CommercialAt = b'@'
NumberSign = b'#'
DollarSign = b'$'
PercentSign = b'%'
Caret = b'^'
Ampersand = b'&'
Grave = b'`'
Tilde = b'~'
Space = b' '
ExclamationMark = b'!'
QuestionMark = b'?'
QuotationMark = b'"'
Apostrophe = b"'"
Comma = b','
Period = b'.'
Colon = b':'
Semicolon = b';'
LeftParenthesis = b'('
RightParenthesis = b')'
LeftBracket = b'['
RightBracket = b']'
LeftCurlyBracket = b'{'
RightCurlyBracket = b'}'
LeftAngleBracket = b'<'
RightAngleBracket = b'>'
Add = b'+'
Subtract = b'-'
Asterisk = b'*'
Slash = b'/'
Backslash = b'\\'
Equal = b'='
Underscore = b'_'
A = b'A'
B = b'B'
C = b'C'
D = b'D'
E = b'E'
F = b'F'
G = b'G'
H = b'H'
I = b'I'
J = b'J'
K = b'K'
L = b'L'
M = b'M'
N = b'N'
O = b'O'
P = b'P'
Q = b'Q'
R = b'R'
S = b'S'
T = b'T'
U = b'U'
V = b'V'
W = b'W'
X = b'X'
Y = b'Y'
Z = b'Z'
a = b'a'
b = b'b'
c = b'c'
d = b'd'
e = b'e'
f = b'f'
g = b'g'
h = b'h'
i = b'i'
j = b'j'
k = b'k'
l = b'l'
m = b'm'
n = b'n'
o = b'o'
p = b'p'
q = b'q'
r = b'r'
s = b's'
t = b't'
u = b'u'
v = b'v'
w = b'w'
x = b'x'
y = b'y'
z = b'z'
CombinationCharacters = {(FunctionPrefix, H) : 'ArrowUp',
(FunctionPrefix, P) : 'ArrowDown',
(FunctionPrefix, K) : 'ArrowLeft',
(FunctionPrefix, M) : 'ArrowRight',
(Null, Semicolon) : 'F1',
(Null, LeftAngleBracket) : 'F2',
(Null, Equal) : 'F3',
(Null, RightAngleBracket) : 'F4',
(Null, QuestionMark) : 'F5',
(Null, CommercialAt) : 'F6',
(Null, A) : 'F7',
(Null, B) : 'F8',
(Null, C) : 'F9',
(Null, D) : 'F10',
(FunctionPrefix, b'\x85') : 'F11',
(FunctionPrefix, b'\x86') : 'F12',
(FunctionPrefix, R) : 'Insert',
(FunctionPrefix, S) : 'Del',
(FunctionPrefix, I) : 'PageUp',
(FunctionPrefix, Q) : 'PageDown',
(FunctionPrefix, G) : 'Home',
(FunctionPrefix, O) : 'End',
(Null, CtrlC) : 'Ctrl+2'}
@staticmethod
def tostring(key1: bytes, key2: bytes=b'\x00'):
"""
Returns the string representation of
Args:
key1: The first bytecode returned from a keypress
key2: The second bytecode returned from a keypress
Returns:
"""
# Those are normal characters, simply decode to their respective string literals
if key2 == b'\x00':
return key1.decode('latin1')
else:
return KeyCodes.CombinationCharacters[(key1, key2)]
#========================Version History========================
# 1.0
"""
Initial Release
Refactored from EventSystem.
See version history from EventSystem.py
Additions
---------
-class KeyPressEventArgs implements EventArgs
-__init__(self, key, **kwargs)
-class _KeyPressEventListener implements EventListener
-__init__(self, *subscribers)
-notify(self, sender, key)
-class KeyPressEventHandler implements EventHandler
-__init__(self, *subscribers)
-readkey(self)
-class UnixKeyPress
-__init__(self)
-__call__(self)
-class WindowsKeyPress
-__init__(self)
-__call__(self)
-class KeyCodes
-Defined constants (under this class) for all latin1 unicode characters
-CombinationCharacters dictionary for special keyboard functions that cannot be represented as a single
Unicode character
-static tostring(key1: bytes, key2: bytes)
"""
```
#### File: PyTUI/lib/ShellGUI_Core.py
```python
if __name__ == "__main__":
# MAJOR +1 represents an added function.
__MAJOR = 1
# MINOR +1 represents a change in existing function(s) within the current MAJOR.
__MINOR = 0 + 1
__info = """This file contains the module 'ShellGUI_Core', used to create a shell-based rendering engine.
To use this module in another project, include this file inside the project's directory."""
print("========================================================")
print("ShellGUI_Core.py version ", __MAJOR, ".", __MINOR, sep='', end='\n\n')
print(__info)
print("========================================================\n")
input("Press enter to continue...")
#========================Imports========================
# Used by: Canvas.draw()
import time
from lib.Utils import *
from lib.Color import Color
from lib.EventSystem.FocusLost import FocusLostEventListener
from lib.EventSystem.KeyPress import *
from lib.EventSystem.OnFocus import OnFocusEventListener
#========================Borders========================
class BorderTypes:
"""
An 'enum' containing border types for drawing borders around components.
"""
NoBorder = ("", "", "", "", "", "", "", "")
BlankBorder = (" ", " ", " ", " ", " ", " ", " ", " ")
ThinBorder = ("┌", "─", "┐", "│", "│", "└", "─", "┘")
ThinHorizontalBorder = ("", "─", "", "", "", "", "─", "")
ThinVerticalBorder = ("", "", "", "│", "│", "", "", "")
ThinUnderline = ("", "", "", "", "", "", "─", "")
ThinOverline = ("", "─", "", "", "", "", "", "")
BlockBorder = ("█", "▀", "█", "█", "█", "▀", "▀", "▀")
BlockHorizontalBorder = ("", "▀", "", "", "", "", "▀", "")
BlockVerticalBorder = ("", "", "", "█", "█", "", "", "")
BlockOverline = ("", "▀", "", "", "", "", "", "")
BlockUnderline = ("", "", "", "", "", "", "▀", "")
def addborder(value: str, bordertype=BorderTypes.ThinBorder):
"""
Adds a four-sided ASCII border around a component.
Args:
value: The string representation of the component's layout. Use <component>.value() to get.
Returns:
The component's value with a border, in string
"""
# If the text is empty
if value == "":
value = " "
buffer: list = str(value).splitlines(False)
width: int = len(LongestFromList(buffer))
# Insert side borders
for lineindex in range(0, len(buffer)):
linelength = len(buffer[lineindex])
if linelength < width:
# The line is not long enough. Add spaces after the line
buffer[lineindex] = buffer[lineindex] + " "*(width-linelength)
buffer[lineindex] = bordertype[3] + buffer[lineindex] + bordertype[4]
# Insert top-and-bottom borders with corner pieces
buffer = [bordertype[0] + bordertype[1] * width + bordertype[2]] + \
buffer + \
[bordertype[5] + bordertype[6] * width + bordertype[7]]
# Delete empty horizontal borders
if buffer[0] == "":
del buffer[0]
if buffer[-1] == "":
del buffer[-1]
return "\n".join(buffer)
#========================Color list========================
class SystemColors:
Default = Color(255, 255, 255)
WindowBorder = Color(96, 96, 96)
Hyperlink = Color(6, 69, 173)
VisitedHyperlink = Color(11, 0, 128)
#========================Canvas class========================
class Canvas:
def __init__(self, width: int, height: int, bordertype=BorderTypes.BlockBorder):
# Dictionary to store all component
self.__elem = {}
# OPTIONS
self.Width = int(width)
self.Height = int(height)
self.Border = bordertype
def add(self, component, z_pos: int=-1):
# We must first check if 'component' passes a subclass of class 'Component'
if not issubclass(type(component), Component):
raise TypeError("Argument 'component' is not an implementation of class 'Component'.")
if z_pos == -1:
self.__elem[len(self.__elem)] = component
else:
self.__elem[z_pos] = component
def setzpos(self, z_pos, new_z_pos):
if z_pos in self.__elem.keys():
self.__elem[new_z_pos] = self.__elem[z_pos]
del self.__elem[z_pos]
def remove(self, z_pos: int):
if z_pos in self.__elem.keys():
del self.__elem[z_pos]
def draw(self, delay: float=0, hideoverflown: bool=False, color: str='default'):
"""
Draws/renders the current canvas with its components.
Args:
delay: A float indicating the delay, in seconds, before rendering starts.
hideoverflown: Boolean indicating whether to omit rendering any component that is partially out of bounds.
color: The color of the canvas. WILL OVERRIDE ANY CUSTOM COMPONENT COLOR (if not set to 'default')!
"""
        # draw elements inside __elem dictionary.
        # __elem maps z-position (int) -> Component; smaller z-positions are drawn first.
# Delay parameter
if delay > 0:
time.sleep(delay)
# This is the current ASCII frame to render.
frame = (" "*self.Width + "\n")*(self.Height-1) + (" "*self.Width)
# This buffer serves to write to individual lines
framebuffer = frame.split(sep="\n")
# Below for block writes individual component to 'framebuffer'
        # We need to write components from the smallest z-pos to the largest, so iterate the keys in sorted order.
        for index in sorted(self.__elem.keys()):
comp: Component = self.__elem[index]
# To write a component onto the frame, we need to separate each line and store the result in a list.
compbuffer: list = comp.value().split(sep="\n")
# Testing if the component overflows for optional parameter 'hideoverflown'
if hideoverflown:
if len(LongestFromList(compbuffer)) > (self.Width - comp.X):
# Object overflows in the x-axis, discard.
                    # Note: Utils.py's LongestFromList() guards against Component.value() implementations whose lines differ in width.
continue
if len(compbuffer) > (self.Height - comp.Y):
# Object overflows in the y-axis, discard.
continue
# Testing if the current component is outside of canvas area:
if comp.X >= self.Width and comp.Y >= self.Height:
# Object entirely outside of canvas, discard.
continue
# Drawing 'comp' inside 'framebuffer'
for lineindex in range(0, len(compbuffer)):
if comp.Y + lineindex >= len(framebuffer) or comp.Y + lineindex < 0:
# This line is out of bounds. Do not draw.
continue
# Convert target 'framebuffer' line into a list of characters with list(str)
linebuffer: list = list(framebuffer[comp.Y + lineindex])
# Convert target 'compbuffer' line into a list of characters
complinebuffer: list = list(compbuffer[lineindex])
# This will replace the line with the current component's content without overflowing.
#linebuffer[comp.X :] = complinebuffer[0 : len(linebuffer)-comp.X] - produces undesirable result
linebuffer[comp.X :] = complinebuffer
# Update the target 'framebuffer' line with the new one
                framebuffer[comp.Y + lineindex] = "".join(linebuffer)
for i in range(0, len(framebuffer)):
# Adds space for lines that do not match canvas's width
if len(framebuffer[i]) < self.Width:
framebuffer[i] += " " * (self.Width - len(framebuffer[i]))
# Cuts overflown lines that do not match canvas's width
if len(framebuffer[i]) > self.Width:
framebuffer[i] = framebuffer[i][0:self.Width]
        # Reassemble frame from framebuffer
frame = "\n".join(framebuffer)
# Adding borders to current frame string
frame = addborder(frame, self.Border)
WriteShell(frame, end="\n", Color=color)
    def ColorPrinter(self, delay: float=0, hideoverflown: bool=False, color: str='default'):
        """
        Placeholder for a color-aware draw variant; left unimplemented in this version.
        """
        raise NotImplementedError
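# Usage sketch (an assumption for illustration; 'Dot' is a hypothetical subclass):
# render a small canvas containing one component whose value() is a single character.
#
#     class Dot(Component):
#         def value(self):
#             return "*"
#
#     canvas = Canvas(20, 5)
#     canvas.add(Dot(location=(3, 2)))
#     canvas.draw()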
# ========================GUI class========================
class GUI:
"""
Class responsible for the creation and maintenance of a canvas and its components.
"""
#====================Magic methods====================
def __init__(self, width: int = 100, height = 25, bordertype = BorderTypes.NoBorder):
self.width = width
self.height = height
self.bordertype = bordertype
        self._active = False
        self._focusindex = 0  # focuses on first component
# UX interactions
self._inputkey = KeyCodes.Null
self._inputkeystr = KeyCodes.tostring(KeyCodes.Null)
self._userinput = KeyPressEventHandler(self._keypressdetector)
#====================Properties====================
    @property
    def active(self):
        """
        Whether the GUI is currently active.
        Returns:
            a boolean indicating the active state
        """
        return self._active
    @active.setter
    def active(self, value: bool):
        """
        Activates or deactivates the GUI; activating starts the GUI loop via _run().
        Args:
            value: a boolean indicating the desired active state
        """
self._active = bool(value)
if bool(value):
self._run()
@property
def focus(self):
"""
        Returns the index of the child component currently receiving focus.
Returns:
the index of the component to focus in integer
"""
return self._focusindex
@focus.setter
def focus(self, value: int):
"""
Focuses on a child component by its index.
Args:
value: the index of the component to focus in integer
"""
if int(value) != self._focusindex:
self._focusindex = int(value)
@property
def height(self):
"""
Returns the width, in characters, of the GUI.
Returns:
an integer representing the height of the GUI
"""
return self._height
@height.setter
def height(self, value: int):
"""
Specifies the height, in characters, of the GUI.
Args:
value: an integer specifying the height of the GUI
"""
self._height = value
@property
def width(self):
"""
Returns the width, in characters, of the GUI.
Returns:
an integer representing the width of the GUI
"""
return self._width
@width.setter
def width(self, value: int):
"""
Specifies the width, in characters, of the GUI.
Args:
value: an integer specifying the width of the GUI
"""
self._width = value
#====================UX====================
    def _keypressdetector(self, sender, args: KeyPressEventArgs):
        """
        Function delegate used to receive user key press input from the event listener.
        Args:
            sender: the KeyPressEventHandler that raised the event
            args: the KeyPressEventArgs carrying the pressed key code(s)
        """
self._inputkey = args.Key
self._inputkeystr = KeyCodes.tostring(args.Key, args.Key2)
def _run(self):
"""
Begins execution of the GUI thread.
Controlled internally by the GUI class.
"""
#========================Component class========================
class Component:
#====================Magic methods====================
def __init__(self, location, width: int = 0, height: int = 0):
self.width = width
self.height = height
self.location = location
self._focused = False
self.handledinput = False # This variable determines whether the GUI should transmit input to the component
self.FocusLost = FocusLostEventListener()
self.KeyPress = KeyPressEventListener()
self.OnFocus = OnFocusEventListener()
def __eq__(self, other):
return str(self) == str(other)
def __repr__(self):
return self.value()
#====================Methods====================
def value(self):
"""
Each component should be a string which, when printed, yields a rectangular footprint.
Returns:
A string representing the component.
"""
return ""
#====================Properties====================
@property
def focused(self):
"""
Whether the component is receiving focus.
"""
return self._focused
@focused.setter
def focused(self, value: bool):
"""
Notifies this component's OnFocus event listener when it receives focus.
Notifies this component's FocusLost event listener when focus is lost.
"""
if value:
self._focused = True
self.OnFocus.notify(sender=self)
else:
self._focused = False
self.FocusLost.notify(sender=self)
@property
def height(self):
"""
Returns the width, in characters, of the UI.
Returns:
an integer representing the height of the UI
"""
return self._height
@height.setter
def height(self, value: int):
"""
Specifies the height, in characters, of the UI.
Args:
value: an integer specifying the height of the UI
"""
self._height = int(value)
    @property
    def location(self):
        """
        The location of the component inside the UI.
        Returns:
            a tuple specifying the component's (x, y) location inside the UI
        """
        return self._location
    @location.setter
    def location(self, value):
        """
        Sets the location of the component inside the UI.
        Args:
            value: a tuple specifying the component's (x, y) location inside the UI
        """
        self._location = (value[0], value[1])
    @property
    def X(self):
        """
        The x-coordinate of the component; Canvas.draw() reads this alias of location[0].
        """
        return self._location[0]
    @property
    def Y(self):
        """
        The y-coordinate of the component; Canvas.draw() reads this alias of location[1].
        """
        return self._location[1]
@property
def width(self):
"""
Returns the width, in characters, of the UI.
Returns:
an integer representing the width of the UI
"""
return self._width
@width.setter
def width(self, value: int):
"""
Specifies the width, in characters, of the UI.
        Set to 0 for automatic sizing.
Args:
value: an integer specifying the width of the UI
"""
self._width = int(value)
#========================Version History========================
# 1.0
"""
Initial Release
Additions
---------
-BorderTypes class
-addborder(value: str, bordertype=BorderTypes.ThinBorder, vertical=True, horizontal=True)
-Canvas class
-__init__(self, width: int, height: int, horizontal_borders=True, vertical_borders=False, bordertype=BorderTypes.BlockBorder)
-add(self, component, z_pos: int=-1)
-setzpos(self, z_pos, new_z_pos)
-remove(self, z_pos: int)
-draw(self, delay: float=0, hideoverflown: bool=False, color: str='default')
-Currently the option to color the canvas is limited: can only color the whole canvas+components altogether, instead of simply the canvas's background
-Canvas is the drawing board on which Components are added
-Component class
-__init__(self, x, y)
-value(self)
-Label(Component) class
-__init__(self, x, y, text, showborder=False, bordertype=BorderTypes.ThinBorder, length=-1, overflowindicator="…")
-value(self)
-Can act not only as a label for simple lines or paragraphs, but also as a textbox, a checkbox, or a button
-Illustration(Component) class
-__init__(self, x, y, ascii, border=False)
-A dud/foobar for later implementation of a drawing component
-Will be used to convert external images into colored ASCII drawings
To-Do
-----
-Add a function to superpose two different strings into one, with option to specify coloring for each string component.
-Implement a GUI system to automatically control canvas drawing and offer additional components that are tied with the event system.
-Most likely needs to be inside a separate Python file, as this system would require dependencies of EventSystem.py.
-Will need to rename this file as ShellGUIbase.py, and migrate Label and Illustration classes to the new ShellGUI_Core.py file.
"""
# 1.1
"""
Additions and changes related to borders
Additions
---------
-Added Version History at the end of ShellGUI_Core.py
-Added more border types as tuple constants in BorderTypes class (under the new border format)
-NoBorder, ThinHorizontalBorder, ThinVerticalBorder, ThinUnderline, ThinOverline, BlockHorizontalBorder, BlockVerticalBorder, BlockOverline, BlockUnderline
Changes
-------
-Changed border structure
-Previously the structure is specified as ([horizontal piece], [vertical piece], [upper left corner], [upper right corner], [lower left corner], [lower right corner])
-Changed to ([ul corner], [upper horizontal piece], [ur corner], [left vertical piece], [right vertical piece], [ll corner], [lower horizontal piece], [lr corner])
-This change allows larger diversity in the stylistical types of borders
-Changed addborder() mechanism
-To comply with the new border structure
-Will now also omit blank horizontal border lines (e.g. for ThinUnderline border type)
-Signatures for these functions/methods are changed to comply with the new border structure
-addborder(value: str, bordertype=BorderTypes.ThinBorder, vertical=True, horizontal=True) changed to addborder(value: str, bordertype=BorderTypes.ThinBorder)
-Canvas.__init__(self, width: int, height: int, horizontal_borders=True, vertical_borders=False, bordertype=BorderTypes.BlockBorder)
->Changed to Canvas.__init__(self, width: int, height: int, bordertype=BorderTypes.BlockBorder)
Bug Fixes
---------
-
"""
# 2.0
"""
Refactored ShellGUI.py into two separate modules: ShellGUI_Core.py and ShellGUI_Forms.py
This separation permits better modular forms support without encumbering the core ASCII rendering engine.
Additions
---------
-
Changes
-------
-Removed class
Bug Fixes
---------
-
"""
``` |
{
"source": "9fin/sqlpy",
"score": 2
} |
#### File: 9fin/sqlpy/test_sqlpy.py
```python
from __future__ import print_function
import pytest
import os
import glob
import functools
import psycopg2
from sqlpy import Queries, load_queries, SQLLoadException,\
SQLParseException, SQLArgumentException, SQLpyException, parse_sql_entry, QueryType
import logging
@pytest.fixture()
def enable_logging():
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(module)s %(levelname)s %(message)s')
@pytest.fixture
def queries_file():
return os.path.join(os.getcwd(), 'test_queries.sql')
@pytest.fixture
def queries_file_arr():
return [os.path.join(os.getcwd(), 'test_queries.sql')]
@pytest.fixture
def queries_file_glob():
return glob.glob(os.path.join(os.getcwd(), '*.sql'))
@pytest.fixture
def invalid_file_path():
return 'path_that_does_not_exist.sql'
@pytest.fixture
def invalid_sql_name_start():
return """
-- nam: test_select
-- testing the sqlpi module pls work
SELECT * FROM testdb
""".strip('\n')
@pytest.fixture
def invalid_sql_name_spaces():
return """
-- name: test select
-- testing the sqlpi module pls work
SELECT * FROM testdb
""".strip('\n')
@pytest.fixture
def invalid_sql_built():
return """
-- name: test_built$
-- testing the sqlpi module pls work
SELECT * FROM testdb
WHERE 1=1
AND col_1 = %()s
""".strip('\n')
@pytest.fixture
def invalid_sql_built_args():
return """
-- name: test_built$
-- testing the sqlpi module pls work
SELECT * FROM testdb
WHERE 1=1
AND col_1 = %(arg1)s
AND col_2 = %(arg1)
""".strip('\n')
@pytest.fixture
def sql_bang():
return """
-- name: test_delete!
DELETE FROM testdb
""".strip('\n')
@pytest.fixture
def sql_bang_return():
return """
-- name: test_return<!>
INSERT INTO test (col_1) VALUES ('A') RETURNING col_1
""".strip('\n')
@pytest.fixture
def sql_built():
return """
-- name: test_built$
-- testing the sqlpi module pls work
SELECT * FROM testdb
WHERE 1=1
AND col_1 = %(val_1)s
""".strip('\n')
@pytest.fixture
def sql_proc():
return """
-- name: test_proc_call@
example_fcn_name
""".strip('\n')
@pytest.fixture
def sql_select_1():
return """
-- name: select_1
SELECT 1;
""".strip('\n')
@pytest.fixture(scope="module")
def db_cur():
db_host = 'localhost'
db_port = 5432
db_user = 'postgres'
db_pass = ''
db = psycopg2.connect(dbname='postgres',
user=db_user,
                          password=db_pass,
host=db_host,
port=db_port)
yield db.cursor()
db.close()
class TestLoad:
def test_load(self, queries_file):
parsed = load_queries(queries_file)
assert isinstance(parsed, list)
def test_load_arr(self, queries_file_arr):
parsed = load_queries(queries_file_arr)
assert isinstance(parsed, list)
def test_load_name(self, queries_file):
parsed = load_queries(queries_file)
assert parsed[0][0] == 'TEST_SELECT'
def test_load_fcn(self, queries_file):
parsed = load_queries(queries_file)
assert isinstance(parsed[0][2], functools.partial)
def test_load_fcn_name(self, queries_file):
parsed = load_queries(queries_file)
fcn = parsed[0][2]
assert fcn.__name__ == 'TEST_SELECT'
def test_load_fcn_doc(self, queries_file):
parsed = load_queries(queries_file)
fcn = parsed[0][2]
assert fcn.__doc__ == 'testing the sqlpi module pls work\nsecond line comment'
def test_load_fcn_querystring_fmt(self, queries_file):
parsed = load_queries(queries_file)
fcn = parsed[0][2]
assert fcn.__query__ == """select *
-- comment in middle
from public.actor
limit 1;"""
class TestQuery:
def test_query(self, queries_file):
sql = Queries(queries_file)
assert isinstance(sql, Queries)
def test_query_repr(self, queries_file):
sql = Queries(queries_file)
assert 'sqlpy.Queries(' in sql.__repr__()
def test_query_fcn(self, queries_file):
sql = Queries(queries_file)
assert isinstance(sql.TEST_SELECT, functools.partial)
def test_query_fcn_args(self, queries_file):
sql = Queries(queries_file)
assert len(sql.TEST_SELECT.args) == 1
@pytest.mark.usefixtures("enable_logging")
class TestInitLogging:
def test_logging(self, queries_file, caplog):
caplog.set_level(logging.DEBUG)
Queries(queries_file)
for record in caplog.records:
assert record.levelname == 'INFO'
assert 'Found and loaded' in caplog.text
class TestExceptions:
def test_load_exception(self, invalid_file_path):
exc_msg = "[Errno No such file or directory] Could not find file: '{}'"\
.format(invalid_file_path)
with pytest.raises(SQLLoadException, message=exc_msg):
load_queries(invalid_file_path)
def test_parse_exception(self, invalid_sql_name_start):
exc_msg = r'^Query does not start with "-- name:": .*'
with pytest.raises(SQLParseException, match=exc_msg):
parse_sql_entry(invalid_sql_name_start)
def test_parse_exception2(self, invalid_sql_name_spaces):
exc_msg = r'^Query name has spaces: .*'
with pytest.raises(SQLParseException, match=exc_msg):
parse_sql_entry(invalid_sql_name_spaces)
def test_parse_exception3(self, invalid_sql_built):
exc_msg = r'^parse error, no argument found between \(\.\.\.\): .*'
with pytest.raises(SQLParseException, match=exc_msg):
parse_sql_entry(invalid_sql_built)
def test_parse_exception4(self, invalid_sql_built_args):
exc_msg = r'^parse error, arg numbers do not match in string s: .*'
with pytest.raises(SQLParseException, match=exc_msg):
parse_sql_entry(invalid_sql_built_args)
class TestQueryTypes:
def test_type_bang(self, sql_bang):
name, sql_type, fcn = parse_sql_entry(sql_bang)
assert sql_type == QueryType.INSERT_UPDATE_DELETE
def test_type_bang_return(self, sql_bang_return):
name, sql_type, fcn = parse_sql_entry(sql_bang_return)
assert sql_type == QueryType.RETURN_ID
def test_type_built(self, sql_built):
name, sql_type, fcn = parse_sql_entry(sql_built)
assert sql_type == QueryType.SELECT_BUILT
def test_type_proc(self, sql_proc):
name, sql_type, fcn = parse_sql_entry(sql_proc)
assert sql_type == QueryType.CALL_PROC
@pytest.mark.skipif('TRAVIS' not in os.environ, reason="test data only in Travis")
@pytest.mark.usefixtures("enable_logging")
class TestExec:
def test_select_1(self, db_cur, sql_select_1):
name, sql_type, fcn = parse_sql_entry(sql_select_1)
output = fcn(db_cur, n=1)
assert output[0] == 1
def test_select_2(self, db_cur, queries_file):
sql = Queries(queries_file)
output = sql.TEST_SELECT(db_cur, n=1)
assert output[0] == 1
def test_select_3(self, db_cur, queries_file_glob):
sql = Queries(queries_file_glob)
output = sql.TEST_SELECT_B(db_cur, n=1)
assert output[0] == 1
def test_select_4(self, db_cur, queries_file_glob):
sql = Queries(queries_file_glob, uppercase_name=False)
output = sql.test_select_b(db_cur, n=1)
assert output[0] == 1
def test_data1(self, db_cur, queries_file):
sql = Queries(queries_file)
data = ('BEN',)
output = sql.GET_ACTORS_BY_FIRST_NAME(db_cur, data, n=1)
assert output[0] == 83
def test_data1_1(self, db_cur, queries_file):
sql = Queries(queries_file)
data = ('BEN',)
output = sql.GET_ACTORS_BY_FIRST_NAME(db_cur, data)
assert len(output) == 2
def test_data1_2(self, db_cur, queries_file, caplog):
caplog.set_level(logging.DEBUG)
sql = Queries(queries_file, log_query_params=False)
data = ('BEN',)
sql.GET_ACTORS_BY_FIRST_NAME(db_cur, data, n=1)
assert "INFO Arguments: ('BEN',)" not in caplog.text
def test_data1_3(self, db_cur, queries_file, caplog):
caplog.set_level(logging.DEBUG)
sql = Queries(queries_file)
data = ('BEN',)
sql.GET_ACTORS_BY_FIRST_NAME(db_cur, data, n=1)
assert "INFO Arguments: ('BEN',)" in caplog.text
def test_data2(self, db_cur, queries_file, caplog):
caplog.set_level(logging.DEBUG)
sql = Queries(queries_file)
data = ('Jeff', 'Goldblum')
sql.PRE_CLEAR_ACTOR(db_cur, data)
output = sql.INSERT_ACTOR(db_cur, data, n=1)
assert output == ('Jeff', 'Goldblum')
assert "('Jeff', 'Goldblum')" in caplog.text
def test_data2_1(self, db_cur, queries_file):
sql = Queries(queries_file)
data = ('Jeff', 'Goldblum')
output = sql.INSERT_ACTOR(db_cur, data)
assert output == [('Jeff', 'Goldblum')]
def test_data2_2(self, db_cur, queries_file):
sql = Queries(queries_file)
data = (('Jeff', 'Goldblum'), ('Jeff', 'Goldblum'))
sql.DELETE_ACTORS(db_cur, data[0])
output = sql.INSERT_ACTORS(db_cur, data, many=True)
assert output == [('Jeff', 'Goldblum'), ('Jeff', 'Goldblum')]
def test_data2_3(self, db_cur, queries_file):
sql = Queries(queries_file)
data = (('Jeff', 'Goldblum'), ('Jeff', 'Goldblum'))
sql.DELETE_ACTORS(db_cur, data[0])
output = sql.INSERT_ACTORS(db_cur, data, many=True, n=1)
assert output == ('Jeff', 'Goldblum')
def test_data2_4(self, db_cur, queries_file):
sql = Queries(queries_file)
data = (('Jeff', 'Goldblum'), ('Jeff', 'Goldblum'))
sql.DELETE_ACTORS(db_cur, data[0])
output = sql.INSERT_ACTORS(db_cur, data, many=True, n=2)
assert output == [('Jeff', 'Goldblum'), ('Jeff', 'Goldblum')]
def test_data3(self, db_cur, queries_file):
sql = Queries(queries_file)
kwdata = {
'country': 'MARS'
}
output1 = sql.INSERT_COUNTRY(db_cur, kwdata)
output2 = sql.DELETE_COUNTRY(db_cur, kwdata)
assert output1 and output2
def test_data4(self, db_cur, queries_file, caplog):
caplog.set_level(logging.DEBUG)
sql = Queries(queries_file)
kwdata = {
'countires': ['United States'],
'extra_name': 'BEN'
}
output = sql.CUSTOMERS_OR_STAFF_IN_COUNTRY(db_cur, kwdata)
assert len(output) == 37
assert "'countires': ['United States']" in caplog.text
def test_data4_1(self, db_cur, queries_file):
sql = Queries(queries_file)
kwdata = {
'countires': ['United States'],
'extra_name': 'BEN',
'unmatched_arg_trigger': True
}
output = sql.CUSTOMERS_OR_STAFF_IN_COUNTRY(db_cur, kwdata)
assert len(output) == 37
def test_data5(self, db_cur, queries_file):
sql = Queries(queries_file)
kwdata = {
'countires': ['United States'],
'extra_name': 'BEN'
}
output = sql.CUSTOMERS_OR_STAFF_IN_COUNTRY(db_cur, kwdata, n=1)
assert len(output) == 3
def test_data5_1(self, db_cur, queries_file):
exc_msg = r'^Named argument supplied which does not match a SQL clause: .*'
with pytest.raises(SQLArgumentException, match=exc_msg):
sql = Queries(queries_file, strict_parse=True)
kwdata = {
'countires': ['United States'],
'extra_name': 'BEN',
'extra_param': 'I should not be here'
}
sql.CUSTOMERS_OR_STAFF_IN_COUNTRY(db_cur, kwdata, n=1)
def test_data5_2(self, db_cur, queries_file):
sql = Queries(queries_file)
kwdata = {
'countires': ['United States'],
'extra_name': 'BEN'
}
output = sql.CUSTOMERS_OR_STAFF_IN_COUNTRY(db_cur, kwdata, n=3)
assert len(output) == 3
def test_data6(self, db_cur, queries_file):
sql = Queries(queries_file)
kwdata = {
'countires': ['United States'],
'extra_name': 'BEN'
}
identifiers = ('country',)
output = sql.CUSTOMERS_OR_STAFF_IN_COUNTRY_SORT(db_cur, kwdata, n=1, identifiers=identifiers)
assert output == ('BEN', 'EASTER', 'Russian Federation')
def test_data6_1(self, db_cur, queries_file):
sql = Queries(queries_file)
kwdata = {
'countires': ['United States'],
'extra_name': 'BEN'
}
identifiers = {'order_group': ('country', 'last_name')}
output = sql.CUSTOMERS_OR_STAFF_IN_COUNTRY_SORT_GROUP(db_cur, kwdata, n=1, identifiers=identifiers)
assert output == ('BEN', 'EASTER', 'Russian Federation')
def test_data6_2(self, db_cur, queries_file):
sql = Queries(queries_file)
kwdata = {
'countires': ['United States'],
'extra_name': 'BEN'
}
identifiers = {'order_group': 'country'}
output = sql.CUSTOMERS_OR_STAFF_IN_COUNTRY_SORT_GROUP(db_cur, kwdata, n=1, identifiers=identifiers)
assert output == ('BEN', 'EASTER', 'Russian Federation')
def test_proc1(self, db_cur, queries_file):
sql = Queries(queries_file)
output = sql.INVENTORY_CHECK(db_cur, (1, 1))
        assert output == [(1,), (2,), (3,), (4,)] or output == [(4,), (3,), (2,), (1,)]
def test_proc1_1(self, db_cur, queries_file):
sql = Queries(queries_file)
output = sql.INVENTORY_CHECK(db_cur, (1, 1), n=1)
        assert output == (1,) or output == (4,)
def test_proc1_2(self, db_cur, queries_file):
sql = Queries(queries_file)
output = sql.INVENTORY_CHECK(db_cur, (1, 1), n=4)
        assert output == [(1,), (2,), (3,), (4,)] or output == [(4,), (3,), (2,), (1,)]
@pytest.mark.skipif('TRAVIS' not in os.environ, reason="test data only in Travis")
@pytest.mark.usefixtures("enable_logging")
class TestExecExcept:
def test_data1(self, db_cur, queries_file):
with pytest.raises(psycopg2.Error):
sql = Queries(queries_file)
data = ('BEN',)
sql.GET_ACTORS_BY_FIRST_NAME_EXP(db_cur, data, n=1)
def test_data1_1(self, db_cur, queries_file):
with pytest.raises(psycopg2.Error):
sql = Queries(queries_file)
data = ('BEN',)
sql.GET_ACTORS_BY_FIRST_NAME_EXP(db_cur, data)
def test_data2(self, db_cur, queries_file):
with pytest.raises(psycopg2.Error):
sql = Queries(queries_file)
data = ('Jeff', 'Goldblum', 'Jeff', 'Goldblum')
sql.INSERT_ACTOR_EXP(db_cur, data)
def test_data3(self, db_cur, queries_file):
with pytest.raises(psycopg2.Error):
sql = Queries(queries_file)
kwdata = {
'country': 'MARS'
}
sql.INSERT_COUNTRY_EXP(db_cur, kwdata)
sql.DELETE_COUNTRY_EXP(db_cur, kwdata)
def test_data4(self, db_cur, queries_file):
with pytest.raises(psycopg2.Error):
sql = Queries(queries_file)
kwdata = {
'countires': ['United States'],
'extra_name': 'BEN'
}
sql.CUSTOMERS_OR_STAFF_IN_COUNTRY_EXP(db_cur, kwdata)
def test_data5(self, db_cur, queries_file):
with pytest.raises(psycopg2.Error):
sql = Queries(queries_file, strict_parse=True)
kwdata = {
'countires': ['United States'],
'extra_name': 'BEN'
}
sql.CUSTOMERS_OR_STAFF_IN_COUNTRY_EXP(db_cur, kwdata, n=1)
def test_data6(self, db_cur, queries_file):
with pytest.raises(psycopg2.Error):
sql = Queries(queries_file)
kwdata = {
'countires': ['United States'],
'extra_name': 'BEN'
}
identifiers = ('country',)
sql.CUSTOMERS_OR_STAFF_IN_COUNTRY_SORT_EXP(db_cur, kwdata, n=1, identifiers=identifiers)
def test_data6_1(self, db_cur, queries_file):
with pytest.raises(SQLParseException):
sql = Queries(queries_file)
kwdata = {
'countires': ['United States'],
'extra_name': 'BEN'
}
identifiers = 'country'
sql.CUSTOMERS_OR_STAFF_IN_COUNTRY_SORT_GROUP(db_cur, kwdata, n=1, identifiers=identifiers)
def test_data7(self, db_cur, queries_file):
with pytest.raises(SQLpyException):
sql = Queries(queries_file)
data = ('BEN',)
sql.GET_ACTORS_BY_FIRST_NAME(db_cur, data, n='1')
def test_data7_1(self, db_cur, queries_file):
with pytest.raises(SQLpyException):
sql = Queries(queries_file)
data = ('BEN',)
sql.GET_ACTORS_BY_FIRST_NAME(db_cur, data, n='0')
def test_data7_2(self, db_cur, queries_file):
with pytest.raises(SQLpyException):
sql = Queries(queries_file)
data = ('BEN',)
sql.GET_ACTORS_BY_FIRST_NAME(db_cur, data, n=-1)
def test_data8(self, db_cur, queries_file):
with pytest.raises(psycopg2.Error):
sql = Queries(queries_file)
data = (1, 1)
sql.INVENTORY_CHECK_EXP(db_cur, data)
def test_data9(self, db_cur, queries_file):
with pytest.raises(SQLpyException):
sql = Queries(queries_file)
data = (('Jeff', 'Goldblum'), ('Jeff', 'Goldblum'))
output = sql.INSERT_ACTORS(db_cur, data, many=True, n='2')
assert output == [('Jeff', 'Goldblum'), ('Jeff', 'Goldblum')]
def test_data10(self, db_cur, queries_file):
with pytest.raises(SQLpyException):
sql = Queries(queries_file)
kwdata = (['United States'], 'BEN')
output = sql.CUSTOMERS_OR_STAFF_IN_COUNTRY(db_cur, kwdata)
assert len(output) == 37
``` |
{
"source": "9fv/generator-python-microlibrary",
"score": 3
} |
#### File: templates/test/test_example.py
```python
import unittest
import inspect
import os, sys
sys.path.insert(0, os.path.abspath(os.path.join('..','src')))
from <%=name%> import KlassExample
class ExampleTest(unittest.TestCase):
""" Unit tests for PygalleBaseClassTest.
"""
def test_isclass(self):
""" Is {PygalleBaseClass} really a class ? """
self.assertEqual(inspect.isclass(PygalleBaseClass), True)
def test_create_instance(self):
""" Create a new instance of {PygalleBaseClass} """
self.assertIsInstance(PygalleBaseClass(), PygalleBaseClass)
def main():
""" Entry point.
"""
unittest.main()
if __name__ == '__main__':
main()
``` |
{
"source": "9ggy/BruteNitro",
"score": 3
} |
#### File: BruteNitro/src/brutenitro.py
```python
import os, requests, random, threading, json, time
from colorama import Fore
from webserver import keep_alive
keep_alive()
print_lock = threading.Lock()  # shared lock so output from worker threads does not interleave

def printer(color, status, code) -> None:
    with print_lock:
        print(f"{color} {status} > {Fore.RESET}discord.gift/{code}")
class Worker():
def pick_proxy(self):
with open('proxies.txt', 'r') as f:
proxies = [line.strip() for line in f]
return random.choice(proxies)
def config(self, args, extra=False):
with open('config.json', 'r') as conf:
data = json.load(conf)
if extra:
return data[args][extra]
else:
return data[args]
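    # Expected config.json shape, inferred from the lookups in this file (values illustrative):
    #   {"proxies": "http", "thread": 200,
    #    "webhook": {"url": "https://discord.com/api/webhooks/...", "username": "...", "avatar": "..."}}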
def run(self):
self.code = "".join(random.choice("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890") for _ in range(16))
try:
req = requests.get(
f'https://discordapp.com/api/v6/entitlements/gift-codes/{self.code}?with_application=false&with_subscription_plan=true', # API Endpoint to check if the nitro code is valid
proxies={
'http': self.config("proxies") + '://' + self.pick_proxy(),
'https': self.config("proxies") + '://' + self.pick_proxy()
},
                timeout = 0.5 # Timeout for each request; too low a value sometimes causes the process to be killed.
)
if req.status_code == 200:
printer(Fore.LIGHTGREEN_EX, " Valid ", self.code)
try:
requests.post(
Worker().config("webhook", "url"),
json={
"content": f"Nitro Code, Redeem ASAP\n\nhttps://discord.gift/{self.code}",
"username": Worker().config("webhook", "username"),
"avatar_url": Worker().config("webhook", "avatar")
})
except:
pass
elif req.status_code == 429:
rate = (int(req.json()['retry_after']) / 1000) + 1
printer(Fore.LIGHTBLUE_EX, "RTlimit", self.code)
time.sleep(rate)
        except KeyboardInterrupt:
            print(f"{Fore.LIGHTRED_EX} Stopped > {Fore.RESET}BruteNitro Sniper Stopped by Keyboard Interrupt.")
            exit()
except:
pass # Kill me if you want
if __name__ == "__main__": # Driver Code
print("""
.-.
(___________________________()6 `-, BruteNitro | Nitro code brute forcing
( ______________________ /''"`
//\\ //\\
"" "" "" ""
""")
DNG = Worker()
while True:
if threading.active_count() <= int(Worker().config("thread")):
threading.Thread(target=DNG.run).start()
``` |
{
"source": "9gress0r/Sudoku-Solver",
"score": 4
} |
#### File: 9gress0r/Sudoku-Solver/output.py
```python
def make_table(table, solved):
"""
Creates a beautiful table
"""
result = []
header = '+---------FINAL---------+' if solved else '+----------RAW----------+'
result.append(header)
for row in range(9):
first, second, third = ' '.join(map(str, table[row][:3])), ' '.join(map(str, table[row][3:6])), ' '.join(map(str, table[row][6:]))
result.append('| ' + first + ' | ' + second + ' | ' + third + ' |')
if row in [2, 5]:
result.append('| - - - + - - - + - - - |')
result.append(header)
return result
def table_splitted(table1, table2):
"""
Combines two tables
"""
result = [table1[line] + ' ' * 2 + table2[line] for line in range(len(table1))]
return '\n'.join(result)
```
#### File: 9gress0r/Sudoku-Solver/TableClass.py
```python
class Table:
def __init__(self, table):
self.table = table
self.pos_nums = list(range(1, 10))
def __call__(self):
return self.table
    def get_square(self, line1, line2, line3, num):
""" Getting a square """
square = []
if num == 1:
for index in range(3):
square.append(line1[index])
square.append(line2[index])
square.append(line3[index])
elif num == 2:
for index in range(3, 6):
square.append(line1[index])
square.append(line2[index])
square.append(line3[index])
elif num == 3:
for index in range(6, 9):
square.append(line1[index])
square.append(line2[index])
square.append(line3[index])
return square
def get_ver_line(self, num):
""" Getting a vertical line """
return [item[num] for item in self.table]
def get_hor_line(self, row):
"""
Gets all numbers in a horizontal line by a number and turn it into the array
"""
return self.table[row]
def is_validLine(self, line):
""" Checks is a line valid """
logs = []
for item in self.pos_nums:
if line.count(item) == 1:
logs.append(True)
else:
logs.append(False)
return all(logs)
def is_validSquare(self, square):
""" Checks is a square valid """
logs = []
for item in self.pos_nums:
if square.count(item) == 1:
logs.append(True)
else:
logs.append(False)
return all(logs)
    def check_vertical_lines(self):
        """ Checks whether all vertical lines in a table are valid """
        logs = [self.is_validLine(self.get_ver_line(col)) for col in range(9)]
return all(logs)
def check_horizontal_lines(self):
""" Checks all horizontal lines in a table """
logs = [self.is_validLine(line) for line in self.table]
return all(logs)
def check_squares(self):
""" Checks all squares in a table """
logs = []
for item in range(0, 7, 3):
line1, line2, line3 = self.table[item], self.table[item + 1], self.table[item + 2]
for num in range(1, 4):
square = self.get_square(line1, line2, line3, num)
logs.append(self.is_validSquare(square))
return all(logs)
    def is_validTable(self):
        """ Checks whether a table is valid """
        logs = [self.check_vertical_lines(), self.check_horizontal_lines(), self.check_squares()]
return all(logs)
def get_sq(self, num):
"""
Gets all numbers in a square by a number and turn it into the array
"""
square = []
if num in [1, 2, 3]:
for row in range(3):
if num == 1:
square.extend(self.table[row][:3])
elif num == 2:
square.extend(self.table[row][3:6])
elif num == 3:
square.extend(self.table[row][6:9])
elif num in [4, 5, 6]:
for row in range(3, 6):
if num == 4:
square.extend(self.table[row][:3])
elif num == 5:
square.extend(self.table[row][3:6])
elif num == 6:
square.extend(self.table[row][6:9])
elif num in [7, 8, 9]:
for row in range(6, 9):
if num == 7:
square.extend(self.table[row][:3])
elif num == 8:
square.extend(self.table[row][3:6])
elif num == 9:
square.extend(self.table[row][6:9])
return square
def available_nums(self, row, col):
"""
Returns an array of available numbers for given cell
"""
hor, ver, sq = self.get_hor_line(row), self.get_ver_line(col), list()
if row in range(3):
if col in range(3):
sq = self.get_sq(1)
elif col in range(3, 6):
sq = self.get_sq(2)
elif col in range(6, 9):
sq = self.get_sq(3)
elif row in range(3, 6):
if col in range(3):
sq = self.get_sq(4)
elif col in range(3, 6):
sq = self.get_sq(5)
elif col in range(6, 9):
sq = self.get_sq(6)
elif row in range(6, 9):
if col in range(3):
sq = self.get_sq(7)
elif col in range(3, 6):
sq = self.get_sq(8)
elif col in range(6, 9):
sq = self.get_sq(9)
return [
item for item in self.pos_nums
if item not in hor and item not in ver and item not in sq
]
def find_holes(self):
"""
Returns an array of indexes of cells that can be changed
"""
result = []
for row in range(len(self.table)):
for col in range(len(self.table[row])):
if self.table[row][col] == 0:
result.append((row, col))
return result
def solve(self):
"""
Main function for solving a table
"""
holes = {(row, col): self.available_nums(row, col) for row, col in self.find_holes()}
was_changed = False
for key, val in holes.items():
if len(val) == 1:
was_changed = True
row, col = key
self.table[row][col] = val[0]
if was_changed:
self.solve()
return self.table
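# Example usage (hypothetical grid: a 9x9 list of ints with 0 for blanks):
#   t = Table(grid)
#   solved = t.solve()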
``` |
{
"source": "9h03n1x/AD_project",
"score": 3
} |
#### File: src/ego/simple_ego.py
```python
from .ego_base import ego_base
class simple_ego(ego_base):
def __init__(self):
ego_base.__init__(self)
self.length = 1
self.position = {"x": 0, "y": 0, "theta": "N"}
self.move_name = {"N":"WNE", "E":"NES", "S":"ESW", "W":"SWN"}
self.move = {"N":[-1, 0 ],
"E":[ 0, -1],
"S":[ 1, 0 ],
"W":[ 0, 1 ]}
self.direction = ""
def set_position(self,x,y,theta):
self.position = {"x": x, "y": y, "theta": theta}
def update_position(self, movement, step=1):
"""
        update the position of the ego according to a movement in a direction.
        movement gives the new position; from this and the old position the direction is calculated.
        step gives how many cells are covered in this direction.
"""
delta_x = self.position["x"] - movement[0]
delta_y = self.position["y"] - movement[1]
if delta_x == -1 and delta_y == 0:
direction = "N"
elif delta_x == 0 and delta_y == -1:
direction = "E"
elif delta_x == 1 and delta_y == 0:
direction = "S"
elif delta_x == 0 and delta_y == 1:
direction = "W"
else:
direction=self.position["theta"]
self.logger.write_log("delta_x: " +str(delta_x) + "\tdelta_y: " + str(delta_y) + "\tdirection: " + direction)
for i in range(0,step):
self.position["x"] += self.move[direction][1]
self.position["y"] += self.move[direction][0]
self.position["theta"] = direction
def get_move_options(self):
"""
return the options according to the current position, assuming that the ego is not able to reverse
"""
move_opt = []
name_opt = []
directions = self.move_name[self.position["theta"]]
for direction in directions:
move_opt.append(self.move[direction])
if self.position["theta"] == "N":
name_opt = ["L", "^", "R"]
elif self.position["theta"] == "S":
name_opt = ["L", "v", "R"]
elif self.position["theta"] == "E":
name_opt = ["R", ">", "L"]
elif self.position["theta"] == "W":
name_opt = ["R", "<","L"]
else:
pass
        #TODO: map the options to left, straight, right from the EGO's point of view
return move_opt, name_opt
def get_direction(self, direction):
"""
returns the symbol of current movement
"""
if self.direction == direction:
val = "s_"
options = self.move_name[direction]
elif self.direction == "":
val = "n_"
options = "NWSE"
else:
options = self.move_name[direction]
if self.direction == options[0]:
val = "R_"
elif self.direction == options[2]:
val = "L_"
else:
val = "-_"
self.logger.write_log("direction: " + str(direction) + "\t options: " + str(options))
self.direction = direction
val = val + direction
delta = []
delta_names = []
for ele in options:
delta_names.append(ele)
delta.append(self.move[ele])
return val
```
#### File: src/perception/perception_base.py
```python
class perception_base(object):
'''
providing all the methods to connect to plan
'''
def __init__(self, params):
'''
Constructor
'''
```
#### File: testing/testcases/tc_path_plan_dp.py
```python
from testing.testcases.tc_base import tc_base
from plan.path_planning import path_dynamic_prog
from simulator.visualisation import visualisation
class tp_path_plan_dp(tc_base):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
self.name = "tp_path_plan_dp"
tc_base.__init__(self,self.name)
#self.__set_name(self.name)
self.path_plan = path_dynamic_prog()
self.visu = visualisation()
self.grid_size=[15,25]
def precondition_01_init_pathplan(self):
"""
init the pathplanning algo and the visualisation
"""
self.path_plan.set_grid_size(self.grid_size)
        self.visu.set_grid(self.grid_size)  # height x width
def precondition_02_set_obsticals(self):
"""
        set the defined obstacles in both grids, path planner and visualisation
"""
self.static = [[0,2,7,5],
[0,5,3,15],
[0,1,12,2],
[13,10,15,20],
[8,14,13,17]]
self.path_plan.set_obsticals(self.static,[])
self.visu.set_obsticales(self.static,[])
self.visu.draw_grid(self.path_plan.get_grid())
def teststep_01(self):
"""
calculate the value function
"""
goal = [10,20]
cost = [1,1,1,1]
value_grid, policy_grid = self.path_plan.compute_value(goal, cost)
self.logger.write_log("value_grid: ")
self.visu.draw_grid(value_grid)
self.logger.write_log("policy_grid: ")
self.visu.draw_grid(policy_grid)
```
#### File: src/testing/test_planner.py
```python
from . import testcases
from testing.testcases import tc_path_plan_a_star as testcase
from logger.logger import logger
import time
class test_planner(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
self.list_tc = {}
self.list_tc_init={}
self.logger = logger("test_planner")
self.results = []
def get_available_testcases(self):
"""
append all testcases to the tc_list
"""
self.list_tc = testcases.get_available_tc()
for key in self.list_tc.keys():
self.list_tc_init[key] = self.list_tc[key]()
def run(self):
"""
runs all the registered testcases
"""
for tc_key in self.list_tc.keys():
self.logger.write_log("-------------------------------------------------")
self.logger.write_log("running testcase: " + tc_key)
starttime = time.time()
tc_obj = self.list_tc_init[tc_key]
tc_class = self.list_tc[tc_key]
preCon_keys = []
testStep_keys = []
postCon_keys = []
for m_key in tc_class.__dict__.keys():
if m_key.find("precondition") != -1:
preCon_keys.append(m_key)
elif m_key.find("teststep") != -1:
testStep_keys.append(m_key)
elif m_key.find("postcondition") != -1:
testStep_keys.append(m_key)
# sort the teststeps
preCon_keys.sort()
testStep_keys.sort()
postCon_keys.sort()
self.logger.write_log("-------------------------------------------------")
self.logger.write_log("running precondition: " + tc_key)
for preCon in preCon_keys:
self.logger.write_log("running precondition: " + preCon + "\n")
tc_class.__dict__[preCon](tc_obj)
self.logger.write_log("-------------------------------------------------")
self.logger.write_log("running teststeps: " + tc_key)
for teststep in testStep_keys:
self.logger.write_log("running teststep: " + teststep + "\n")
tc_class.__dict__[teststep](tc_obj)
self.logger.write_log("-------------------------------------------------")
self.logger.write_log("running postcondition: " + tc_key)
for postCon in postCon_keys:
self.logger.write_log("running postcondition: " + postCon + "\n")
tc_class.__dict__[postCon](tc_obj)
duration = time.time()-starttime
self.logger.write_log("-------------------------------------------------")
self.logger.write_log("finished testcase: " + tc_key +" in " +str(duration)[:6])
self.results.append({"name":tc_key, "duration":duration,"result":False})
return self.results
``` |
{
"source": "9hax/simpleticket",
"score": 2
} |
#### File: 9hax/simpleticket/user.py
```python
import bcrypt
import smtpconfig
import json
import time
import datetime
import random
import string
from simpleticket import m
try:
import userconfig as config
except:
import config
# prepare language files
with open("lang/"+config.LANGUAGE+".json",'r',encoding="utf-8") as langfile:
lang = json.load(langfile)
def resetpw(user):
newPassword = ''.join(random.choices(string.ascii_uppercase + string.digits, k = random.randint(20,30)))
    user.password = hashPassword(newPassword)
m.db.session.commit()
sendmail(user.email, lang["password-reset-mail"].replace("%PW%", newPassword), lang["password-reset"]+" | "+config.SITE_NAME)
    del newPassword
def verify_login(u, p):
potential_user = m.User.query.filter_by(username=u.lower()).first()
if potential_user:
if bcrypt.checkpw(p.encode('utf-8'), potential_user.password.encode('utf-8')):
return potential_user
return False
def hashPassword(password):
return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt(12)).decode()
def get_userid(username):
return m.User.query.filter_by(username = username).first().id
def get_user(userid):
return m.User.query.get(userid)
def create_ticket(title, text, media, created_by, assigned_to):
new_ticket = m.Ticket()
new_ticket.title = title
new_ticket.is_open = True
new_ticket.text = text
new_ticket.media = media
new_ticket.time = time.time()
new_ticket.created_by = created_by
new_ticket.assigned_to = assigned_to
m.db.session.add(new_ticket)
m.db.session.commit()
def create_ticket_reply(text, media, created_by, main_ticket_id, isNote = False):
new_ticket = m.TicketReply()
new_ticket.text = text
new_ticket.media = media
new_ticket.isNote = isNote
new_ticket.time = time.time()
new_ticket.created_by = created_by
new_ticket.main_ticket_id = main_ticket_id
m.db.session.add(new_ticket)
m.db.session.commit()
def create_user(username, fullname, email, hashedPassword, passwordResetTimer = -1, highPermissionLevel = 0):
new_user = m.User()
new_user.username = username.lower()
new_user.fullname = fullname
new_user.email = email
    new_user.password = hashedPassword
new_user.passwordResetTimer = passwordResetTimer
new_user.highPermissionLevel = highPermissionLevel
m.db.session.add(new_user)
m.db.session.commit()
def modify_user_password(userid, newPasswordHash):
modified_user = get_user(userid)
    modified_user.password = newPasswordHash
m.db.session.commit()
def sendmail(address, htmlcontent, subject):
import smtplib, ssl
mailstring = "From: "+smtpconfig.SMTP_USER+"\nTo: "+address+"\nSubject: "+subject+"\n\n"+htmlcontent+"\n"
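    # raw RFC 5322-style message: headers, a blank line, then the body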
ssl_context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtpconfig.SMTP_SERVER, smtpconfig.SMTP_PORT, context=ssl_context) as smtpserver:
smtpserver.login(smtpconfig.SMTP_USER, smtpconfig.SMTP_PASSWORD)
smtpserver.sendmail(smtpconfig.SMTP_USER, address, mailstring)
def getTime(timestamp):
try:
return datetime.datetime.fromtimestamp(timestamp).strftime(config.TIMEFORMAT)
except:
return "Invalid time"
def hasValidReply(ticketid):
ticketReplyList = m.TicketReply.query.filter_by(main_ticket_id = ticketid).all()
for reply in ticketReplyList:
if m.User.query.filter_by(id = reply.created_by_id).first().highPermissionLevel:
return True
return False
``` |
{
"source": "9helix/NEO-Autoscript",
"score": 3
} |
#### File: NEO-Autoscript/src/utils.py
```python
import pickle
from builtins import open  # keep a module-level reference so __del__ can still call open at interpreter shutdown
from pathlib import Path
from config import DATA_FILE
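# PersistentData is turned into a singleton instance by the @type.__call__
# decorator below (applying the decorator calls the class once). It restores
# its attribute namespace from DATA_FILE on creation and pickles it back
# when the instance is garbage-collected.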
@type.__call__
class PersistentData:
def __init__(self):
if Path(DATA_FILE).exists():
ns = pickle.load(open(Path(DATA_FILE), 'rb'))
self.__dict__.update(ns)
def __del__(self):
ns = self.__dict__
pickle.dump(ns, open(Path(DATA_FILE), 'wb'))
def jd_to_hhmmss(jd):
JDtime = str(jd)
decimal_part = JDtime[JDtime.find('.')+1:]
decimal_part = float(decimal_part)/(10**len(decimal_part))+0.5
day_decimal = str(decimal_part*24)
hr_decimal = day_decimal[:day_decimal.find('.')]
    min_decimal = str(round(float(day_decimal[day_decimal.find('.'):]) * 60))  # fractional hours -> minutes
hr_decimal = int(hr_decimal)
min_decimal = int(min_decimal)
if hr_decimal >= 24:
hr_decimal -= 24
if min_decimal >= 60:
hr_decimal += 1
min_decimal -= 60
if hr_decimal >= 24:
hr_decimal -= 24
hr_decimal = str(hr_decimal)
min_decimal = str(min_decimal)
if len(hr_decimal) == 1:
hr_decimal = '0'+hr_decimal
if len(min_decimal) == 1:
min_decimal = '0'+min_decimal
time = hr_decimal+min_decimal
return time
``` |
{
"source": "9ightcor3/Camouflage-Multifunctional-Bot",
"score": 3
} |
#### File: 9ightcor3/Camouflage-Multifunctional-Bot/metal.py
```python
import cv2
import numpy as np
import os
import time
import RPi.GPIO as GPIO  # GPIO library
GPIO.setmode(GPIO.BCM) #Set GPIO pin numbering
METAL = 26
IN5=23
IN6=24
IN7=25
IN8=8
RED = 2    # GPIO pin for the red LED
GREEN = 3  # GPIO pin for the green LED
BLUE = 4   # GPIO pin for the blue LED
LASER=17
IN1=21
IN2=20
IN3=16
IN4=12
GPIO.setup(RED,GPIO.OUT) #Set pin as GPIO out
GPIO.setup(GREEN,GPIO.OUT)
GPIO.setup(BLUE,GPIO.OUT)
GPIO.setup(IN1, GPIO.OUT)
GPIO.setup(IN2, GPIO.OUT)
GPIO.setup(IN3, GPIO.OUT)
GPIO.setup(IN4, GPIO.OUT)
GPIO.setup(LASER, GPIO.OUT)
GPIO.output(LASER, False)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.output(IN1, False)
GPIO.output(IN2, False)
GPIO.output(IN3, False)
GPIO.output(IN4, False)
GPIO.output(RED, False)
GPIO.output(BLUE, False)
GPIO.output(GREEN, False)
##
GPIO.setup(METAL,GPIO.IN)
GPIO.setup(IN5, GPIO.OUT)
GPIO.setup(IN6, GPIO.OUT)
GPIO.setup(IN7, GPIO.OUT)
GPIO.setup(IN8, GPIO.OUT)
GPIO.output(IN5, False)
GPIO.output(IN6, False)
GPIO.output(IN7, False)
GPIO.output(IN8, False)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
def DISPOSAL():
count=0
while(count<5):
print('DISPOSAL')
GPIO.output(IN5, True)
GPIO.output(IN6, False)
time.sleep(1)
GPIO.output(IN5, False)
GPIO.output(IN6, False)
time.sleep(1)
GPIO.output(IN5, False)
GPIO.output(IN6, True)
time.sleep(1)
GPIO.output(IN5, False)
GPIO.output(IN6, False)
time.sleep(1)
count +=1
while True:
    if not GPIO.input(METAL):  # detector output goes low when metal is sensed
        print('BOMB DETECTED')
        DISPOSAL()
``` |
{
"source": "9il/benchmarks",
"score": 3
} |
#### File: benchmarks/primes/primes.py
```python
import os
import platform
import socket
import sys
UPPER_BOUND = 5000000
PREFIX = 32338
class Node:
def __init__(self):
self.children = {}
self.terminal = False
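# Prime generation: start from {2} and all odd numbers up to m, then discard
# every odd composite of the form (2i+1)(2i+2n+1), leaving only primes.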
def generate_primes(m):
result = {2}
for i in range(1, 1 + (m - 1) // 2):
result.add(2 * i + 1)
k, j = 1, 1
sqr = lambda i: i * i
max_n = lambda i: (m - sqr(2 * i + 1)) // (4 * i + 2)
while k > 0:
k = max_n(j)
j += 1
k = j
for i in range(1, k + 1):
for n in range(max_n(i - 1)):
result.discard((2 * i + 1) * (2 * i + 2 * n + 1))
return result
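# Build a trie over the decimal digits of each prime so that all primes
# sharing a given prefix can be enumerated cheaply.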
def generate_trie(l):
root = Node()
for el in l:
head = root
for ch in str(el):
head = head.children.setdefault(ch, Node())
head.terminal = True
return root
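# Walk the trie down to the prefix node, then breadth-first collect every
# prime at or below that node, i.e. every prime whose decimal form starts
# with the prefix.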
def find(upper_bound, prefix):
primes = generate_primes(upper_bound)
root = generate_trie(primes)
head, str_prefix = root, str(prefix)
for ch in str_prefix:
head = head.children.get(ch)
if head is None:
return None
queue, result = [(head, str_prefix)], []
while queue:
top, prefix = queue.pop()
if top.terminal:
result.append(int(prefix))
for ch, v in top.children.items():
queue.insert(0, (v, prefix + ch))
return result
def notify(msg):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if not s.connect_ex(("localhost", 9001)):
s.sendall(bytes(msg, "utf8"))
def verify():
left = [2, 23, 29]
right = find(100, 2)
if left != right:
print("%s != %s" % (left, right), file=sys.stderr)
quit(1)
if __name__ == "__main__":
verify()
notify("%s\t%d" % (platform.python_implementation(), os.getpid()))
results = find(UPPER_BOUND, PREFIX)
notify("stop")
print(results)
``` |
{
"source": "9il/mir_benchmarks",
"score": 3
} |
#### File: mir_benchmarks/other_benchmarks/basic_ops_bench.py
```python
import time
from collections import defaultdict as dd
from timeit import default_timer as timer
import numpy as np
def allocation_and_functions():
pass
def functions(nruns=1):
rows, cols = 500, 600
reduceRowsBy, reduceColsBy = 5, 6
small_int_matrixA = np.random.randint(
1, 10, [int(rows / reduceRowsBy), int(cols / reduceColsBy)]
)
small_int_matrixB = np.random.randint(
1, 10, [int(rows / reduceRowsBy), int(cols / reduceColsBy)]
)
float_matrixA = np.random.rand(rows, cols)
small_float_matrixA = np.random.rand(
int(rows / reduceRowsBy), int(cols / reduceColsBy)
)
small_float_matrixB = np.random.rand(
int(rows / reduceRowsBy), int(cols / reduceColsBy)
)
float_matrixC = np.random.rand(cols, rows)
float_arrayA = np.random.rand(rows * cols)
float_arrayB = np.random.rand(rows * cols)
funcs = dd(list)
name = "Element-wise sum of two {}x{} matrices (int), (1000 loops)".format(
int(rows / reduceRowsBy), int(cols / reduceColsBy)
)
for _ in range(nruns):
start = timer()
for _ in range(1000):
_ = small_int_matrixA + small_int_matrixB
end = timer()
funcs[name].append(end - start)
name = "Element-wise multiplication of two {}x{} matrices (float64), (1000 loops)".format(
int(rows / reduceRowsBy), int(cols / reduceColsBy)
)
for _ in range(nruns):
start = timer()
for _ in range(1000):
_ = small_float_matrixA * small_float_matrixB
end = timer()
funcs[name].append(end - start)
name = "Scalar product of two {} arrays (float64)".format(rows * cols)
for _ in range(nruns):
start = timer()
_ = float_arrayA @ float_arrayB
end = timer()
funcs[name].append(end - start)
name = "Dot product of {}x{} and {}x{} matrices (float64)".format(
rows, cols, cols, rows
)
for _ in range(nruns):
start = timer()
_ = float_matrixA @ float_matrixC
end = timer()
funcs[name].append(end - start)
name = "L2 norm of {}x{} matrix (float64)".format(rows, cols)
for _ in range(nruns):
start = timer()
        _ = np.linalg.norm(float_matrixA) ** 2  # squared Frobenius norm
end = timer()
funcs[name].append(end - start)
name = "Sort of {}x{} matrix (float64)".format(rows, cols)
for _ in range(nruns):
start = timer()
_ = np.sort(float_matrixA, axis=None)
end = timer()
funcs[name].append(end - start)
return funcs
def benchmark():
# TODO allocation_and_functions()
results = functions(20)
for name, runs in results.items():
print("| {} | {} |".format(name, sum(runs) / len(runs)))
if __name__ == "__main__":
benchmark()
``` |
{
"source": "9incloud/lab-ecs-fargate-cd-blue-green-infra",
"score": 2
} |
#### File: lab-ecs-fargate-cd-blue-green-infra/deployment-hooks/app.py
```python
import boto3
import os
import json
REGION = os.environ.get('REGION')
DEBUG = os.environ.get('DEBUG')
def handler(event, context):
if DEBUG is not None and DEBUG != 'false':
print("Received event: " + json.dumps(event, indent=2))
deployment_id = event['DeploymentId']
lifecycle_eventhook_execution_id = event['LifecycleEventHookExecutionId']
session = boto3.Session(region_name=REGION)
client = session.client('codedeploy')
# status # 'Pending' | 'InProgress' | 'Succeeded' | 'Failed' | 'Skipped' | 'Unknown'
client.put_lifecycle_event_hook_execution_status(
deploymentId=deployment_id,
lifecycleEventHookExecutionId=lifecycle_eventhook_execution_id,
status='Succeeded'
)
```
#### File: lab-ecs-fargate-cd-blue-green-infra/flask.d/app.py
```python
from flask import Flask, jsonify, request
app = Flask(__name__)
app_version = "12 for Blue/Green CodeDeploy"
@app.route('/', methods=['GET'])
def welcome():
return f'Welcome to flask framework. Version:{app_version}'
@app.route('/hello', methods=['GET'])
def health_check():
return f'It\'s version:{app_version}'
@app.route('/login', methods=['POST'])
def result():
if request.form.get('data'):
return request.form.get('data')
else:
return jsonify({"message": "ERROR: Unauthorized"}), 401
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=80)
``` |
{
"source": "9kalikali/networking",
"score": 2
} |
#### File: networking/router/mytopo.py
```python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
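# Chain topology: each host hangs off its own router and the routers are
# linked in a line (h1-r1, h2-r2, h3-r3, r1-r2-r3).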
class RouterTopo(Topo):
def build(self):
h1 = self.addHost('h1')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
r1 = self.addHost('r1')
r2 = self.addHost('r2')
r3 = self.addHost('r3')
self.addLink(h1, r1)
self.addLink(h2, r2)
self.addLink(h3, r3)
self.addLink(r1, r2)
self.addLink(r2, r3)
if __name__ == '__main__':
topo = RouterTopo()
net = Mininet(topo = topo, controller = None)
h1, h2, h3= net.get('h1', 'h2', 'h3')
r1, r2, r3 = net.get('r1', 'r2', 'r3')
h1.cmd('ifconfig h1-eth0 10.0.1.11/24')
h2.cmd('ifconfig h2-eth0 10.0.2.22/24')
h3.cmd('ifconfig h3-eth0 10.0.3.33/24')
h1.cmd('route add default gw 10.0.1.1')
h2.cmd('route add default gw 10.0.2.1')
h3.cmd('route add default gw 10.0.3.1')
for h in (h1, h2, h3):
h.cmd('./scripts/disable_offloading.sh')
h.cmd('./scripts/disable_ipv6.sh')
r1.cmd('ifconfig r1-eth0 10.0.1.1/24')
r1.cmd('ifconfig r1-eth1 10.0.4.1/24')
r1.cmd('route add -net 10.0.2.0 netmask 255.255.255.0 gw 10.0.4.2 dev r1-eth1')
r1.cmd('route add -net 10.0.3.0 netmask 255.255.255.0 gw 10.0.4.2 dev r1-eth1')
r1.cmd('route add -net 10.0.5.0 netmask 255.255.255.252 gw 10.0.4.2 dev r1-eth1')
r2.cmd('ifconfig r2-eth0 10.0.2.1/24')
r2.cmd('ifconfig r2-eth1 10.0.4.2/24')
r2.cmd('ifconfig r2-eth2 10.0.5.1/24')
r2.cmd('route add -net 10.0.1.0 netmask 255.255.255.0 gw 10.0.4.1 dev r2-eth1')
r2.cmd('route add -net 10.0.3.0 netmask 255.255.255.0 gw 10.0.5.2 dev r2-eth2')
r3.cmd('ifconfig r3-eth0 10.0.3.1/24')
r3.cmd('ifconfig r3-eth1 10.0.5.2/24')
r3.cmd('route add -net 10.0.2.0 netmask 255.255.255.0 gw 10.0.5.1 dev r3-eth1')
r3.cmd('route add -net 10.0.1.0 netmask 255.255.255.0 gw 10.0.5.1 dev r3-eth1')
r3.cmd('route add -net 10.0.4.0 netmask 255.255.255.252 gw 10.0.5.1 dev r3-eth1')
for r in (r1, r2, r3):
r.cmd('./scripts/disable_arp.sh')
r.cmd('./scripts/disable_icmp.sh')
r.cmd('./scripts/disable_ip_forward.sh')
net.start()
CLI(net)
net.stop()
``` |
{
"source": "9kin/codeforces-dl",
"score": 3
} |
#### File: codeforces-dl/cfdl/aio.py
```python
import asyncio
from concurrent.futures import ThreadPoolExecutor
from typing import List, Tuple
import aiohttp
from lxml.html import HtmlElement, fromstring
from magic import from_buffer as file_type
from cfdl.bar_utils import Bar
from cfdl.models import Data
from cfdl.utils import (extend_task, get_tasks, get_tutorials, parse_tutorial,
rewrite_link, tasks_jsonify)
from cfdl.xhr import xhr_tutorials
headers = {
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36",
}
# https://stackoverflow.com/questions/59902102/why-is-imperative-mood-important-for-docstrings
# https://stackoverflow.com/questions/3898572/what-is-the-standard-python-docstring-format
def get_tree(
html: str,
) -> HtmlElement:
"""Get HtmlElement from a html.
Args:
html (str): html page content
Returns:
HtmlElement : tree element
"""
tree = fromstring(html)
tree.rewrite_links(rewrite_link)
return tree
async def contests_html(
session: aiohttp.ClientSession,
page: int,
) -> str:
"""Get html of a contest page.
Args:
session (aiohttp.ClientSession) : session
page (int) : number of page (0..)
Returns:
Html of page.
"""
url = "https://codeforces.com/contests/page/{0}?complete=true".format(
page,
)
async with session.get(url) as response:
return await response.text()
def get_contest_from_page(tree: HtmlElement) -> List[Tuple[str, int]]:
"""Get list of tasks from tree.
Args:
tree (HtmlElement) : tree of contest page.
Returns:
List[Tuple[str, int]] : contest_name, contest_id
"""
contests = []
table = tree.xpath(
'//*[@id="pageContent"]/div[1]/div[2]/div[1]/div[6]/table/tr',
)[1:]
for contest in table:
text = contest.text_content().split()
contest_name = " ".join(text[: text.index("Enter")])
contest_id = contest.get("data-contestid")
if contest_id == "693":
continue
contests.append((contest_name, int(contest_id)))
return contests
async def get_html_blog(
session: aiohttp.ClientSession,
url: str,
contests: List[int],
) -> Tuple[str, str, List[int]]:
"""Get html from a blog url.
Args:
session (aiohttp.ClientSession) : session
url (str) : blog url
contests (List[int]) : list of contests
Returns:
Tuple[str, str, List[int]] : url, html, contests list
"""
async with session.get(url) as resp:
return url, await resp.text(), contests
async def get_html_task(
session: aiohttp.ClientSession,
contest_id: int,
task_letter: str,
) -> Tuple[int, str, str]:
"""Get html of condition from a task.
Args:
session (aiohttp.ClientSession) : session
contest_id (int) : contest id of the contest
task_letter (str) : letter of the task
Returns:
Tuple[int, str, str] : contest_id, task_letter, html code of the task.
"""
url = "https://codeforces.com/contest/{0}/problem/{1}?locale=ru".format(
contest_id,
task_letter,
)
async with session.get(url) as resp:
text = await resp.text(errors="ignore")
if file_type(text, mime=True) != "text/html":
return contest_id, task_letter, "<head></head><body></body>"
return contest_id, task_letter, text
async def get_html_contest(
session: aiohttp.ClientSession,
contest_id: int,
) -> Tuple[int, str]:
"""Get html of the contest main page.
Args:
session (aiohttp.ClientSession) : session
contest_id (int) : contest id of the contest
Returns:
Tuple[int, str] : contest_id, html code of the contest.
"""
url = "http://codeforces.com/contest/{contest_id}?locale=ru".format(
contest_id=contest_id,
)
async with session.get(url) as resp:
return contest_id, await resp.text()
async def parse_contest(
session: aiohttp.ClientSession,
contests: List[int],
debug: bool = True,
) -> List[Tuple[int, str]]:
"""Get url for all tutorials and extends tasks for the contests.
Args:
session (aiohttp.ClientSession) : session
contests (List[int]) : list of contests
debug (bool) : if true shows progress bar
Returns:
List[Tuple[int, str]] : contest_id, tutorial url for this contest_id.
"""
loop = asyncio.get_running_loop()
progress_bar = Bar(range(len(contests)), debug=debug)
contests_tree = []
    futures = [get_html_contest(session, url) for url in contests]
    with ThreadPoolExecutor() as pool:
        for future in asyncio.as_completed(futures):
contest_id, contest_html = await future
contest_tree = await loop.run_in_executor(
pool,
get_tree,
contest_html,
)
contests_tree.append((contest_id, contest_tree))
progress_bar.update()
progress_bar.set_description(
"parse contest {0}".format(
contest_id,
),
)
blog_urls = []
for contest_id, contest_tree in contests_tree:
extend_task(contest_id, contest_tree)
for url in get_tutorials(contest_tree):
blog_urls.append((contest_id, url))
return blog_urls
async def parse_blog(
session: aiohttp.ClientSession,
data_storage: Data,
blog_urls: dict,
debug: bool = True,
):
"""Extract solutions for all tasks in this tutorials.
Args:
session (aiohttp.ClientSession) : session
data_storage (Data) : data object
blog_urls (dict) : list of contests
debug (bool) : if true shows progress bar
"""
loop = asyncio.get_running_loop()
progress_bar = Bar(range(len(blog_urls)), debug=debug)
    futures = [
        get_html_blog(session, url, blog_urls[url]) for url in blog_urls
    ]
    with ThreadPoolExecutor() as pool:
        for future in asyncio.as_completed(futures):
url, html, contests = await future
await loop.run_in_executor(
pool,
parse_tutorial,
html,
data_storage,
contests,
url,
)
progress_bar.update()
progress_bar.set_description("parse blog")
async def parse_tasks(
session: aiohttp.ClientSession,
problems: List[Tuple[int, str]],
debug: bool = True,
) -> List[Tuple[int, str, HtmlElement]]:
"""Get html tree for the tasks.
Args:
session (aiohttp.ClientSession) : session
problems (List[Tuple[int, str]]) : list of tuple (contest_id, task_letter)
debug (bool) : if true shows progress bar
Returns:
List[Tuple[int, str, HtmlElement]] : contest_id, task_letter, tree for task
"""
loop = asyncio.get_running_loop()
tasks = []
progress_bar = Bar(range(len(problems)), debug=debug)
    futures = [
        get_html_task(session, contest_id, task_letter)
        for contest_id, task_letter in problems
    ]
    with ThreadPoolExecutor() as pool:
        for future in asyncio.as_completed(futures):
contest_id, task_letter, task_html = await future
task_tree = await loop.run_in_executor(pool, get_tree, task_html)
progress_bar.update()
progress_bar.set_description(
"parse task {0}{1}".format(
contest_id,
task_letter,
),
)
tasks.append((contest_id, task_letter, task_tree))
return tasks
async def async_parse(
data_storage: Data,
contests: List[int],
debug: bool = True,
):
"""Parse contests. Save all data in data_storage.
Args:
data_storage (Data) : data object
contests (List[int]) : list of contests
debug (bool) : if true shows progress bar
"""
session = aiohttp.ClientSession()
all_contests = list(set(contests))
blogs = await parse_contest(
session=session,
contests=all_contests,
debug=debug,
)
blog_urls = {}
for contest_id, url in blogs:
if url not in blog_urls:
blog_urls[url] = [contest_id]
elif contest_id not in blog_urls[url]:
blog_urls[url].append(contest_id)
all_tasks = get_tasks(contests)
all_tasks = [str(contest_id) + letter for contest_id, letter in all_tasks]
for tutorial in await xhr_tutorials(all_tasks=all_tasks, debug=debug):
data_storage.add_xhr_data(tutorial)
await parse_blog(
session=session,
blog_urls=blog_urls,
debug=debug,
data_storage=data_storage,
)
all_tasks = get_tasks(contests)
for contest_id, letter, tree in await parse_tasks(
session=session,
problems=all_tasks,
debug=debug,
):
data_storage.add_task_tree(
contest_id=contest_id,
letter=letter,
tree=tree,
)
await session.close()
async def async_get_contest(debug: bool = True) -> List[Tuple[str, int]]:
"""Get all contests from codeforces.
Args:
debug (bool) : if true shows progress bar
Returns:
List[Tuple[str, int]] : contest_name, contest_id
"""
session = aiohttp.ClientSession()
tree = get_tree(await contests_html(session=session, page=1))
cnt_pages = int(
tree.xpath('//*[@id="pageContent"]/div[1]/div[2]/div[2]/ul/li')[-2]
.text_content()
.strip()
)
contests = [*get_contest_from_page(tree)]
loop = asyncio.get_running_loop()
progress_bar = Bar(range(cnt_pages), debug=debug)
progress_bar.update()
progress_bar.set_description("parse contests page")
    futures = [
        contests_html(session=session, page=page)
        for page in range(2, cnt_pages + 1)
    ]
    with ThreadPoolExecutor() as pool:
        for future in asyncio.as_completed(futures):
html = await future
tree = await loop.run_in_executor(pool, get_tree, html)
contests += get_contest_from_page(tree)
progress_bar.update()
await session.close()
contests.sort(key=lambda element: element[1])
return contests
def get_contests(debug: bool = True) -> List[Tuple[str, int]]:
"""Execute async function
Args:
debug (bool) : if true shows progress bar.
Returns:
List[Tuple[str, int]] : contest_name, contest_id
"""
return asyncio.run(async_get_contest(debug=debug))
def download(data_storage: Data, contests: List[int], debug=True):
"""Run contests parsing. Save data to database.
Args:
data_storage (Data) : data object
contests (List[int]) : list of contests
debug (bool) : if true shows progress bar.
"""
asyncio.run(
async_parse(
data_storage=data_storage,
contests=contests,
debug=debug,
),
)
for task in tasks_jsonify(data=data_storage, tasks=get_tasks(contests)):
data_storage.add_json_task(task)
__all__ = [
"async_get_contest",
"async_parse",
"contests_html",
"download",
"get_contest_from_page",
"get_contests",
"get_html_blog",
"get_html_contest",
"get_html_task",
"get_tree",
"headers",
"parse_blog",
"parse_contest",
"parse_tasks",
]
``` |
{
"source": "9kin/pytmxloader",
"score": 4
} |
#### File: examples/02_render_features/02_3_parallax_scrolling.py
```python
__revision__ = "$Rev: 115 $"
__version__ = "3.0.0." + __revision__[6:-2]
__author__ = 'DR0ID @ 2009-2011'
import sys
import os
import pygame
try:
import _path
except:
pass
import tiledtmxloader
# -----------------------------------------------------------------------------
def main():
"""
Main method.
"""
args = sys.argv[1:]
if len(args) < 1:
path_to_map = os.path.join(os.pardir, "001-1.tmx")
print(("usage: python %s your_map.tmx\n\nUsing default map '%s'\n" % \
(os.path.basename(__file__), path_to_map)))
else:
path_to_map = args[0]
demo_pygame(path_to_map)
# -----------------------------------------------------------------------------
def demo_pygame(file_name):
"""
    Example showing how to use the parallax scrolling feature.
"""
    # parse the map (it is done here to initialize the
    # window the same size as the map if it is small enough)
world_map = tiledtmxloader.tmxreader.TileMapParser().parse_decode(file_name)
# init pygame and set up a screen
pygame.init()
pygame.display.set_caption("tiledtmxloader - " + file_name + \
" - keys: arrows, 0-9")
screen_width = min(1024, world_map.pixel_width)
screen_height = min(768, world_map.pixel_height)
screen = pygame.display.set_mode((screen_width, screen_height))
# load the images using pygame
resources = tiledtmxloader.helperspygame.ResourceLoaderPygame()
resources.load(world_map)
# prepare map rendering
assert world_map.orientation == "orthogonal"
# renderer
renderer = tiledtmxloader.helperspygame.RendererPygame()
# cam_offset is for scrolling
cam_world_pos_x = 0
cam_world_pos_y = 0
# set initial cam position and size
renderer.set_camera_position_and_size(cam_world_pos_x, cam_world_pos_y, \
screen_width, screen_height, alignment='topleft')
# retrieve the layers
sprite_layers = tiledtmxloader.helperspygame.get_layers_from_map(resources)
sprite_layers = [layer for layer in sprite_layers if not layer.is_object_group]
assert len(sprite_layers) >= 2, "use a map with at least 2 layers!"
    # set parallax factors
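    # layers deeper in the stack get smaller factors, so they scroll more
    # slowly than the foreground and create the illusion of depth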
for idx in range(len(sprite_layers)):
sprite_layers[idx].set_layer_paralax_factor(1.0 / len(sprite_layers) * (idx + 1))
# variables for the main loop
clock = pygame.time.Clock()
running = True
speed = 0.075
# set up timer for fps printing
pygame.time.set_timer(pygame.USEREVENT, 1000)
# mainloop
while running:
dt = clock.tick()
# event handling
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.USEREVENT:
print("fps: ", clock.get_fps())
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
running = False
# find directions
direction_x = pygame.key.get_pressed()[pygame.K_RIGHT] - \
pygame.key.get_pressed()[pygame.K_LEFT]
direction_y = pygame.key.get_pressed()[pygame.K_DOWN] - \
pygame.key.get_pressed()[pygame.K_UP]
# update position
cam_world_pos_x += speed * dt * direction_x
cam_world_pos_y += speed * dt * direction_y
# adjust camera to position according to the keypresses
renderer.set_camera_position(cam_world_pos_x, cam_world_pos_y, "topleft")
# clear screen, might be left out if every pixel is redrawn anyway
screen.fill((0, 0, 0))
# render the map
for sprite_layer in sprite_layers:
if sprite_layer.is_object_group:
                # we don't draw the object group layers
# you should filter them out if not needed
continue
else:
renderer.render_layer(screen, sprite_layer)
pygame.display.flip()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
main()
``` |
{
"source": "9kin/shop-new",
"score": 3
} |
#### File: shop-new/shop/forms.py
```python
from flask import request
from flask_wtf import FlaskForm
from flask_wtf.file import FileAllowed, FileField, FileRequired
from werkzeug.security import check_password_hash, generate_password_hash
from wtforms import (
PasswordField,
SelectField,
StringField,
SubmitField,
TextAreaField,
form,
validators,
)
from wtforms.validators import DataRequired, Optional
from .database import User
class UploadForm(FlaskForm):
file = FileField(
validators=[
FileRequired("File was empty!"),
FileAllowed(["txt"], "txt only!"),
]
)
submit = SubmitField("Загрузить")
class LoginForm(FlaskForm):
login = StringField()
password = PasswordField()
def validate_login(self, field):
cur_user = self.get_user()
if cur_user is None:
raise validators.ValidationError("Invalid user")
if not check_password_hash(cur_user.password, self.password.data):
raise validators.ValidationError("Invalid password")
def get_user(self):
return User.get(User.login == self.login.data)
class SearchForm(FlaskForm):
q = StringField("поиск товара", validators=[DataRequired()])
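    # The search form is submitted via GET, so it reads its data from the
    # query string and skips CSRF protection (no server state is changed).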
def __init__(self, *args, **kwargs):
if "formdata" not in kwargs:
kwargs["formdata"] = request.args
if "csrf_enabled" not in kwargs:
kwargs["csrf_enabled"] = False
super(SearchForm, self).__init__(*args, **kwargs)
```
#### File: shop-new/shop/lint.py
```python
from os import system
from pathlib import Path
import isort
def main():
APP_DIR = Path(__file__).resolve().parent
files = list(APP_DIR.parent.glob("*.py")) + list(APP_DIR.glob("*.py"))
for file in files:
isort.file(file)
system("black . --line-length 79")
```
#### File: shop-new/shop/search.py
```python
from elasticsearch import Elasticsearch
elasticsearch = Elasticsearch([{"host": "localhost", "port": 9200}])
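# Full-text search: ask Elasticsearch for matching ids, fetch the rows via
# the peewee model, then return them in the ranked order Elasticsearch produced.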
def search(index, model, expression):
ids, _ = query_index(index, expression)
if len(ids) == 0:
return []
items = [i for i in model.select().where(model.id.in_(ids))]
m = {}
for item in items:
m[item.id] = item
return [m[id] for id in ids]
def add_to_index(index, model):
payload = {}
for field in model.__searchable__:
payload[field] = getattr(model, field)
e = elasticsearch.index(
index=index,
doc_type=index,
id=model.id,
body=payload,
timeout="10000s",
)
def remove_from_index(index, model):
elasticsearch.delete(index=index, doc_type=index, id=model.id)
def query_index(index, query):
# https://stackoverflow.com/a/40390310/13156381
search = elasticsearch.search(
index=index,
doc_type=index,
body={
"query": {
"simple_query_string": {
"query": query,
"default_operator": "AND",
"fields": ["name"],
}
},
"from": 0,
"size": 1000,
},
)
ids = [int(hit["_id"]) for hit in search["hits"]["hits"]]
return ids, search["hits"]["total"]
```
#### File: shop-new/shop/validate.py
```python
import configparser
import os
from pprint import pprint
from prompt_toolkit.validation import ValidationError, Validator
from PyInquirer import Separator, print_json
from PyInquirer import prompt as pypromt
from rich import print
from .database import Config, Item, db
from .ext import parse_config
from .keywords import Keyword, KeywordTable, aslist_cronly
def cprint(string):
print(string, end="")
class FileValidator(Validator):
def validate(self, document):
exp = ValidationError(
message="Please enter a correct file",
cursor_position=len(document.text),
)
file = document.text
if not (os.path.isfile(file) and os.path.exists(file)):
raise exp
def prompt(question):
result = pypromt(question)
if result == {}:
exit(0)
else:
return result[question["name"]]
def get(answer, key):
if answer == {}:
exit(0)
else:
return answer[key]
def get_configs(not_valid=[]):
choices = []
for i, config in enumerate(Config.select()):
if i in not_valid:
choices.append(Separator(config.name))
else:
choices.append(config.name)
return choices
def choose_config(not_valid=[]):
config_name = prompt(
{
"type": "rawlist",
"message": "Select config?",
"name": "config",
"choices": get_configs(not_valid=not_valid),
}
)
return Config.select().where(Config.name == config_name).get()
def input_file():
return prompt(
{
"type": "input",
"name": "config_name",
"message": "Type file name (INI)?",
"validate": FileValidator,
}
)
def main():
db.create_tables([Config])
confirm = prompt(
{
"type": "confirm",
"message": "Use config from database?",
"name": "confirm",
"default": True,
}
)
if not confirm:
choose = prompt(
{
"type": "rawlist",
"name": "list",
"message": "What do you want to do?",
"choices": ["Edit config", "Create new config",],
}
)
if choose == "Edit config":
config = choose_config()
else:
name = prompt(
{
"type": "input",
"name": "config_name",
"message": "Type config name?",
}
)
config = Config(name=name, config="")
config.config = open(input_file()).read()
config.save()
exit(0)
first = choose_config()
second = choose_config(not_valid=[first.id - 1])
m1, warnings1 = parse_config(first.config)
m2, warnings2 = parse_config(second.config)
cnt_1, cnt_2 = 0, 0
keys = sorted(list(set(m1.keys()) | set(m2.keys())))
for group in keys:
f = set()
if group in m1:
f = m1[group]
cnt_1 += len(f)
s = set()
if group in m2:
s = m2[group]
cnt_2 += len(s)
plus = s - f
minus = f - s
cprint(
f"[white bold]{group}[/] [green bold]+{len(plus)}[/] [red bold]-{len(minus)}[/]\n"
)
for item in sorted(list(plus)):
cprint(f"[green bold]{item}[/]\n")
for item in sorted(list(minus)):
cprint(f"[red bold]{item}[/]\n")
print(cnt_1, cnt_2)
``` |
{
"source": "9kitsune/inky",
"score": 3
} |
#### File: library/tests/test_init.py
```python
import sys
import mock
from tools import MockSMBus
def mockery():
"""Mock requires modules."""
sys.modules['RPi'] = mock.Mock()
sys.modules['RPi.GPIO'] = mock.Mock()
sys.modules['spidev'] = mock.Mock()
sys.modules['smbus2'] = mock.Mock()
sys.modules['smbus2'].SMBus = MockSMBus
def test_init_phat_black():
"""Test initialisation of InkyPHAT with 'black' colour choice."""
mockery()
from inky import InkyPHAT
InkyPHAT('black')
def test_init_phat_red():
"""Test initialisation of InkyPHAT with 'red' colour choice."""
mockery()
from inky import InkyPHAT
InkyPHAT('red')
def test_init_phat_yellow():
"""Test initialisation of InkyPHAT with 'yellow' colour choice."""
mockery()
from inky import InkyPHAT
    InkyPHAT('yellow')
def test_init_what_black():
"""Test initialisation of InkyWHAT with 'black' colour choice."""
mockery()
from inky import InkyWHAT
InkyWHAT('black')
def test_init_what_red():
"""Test initialisation of InkyWHAT with 'red' colour choice."""
mockery()
from inky import InkyWHAT
InkyWHAT('red')
def test_init_what_yellow():
"""Test initialisation of InkyWHAT with 'yellow' colour choice."""
mockery()
from inky import InkyWHAT
    InkyWHAT('yellow')
``` |
{
"source": "9kittikul/machinaris",
"score": 2
} |
#### File: api/commands/chiadog_cli.py
```python
import datetime
import os
import psutil
import signal
import shutil
import sqlite3
import time
import traceback
import yaml
from flask import Flask, jsonify, abort, request, flash, g
from subprocess import Popen, TimeoutExpired, PIPE
from api import app
def load_config(blockchain):
    return open('/root/.chia/chiadog/config.yaml', 'r').read()
def save_config(config, blockchain):
try:
# Validate the YAML first
yaml.safe_load(config)
# Save a copy of the old config file
src='/root/.chia/chiadog/config.yaml'
dst='/root/.chia/chiadog/config.yaml'+time.strftime("%Y%m%d-%H%M%S")+".yaml"
shutil.copy(src,dst)
# Now save the new contents to main config file
with open(src, 'w') as writer:
writer.write(config)
except Exception as ex:
app.logger.info(traceback.format_exc())
raise Exception('Updated config.yaml failed validation!\n' + str(ex))
else:
if get_chiadog_pid(blockchain):
stop_chiadog()
start_chiadog()
def get_chiadog_pid(blockchain):
for proc in psutil.process_iter(['pid', 'name', 'cmdline']):
if proc.info['name'] == 'python3' and '/root/.chia/chiadog/config.yaml' in proc.info['cmdline']:
return proc.info['pid']
return None
def dispatch_action(job):
service = job['service']
if service != 'monitoring':
raise Exception("Only monitoring jobs handled here!")
action = job['action']
if action == "start":
start_chiadog()
elif action == "stop":
stop_chiadog()
elif action == "restart":
stop_chiadog()
time.sleep(5)
start_chiadog()
else:
raise Exception("Unsupported action {0} for monitoring.".format(action))
def start_chiadog(chain = None):
#app.logger.info("Starting monitoring....")
if chain:
blockchains = [ chain ]
else:
blockchains = [ b.strip() for b in os.environ['blockchains'].split(',') ]
for blockchain in blockchains:
try:
workdir = "/chiadog"
offset_file = "{0}/debug.log.offset".format(workdir)
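            # drop any stale offset so chiadog re-reads the debug log from the start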
if os.path.exists(offset_file):
os.remove(offset_file)
configfile = "/root/.chia/chiadog/config.yaml"
logfile = "/root/.chia/chiadog/logs/chiadog.log"
proc = Popen("nohup /{0}-blockchain/venv/bin/python3 -u main.py --config {1} >> {2} 2>&1 &".format(blockchain, configfile, logfile), \
shell=True, universal_newlines=True, stdout=None, stderr=None, cwd="/chiadog")
except:
app.logger.info('Failed to start monitoring!')
app.logger.info(traceback.format_exc())
def stop_chiadog():
#app.logger.info("Stopping monitoring....")
blockchains = [ b.strip() for b in os.environ['blockchains'].split(',') ]
for blockchain in blockchains:
try:
os.kill(get_chiadog_pid(blockchain), signal.SIGTERM)
except:
app.logger.info('Failed to stop monitoring!')
app.logger.info(traceback.format_exc())
```
#### File: migrations/versions/8e5760ed365b_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8e5760ed365b'
down_revision = None
branch_labels = None
depends_on = None
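# Multi-database migration: Alembic passes the engine name and the module
# dispatches to the matching upgrade_<name>/downgrade_<name> function below.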
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('alerts',
sa.Column('unique_id', sa.String(length=128), nullable=False),
sa.Column('hostname', sa.String(length=255), nullable=False),
sa.Column('priority', sa.String(length=64), nullable=False),
sa.Column('service', sa.String(length=64), nullable=False),
sa.Column('message', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('unique_id')
)
op.create_table('blockchains',
sa.Column('hostname', sa.String(length=255), nullable=False),
sa.Column('details', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('hostname')
)
op.create_table('challenges',
sa.Column('unique_id', sa.String(length=64), nullable=False),
sa.Column('hostname', sa.String(length=255), nullable=False),
sa.Column('challenge_id', sa.String(length=64), nullable=False),
sa.Column('plots_past_filter', sa.String(length=32), nullable=False),
sa.Column('proofs_found', sa.Integer(), nullable=False),
sa.Column('time_taken', sa.String(length=32), nullable=False),
sa.Column('created_at', sa.String(length=64), nullable=False),
sa.PrimaryKeyConstraint('unique_id')
)
op.create_table('connections',
sa.Column('hostname', sa.String(length=255), nullable=False),
sa.Column('details', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('hostname')
)
op.create_table('farms',
sa.Column('hostname', sa.String(length=255), nullable=False),
sa.Column('mode', sa.String(length=32), nullable=False),
sa.Column('status', sa.String(length=128), nullable=False),
sa.Column('plot_count', sa.Integer(), nullable=False),
sa.Column('plots_size', sa.REAL(), nullable=False),
sa.Column('total_chia', sa.REAL(), nullable=False),
sa.Column('netspace_size', sa.REAL(), nullable=False),
sa.Column('expected_time_to_win', sa.String(length=64), nullable=False),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('hostname')
)
op.create_table('keys',
sa.Column('hostname', sa.String(length=255), nullable=False),
sa.Column('details', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('hostname')
)
op.create_table('plots',
sa.Column('plot_id', sa.String(length=8), nullable=False),
sa.Column('hostname', sa.String(length=255), nullable=False),
sa.Column('dir', sa.String(length=255), nullable=False),
sa.Column('file', sa.String(length=255), nullable=False),
sa.Column('size', sa.Integer(), nullable=False),
sa.Column('created_at', sa.String(length=64), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('plot_id')
)
op.create_table('plottings',
sa.Column('plot_id', sa.String(length=8), nullable=False),
sa.Column('hostname', sa.String(length=255), nullable=False),
sa.Column('k', sa.Integer(), nullable=False),
sa.Column('tmp', sa.String(length=255), nullable=False),
sa.Column('dst', sa.String(length=255), nullable=False),
sa.Column('wall', sa.String(length=8), nullable=False),
sa.Column('phase', sa.String(length=8), nullable=False),
sa.Column('size', sa.String(length=8), nullable=False),
sa.Column('pid', sa.Integer(), nullable=False),
sa.Column('stat', sa.String(length=8), nullable=False),
sa.Column('mem', sa.String(length=8), nullable=False),
sa.Column('user', sa.String(length=8), nullable=False),
sa.Column('sys', sa.String(length=8), nullable=False),
sa.Column('io', sa.String(length=8), nullable=False),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('plot_id')
)
op.create_table('wallets',
sa.Column('hostname', sa.String(length=255), nullable=False),
sa.Column('details', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('hostname')
)
op.create_table('workers',
sa.Column('hostname', sa.String(length=255), nullable=False),
sa.Column('mode', sa.String(length=64), nullable=False),
sa.Column('services', sa.String(), nullable=False),
sa.Column('url', sa.String(), nullable=False),
sa.Column('config', sa.String(), nullable=False),
sa.Column('latest_ping_result', sa.String(), nullable=True),
sa.Column('ping_success_at', sa.DateTime(), nullable=True),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('hostname')
)
# ### end Alembic commands ###
def downgrade_():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('workers')
op.drop_table('wallets')
op.drop_table('plottings')
op.drop_table('plots')
op.drop_table('keys')
op.drop_table('farms')
op.drop_table('connections')
op.drop_table('challenges')
op.drop_table('blockchains')
op.drop_table('alerts')
# ### end Alembic commands ###
def upgrade_stats():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('stat_netspace_size',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('value', sa.REAL(), nullable=True),
sa.Column('created_at', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('stat_plot_count',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('value', sa.REAL(), nullable=True),
sa.Column('created_at', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('stat_plots_disk_free',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('hostname', sa.String(), nullable=True),
sa.Column('path', sa.String(), nullable=True),
sa.Column('value', sa.REAL(), nullable=True),
sa.Column('created_at', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('stat_plots_disk_used',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('hostname', sa.String(), nullable=True),
sa.Column('path', sa.String(), nullable=True),
sa.Column('value', sa.REAL(), nullable=True),
sa.Column('created_at', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('stat_plots_size',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('value', sa.REAL(), nullable=True),
sa.Column('created_at', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('stat_plots_total_used',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('value', sa.REAL(), nullable=True),
sa.Column('created_at', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('stat_plotting_disk_free',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('hostname', sa.String(), nullable=True),
sa.Column('path', sa.String(), nullable=True),
sa.Column('value', sa.REAL(), nullable=True),
sa.Column('created_at', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('stat_plotting_disk_used',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('hostname', sa.String(), nullable=True),
sa.Column('path', sa.String(), nullable=True),
sa.Column('value', sa.REAL(), nullable=True),
sa.Column('created_at', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('stat_plotting_total_used',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('value', sa.REAL(), nullable=True),
sa.Column('created_at', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('stat_time_to_win',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('value', sa.REAL(), nullable=True),
sa.Column('created_at', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('stat_total_chia',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('value', sa.REAL(), nullable=True),
sa.Column('created_at', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade_stats():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('stat_total_chia')
op.drop_table('stat_time_to_win')
op.drop_table('stat_plotting_total_used')
op.drop_table('stat_plotting_disk_used')
op.drop_table('stat_plotting_disk_free')
op.drop_table('stat_plots_total_used')
op.drop_table('stat_plots_size')
op.drop_table('stat_plots_disk_used')
op.drop_table('stat_plots_disk_free')
op.drop_table('stat_plot_count')
op.drop_table('stat_netspace_size')
# ### end Alembic commands ###
def upgrade_chiadog():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('notification',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('priority', sa.String(length=40), nullable=False),
sa.Column('service', sa.String(length=60), nullable=False),
sa.Column('message', sa.String(length=255), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade_chiadog():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('notification')
# ### end Alembic commands ###
```
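These `upgrade_*`/`downgrade_*` pairs follow Alembic's multi-database template, which dispatches to one function pair per configured bind. As a sketch, the same migrations can be driven from Python instead of the `alembic` CLI; the `alembic.ini` path below is an assumption, not taken from this excerpt:
```python
# Minimal sketch: run the Alembic migrations programmatically.
from alembic import command
from alembic.config import Config

config = Config("alembic.ini")   # hypothetical location of the project's config
command.upgrade(config, "head")  # invokes upgrade_(), upgrade_stats(), upgrade_chiadog()
```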
#### File: api/schedules/status_farm.py
```python
import os
import traceback
from flask import g
from common.config import globals
from common.utils import converters
from api import app
from api.commands import chia_cli
from api import utils
# On initialization Chia can report plot sizes that do not parse cleanly, so guard the conversion below
def safely_gather_plots_size_gibs(plots_size):
plots_size_gibs = 0
    try:
        plots_size_gibs = converters.str_to_gibs(plots_size)
    except Exception:
        app.logger.info("Unconvertible plots size: {0}. Using zero.".format(plots_size))
        plots_size_gibs = 0
    try:
        float(plots_size_gibs)
    except Exception:
        app.logger.info("Unfloatable plots size: {0}. Using zero.".format(plots_size))
        plots_size_gibs = 0
return plots_size_gibs
def update():
with app.app_context():
hostname = utils.get_hostname()
for blockchain in globals.enabled_blockchains():
try:
farm_summary = chia_cli.load_farm_summary(blockchain)
payload = {
"hostname": hostname,
"blockchain": blockchain,
"mode": os.environ['mode'],
"status": "" if not hasattr(farm_summary, 'status') else farm_summary.status,
"plot_count": farm_summary.plot_count,
"plots_size": safely_gather_plots_size_gibs(farm_summary.plots_size),
"total_coins": 0 if not hasattr(farm_summary, 'total_coins') else farm_summary.total_coins,
"netspace_size": 0 if not hasattr(farm_summary, 'netspace_size') else converters.str_to_gibs(farm_summary.netspace_size),
"expected_time_to_win": "" if not hasattr(farm_summary, 'time_to_win') else farm_summary.time_to_win,
}
utils.send_post('/farms/', payload, debug=False)
except:
app.logger.info("Failed to load Chia farm summary and send.")
app.logger.info(traceback.format_exc())
```
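To see why `safely_gather_plots_size_gibs` guards both the conversion and the result, here is a self-contained illustration with a stand-in converter; the real parser is `common.utils.converters.str_to_gibs`, which is not part of this excerpt:
```python
# Stand-in converter for illustration only; not the Machinaris implementation.
def str_to_gibs(s):
    value, unit = s.split()  # raises ValueError on odd inputs...
    return float(value) * {"GiB": 1, "TiB": 1024}[unit]  # ...or KeyError on unknown units

def safely_gather(plots_size):
    try:
        gibs = str_to_gibs(plots_size)
        float(gibs)  # the second guard: reject non-numeric results
        return gibs
    except Exception:
        return 0

print(safely_gather("1.5 TiB"))  # 1536.0
print(safely_gather("Unknown"))  # 0
```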
#### File: views/partials/resources.py
```python
import datetime as dt
from flask.views import MethodView
from api import app
from api.extensions.api import Blueprint, SQLCursorPage
from common.extensions.database import db
from common.models import Partial
from .schemas import PartialSchema, PartialQueryArgsSchema, BatchOfPartialSchema, BatchOfPartialQueryArgsSchema
blp = Blueprint(
'Partial',
__name__,
url_prefix='/partials',
description="Operations on all partials recorded on farmer"
)
@blp.route('/')
class Partials(MethodView):
@blp.etag
@blp.arguments(BatchOfPartialQueryArgsSchema, location='query')
@blp.response(200, PartialSchema(many=True))
@blp.paginate(SQLCursorPage)
def get(self, args):
ret = db.session.query(Partial).filter_by(**args)
return ret
@blp.etag
@blp.arguments(BatchOfPartialSchema)
@blp.response(201, PartialSchema(many=True))
def post(self, new_items):
if len(new_items) == 0:
return "No partials provided.", 400
items = []
for new_item in new_items:
item = db.session.query(Partial).get(new_item['unique_id'])
            if not item:  # Request may contain previously received partials, only add new
item = Partial(**new_item)
items.append(item)
db.session.add(item)
else:
app.logger.debug("Skipping insert of existing partial: {0}".format(new_item['unique_id']))
db.session.commit()
return items
@blp.route('/<hostname>/<blockchain>')
class PartialByHostname(MethodView):
@blp.etag
@blp.response(200, PartialSchema)
def get(self, hostname, blockchain):
return db.session.query(Partial).filter(Partial.hostname==hostname, Partial.blockchain==blockchain)
@blp.etag
@blp.arguments(BatchOfPartialSchema)
@blp.response(200, PartialSchema(many=True))
def put(self, new_items, hostname, blockchain):
items = []
for new_item in new_items:
item = db.session.query(Partial).get(new_item['unique_id'])
            if not item:  # Request may contain previously received partials, only add new
item = Partial(**new_item)
items.append(item)
db.session.add(item)
db.session.commit()
return items
@blp.etag
@blp.response(204)
def delete(self, hostname, blockchain):
db.session.query(Partial).filter(Partial.hostname==hostname, Partial.blockchain==blockchain).delete()
db.session.commit()
```
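Because `post` skips any partial whose `unique_id` is already stored, a client can safely re-send overlapping batches. A sketch of such a call; the host and port are assumed, and the authoritative field set is defined by `PartialSchema`, which is not shown here:
```python
# Hypothetical client call; re-sending a batch is safe because existing
# unique_ids are skipped server-side.
import requests

batch = [
    {"unique_id": "abc123", "hostname": "farmer1", "blockchain": "chia"},
    # ...remaining PartialSchema fields as the schema requires
]
resp = requests.post("http://localhost:8927/partials/", json=batch)  # URL assumed
resp.raise_for_status()
print(resp.json())
```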
#### File: extensions/database/__init__.py
```python
import logging
import traceback
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def init_app(app):
db.init_app(app)
# No longer necessary here to create tables as flask-migrate manages this
#try:
# db.create_all(app=app)
#except:
# logging.error("Failed to create all for db. {0}".format(traceback.format_exc()))
# traceback.print_exc()
```
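A minimal sketch of how this extension is typically wired into a Flask application factory; the database URI is an assumption, not taken from this repository:
```python
# Hypothetical application factory using the init_app pattern above.
from flask import Flask
from common.extensions import database  # this module

def create_app():
    app = Flask(__name__)
    app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:////tmp/example.db"  # assumed
    database.init_app(app)  # binds the shared SQLAlchemy object to this app
    return app
```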
#### File: machinaris/scripts/plotman_migrate.py
```python
import logging
import os
import pathlib
import ruamel.yaml
import shutil
import sys
import time
import traceback
import yaml as sys_yaml
TARGET_VERSION = 2
PLOTMAN_SAMPLE = '/machinaris/config/plotman.sample.yaml'
PLOTMAN_EXAMPLE = '/root/.chia/plotman/plotman.sample.yaml'
PLOTMAN_CONFIG = '/root/.chia/plotman/plotman.yaml'
logging.basicConfig(level=logging.INFO)
yaml = ruamel.yaml.YAML()
yaml.indent(mapping=8, sequence=4, offset=2)
yaml.preserve_quotes = True
def migrate_config():
new_config = yaml.load(pathlib.Path(PLOTMAN_SAMPLE))
old_config = yaml.load(pathlib.Path(PLOTMAN_CONFIG))
# Migrate selected settings over from the old config
if 'directories' in old_config:
for setting in ['tmp', 'tmp2']:
if setting in old_config['directories']:
new_config['directories'][setting] = old_config['directories'][setting]
if 'scheduling' in old_config:
for setting in ['tmpdir_stagger_phase_major', 'tmpdir_stagger_phase_minor', \
'tmpdir_stagger_phase_limit', 'tmpdir_max_jobs', 'global_max_jobs', \
'global_stagger_m', 'polling_time_s']:
if setting in old_config['scheduling']:
new_config['scheduling'][setting] = old_config['scheduling'][setting]
if 'plotting' in old_config:
for setting in ['farmer_pk', 'pool_pk', 'type']:
if setting in old_config['plotting']:
new_config['plotting'][setting] = old_config['plotting'][setting]
# Have to handle dst special because of trailing comment about archiving
# which was completely overwriting new commented out section
if 'directories' in old_config:
if 'dst' in old_config['directories']:
            with open(PLOTMAN_CONFIG, 'r') as f:
                old_config_yaml = sys_yaml.safe_load(f)
            # Must use plain YAML here to get just the values in the list, not the old comments
            new_config['directories']['dst'] = old_config_yaml['directories']['dst']
# Save a copy of the old config file first
dst = "/root/.chia/plotman/plotman." + time.strftime("%Y%m%d-%H%M%S")+".yaml"
shutil.copy(PLOTMAN_CONFIG, dst)
# Then save the migrated config
yaml.dump(new_config, pathlib.Path(PLOTMAN_CONFIG))
if __name__ == "__main__":
try:
shutil.copy(PLOTMAN_SAMPLE, PLOTMAN_EXAMPLE) # Always place latest example file
if not os.path.exists(PLOTMAN_CONFIG):
print("No existing plotman config found, so copying sample to: {0}".format(PLOTMAN_CONFIG))
shutil.copy(PLOTMAN_SAMPLE, PLOTMAN_CONFIG)
# Check for config version
config = yaml.load(pathlib.Path(PLOTMAN_CONFIG))
if 'version' in config:
version = config["version"][0]
else:
version = 0
if version != TARGET_VERSION:
print("Migrating plotman.yaml as found version: {0}".format(version))
migrate_config()
except:
print(traceback.format_exc())
```
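The script deliberately mixes two YAML libraries: ruamel.yaml round-trips comments, which is what lets the sample config's commentary survive migration, while PyYAML's `safe_load` returns plain values with the comments dropped. A small self-contained demonstration:
```python
# Contrast ruamel.yaml round-tripping with PyYAML's plain loading.
import io
import ruamel.yaml
import yaml as sys_yaml

text = "directories:\n  dst:\n    - /plots1  # main disk\n"

rt = ruamel.yaml.YAML().load(text)  # CommentedMap; comments preserved
plain = sys_yaml.safe_load(text)    # plain dict/list; comments dropped

buf = io.StringIO()
ruamel.yaml.YAML().dump(rt, buf)
print(buf.getvalue())               # '# main disk' survives the round trip
print(plain["directories"]["dst"])  # ['/plots1']
```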
#### File: web/actions/stats.py
```python
import datetime
from shutil import disk_usage
import sqlite3
from flask import g
from sqlalchemy import or_
from common.utils import converters
from common.models.alerts import Alert
from common.models.stats import StatPlotCount, StatPlotsSize, StatTotalChia, StatNetspaceSize, StatTimeToWin, \
StatPlotsTotalUsed, StatPlotsDiskUsed, StatPlotsDiskFree, StatPlottingTotalUsed, \
StatPlottingDiskUsed, StatPlottingDiskFree
from web import app, db, utils
from web.actions import chia
DATABASE = '/root/.chia/machinaris/dbs/stats.db'
ALL_TABLES_BY_HOSTNAME = [
'stat_plots_disk_used',
'stat_plotting_disk_used',
'stat_plots_disk_free',
'stat_plotting_disk_free',
]
def get_stats_db():
db = getattr(g, '_stats_database', None)
if db is None:
db = g._stats_database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_stats_database', None)
if db is not None:
db.close()
def load_daily_diff(farm_summary):
for blockchain in farm_summary.farms:
summary = {}
# initialize defaults
since_date = datetime.datetime.now() - datetime.timedelta(hours=24)
since_str = since_date.strftime("%Y%m%d%H%M%S")
summary['plot_count'] = plot_count_diff(since_str, blockchain)
summary['plots_size'] = plots_size_diff(since_str, blockchain)
summary['total_coin'] = total_coin_diff(since_str, blockchain)
summary['netspace_size'] = netspace_size_diff(since_str, blockchain)
#app.logger.info("{0} -> {1}".format(blockchain, summary))
farm_summary.farms[blockchain]['daily_diff'] = summary
def plot_count_diff(since, blockchain):
result = ''
try:
latest = db.session.query(StatPlotCount).filter(StatPlotCount.blockchain==blockchain).order_by(StatPlotCount.created_at.desc()).limit(1).first()
#app.logger.info(latest.value)
before = db.session.query(StatPlotCount).filter(StatPlotCount.blockchain==blockchain, StatPlotCount.created_at <= since).order_by(StatPlotCount.created_at.desc()).limit(1).first()
#app.logger.info(before.value)
if (latest.value - before.value) != 0:
result = "%+0g in last day." % (latest.value - before.value)
except Exception as ex:
app.logger.debug("Failed to query for day diff of plot_count because {0}".format(str(ex)))
#app.logger.info("Result is: {0}".format(result))
return result
def plots_size_diff(since, blockchain):
result = ''
try:
latest = db.session.query(StatPlotsSize).filter(StatPlotsSize.blockchain==blockchain).order_by(StatPlotsSize.created_at.desc()).limit(1).first()
#app.logger.info(latest.value)
before = db.session.query(StatPlotsSize).filter(StatPlotsSize.blockchain==blockchain, StatPlotsSize.created_at <= since).order_by(StatPlotsSize.created_at.desc()).limit(1).first()
#app.logger.info(before.value)
gibs = (latest.value - before.value)
fmtted = converters.gib_to_fmt(gibs)
if fmtted == "0.000 B":
result = ""
elif not fmtted.startswith('-'):
result = "+{0} in last day.".format(fmtted)
else:
result = fmtted
except Exception as ex:
app.logger.debug("Failed to query for day diff of plots_size because {0}".format(str(ex)))
#app.logger.info("Result is: {0}".format(result))
return result
def total_coin_diff(since, blockchain):
result = ''
try:
latest = db.session.query(StatTotalChia).filter(StatTotalChia.blockchain==blockchain).order_by(StatTotalChia.created_at.desc()).limit(1).first()
#app.logger.info(latest.value)
before = db.session.query(StatTotalChia).filter(StatTotalChia.blockchain==blockchain, StatTotalChia.created_at <= since).order_by(StatTotalChia.created_at.desc()).limit(1).first()
#app.logger.info(before.value)
if (latest.value - before.value) != 0:
result = "%+6g in last day." % (latest.value - before.value)
except Exception as ex:
app.logger.debug("Failed to query for day diff of total_coin because {0}".format(str(ex)))
#app.logger.info("Result is: {0}".format(result))
return result
def netspace_size_diff(since, blockchain):
result = ''
try:
latest = db.session.query(StatNetspaceSize).filter(StatNetspaceSize.blockchain==blockchain).order_by(StatNetspaceSize.created_at.desc()).limit(1).first()
#app.logger.info(latest.value)
before = db.session.query(StatNetspaceSize).filter(StatNetspaceSize.blockchain==blockchain, StatNetspaceSize.created_at <= since).order_by(StatNetspaceSize.created_at.desc()).limit(1).first()
#app.logger.info(before.value)
gibs = (latest.value - before.value)
fmtted = converters.gib_to_fmt(gibs)
if fmtted == "0.000 B":
result = ""
elif not fmtted.startswith('-'):
result = "+{0} in last day.".format(fmtted)
else:
result = "{0} in last day.".format(fmtted)
except Exception as ex:
app.logger.debug("Failed to query for day diff of netspace_size because {0}".format(str(ex)))
#app.logger.debug("Result is: {0}".format(result))
return result
def load_daily_farming_summaries():
summary_by_workers = {}
since_date = datetime.datetime.now() - datetime.timedelta(hours=24)
for host in chia.load_farmers():
summary_by_workers[host.displayname] = {}
for wk in host.workers:
summary_by_workers[host.displayname][wk['blockchain']] = daily_summaries(since_date, wk['hostname'], wk['displayname'], wk['blockchain'])
#app.logger.info("{0}-{1}: {2}".format(host.displayname, wk['blockchain'], summary_by_workers[host.displayname][wk['blockchain']]))
return summary_by_workers
def daily_summaries(since, hostname, displayname, blockchain):
result = None
try:
result = db.session.query(Alert).filter(
or_(Alert.hostname==hostname,Alert.hostname==displayname),
Alert.blockchain==blockchain,
Alert.created_at >= since,
Alert.priority == "LOW",
Alert.service == "DAILY"
).order_by(Alert.created_at.desc()).first()
#app.logger.info("Daily for {0}-{1} is {2}".format(displayname, blockchain, result))
if result:
return result.message
except Exception as ex:
app.logger.info("Failed to query for latest daily summary for {0} - {1} because {2}".format(
hostname, blockchain, str(ex)))
return result
def load_recent_disk_usage(disk_type):
db = get_stats_db()
cur = db.cursor()
summary_by_worker = {}
value_factor = "" # Leave at GB for plotting disks
if disk_type == "plots":
value_factor = "/1024" # Divide to TB for plots disks
for host in chia.load_farmers():
hostname = host.hostname
dates = []
paths = {}
sql = "select path, value{0}, created_at from stat_{1}_disk_used where (hostname = ? or hostname = ?) order by created_at, path".format(value_factor, disk_type)
used_result = cur.execute(sql, [ host.hostname, host.displayname, ]).fetchall()
for used_row in used_result:
converted_date = converters.convert_date_for_luxon(used_row[2])
if not converted_date in dates:
dates.append(converted_date)
if not used_row[0] in paths:
paths[used_row[0]] = {}
values = paths[used_row[0]]
values[converted_date] = used_row[1]
if len(dates) > 0:
summary_by_worker[hostname] = { "dates": dates, "paths": paths.keys(), }
for path in paths.keys():
path_values = []
for date in dates:
                    if date in paths[path]:
                        path_values.append(paths[path][date])
                    else:
                        path_values.append('null')
summary_by_worker[hostname][path] = path_values
app.logger.debug(summary_by_worker.keys())
return summary_by_worker
def load_current_disk_usage(disk_type, hostname=None):
db = get_stats_db()
cur = db.cursor()
summary_by_worker = {}
value_factor = "" # Leave at GB for plotting disks
if disk_type == "plots":
value_factor = "/1024" # Divide to TB for plots disks
for host in chia.load_farmers():
if hostname and not (hostname == host.hostname or hostname == host.displayname):
continue
paths = []
used = []
free = []
sql = "select path, value{0}, created_at from stat_{1}_disk_used where (hostname = ? or hostname = ?) group by path having max(created_at)".format(value_factor, disk_type)
used_result = cur.execute(sql, [ host.hostname, host.displayname, ]).fetchall()
sql = "select path, value{0}, created_at from stat_{1}_disk_free where (hostname = ? or hostname = ?) group by path having max(created_at)".format(value_factor, disk_type)
        free_result = cur.execute(sql, [ host.hostname, host.displayname, ]).fetchall()
if len(used_result) != len(free_result):
app.logger.debug("Found mismatched count of disk used/free stats for {0}".format(disk_type))
        else:
            for used_row in used_result:
                paths.append(used_row[0])
                used.append(used_row[1])
                # pair each path's used value with its matching free-space row
                for free_row in free_result:
                    if used_row[0] == free_row[0]:
                        free.append(free_row[1])
                        break
if len(paths):
summary_by_worker[host.hostname] = { "paths": paths, "used": used, "free": free}
#app.logger.debug(summary_by_worker.keys())
return summary_by_worker
def prune_workers_status(hostname, displayname, blockchain):
try:
db = get_stats_db()
cur = db.cursor()
for table in ALL_TABLES_BY_HOSTNAME:
cur.execute("DELETE FROM " + table + " WHERE (hostname = :hostname OR hostname = :displayname)",
{"hostname":hostname, "displayname":displayname})
db.commit()
except Exception as ex:
app.logger.info("Failed to remove stale stats for worker {0} - {1} because {2}".format(displayname, blockchain, str(ex)))
``` |
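The `group by path having max(created_at)` queries above lean on a SQLite-specific guarantee: when a query contains a single `max()` aggregate, bare columns in the result come from the row that produced the maximum. A self-contained demonstration:
```python
# Demonstrates the SQLite bare-column behavior the disk-usage queries rely on.
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("create table stat (path text, value real, created_at text)")
con.executemany(
    "insert into stat values (?, ?, ?)",
    [("/plots", 10.0, "20210101000000"),
     ("/plots", 42.0, "20210102000000")],
)
row = con.execute(
    "select path, value, created_at from stat "
    "group by path having max(created_at)"
).fetchone()
print(row)  # ('/plots', 42.0, '20210102000000') -- the latest row per path
```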
{
"source": "9kl/ys7_snap",
"score": 2
} |
#### File: ys7_snap/ys7_snap/aps.py
```python
from pytz import utc
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.triggers import interval
jobstores = {
'default': MemoryJobStore()
}
executors = {
'default': ThreadPoolExecutor(20),
'processpool': ProcessPoolExecutor(5)
}
job_defaults = {
'coalesce': False,
'max_instances': 3
}
scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)
def timer(interval_seconds=10):
    def _timer(fn):
        trigger = interval.IntervalTrigger(seconds=interval_seconds)
        scheduler.add_job(fn, trigger=trigger)
        return fn  # return the function so the decorated name stays callable
    return _timer
```
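Jobs registered through `timer` only start firing once the shared scheduler is started. A usage sketch; the job body and interval are illustrative only:
```python
# Usage sketch for the timer decorator above.
import time
from ys7_snap.aps import scheduler, timer

@timer(interval_seconds=5)
def heartbeat():  # hypothetical job
    print("still alive")

scheduler.start()  # jobs run on a background daemon thread
time.sleep(12)     # keep the main thread alive long enough to see two runs
scheduler.shutdown()
```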
#### File: ys7_snap/ys7_snap/store.py
```python
import os
import time
import typing
import requests
from PIL import Image
from ys7_snap import settings
from ys7_snap.error import FileStoreError
def save_to_local(video: typing.Dict, url: str):
"""
保存到本地
:param video: 视频信息
:param url: 抓拍图像url
:return:
"""
device_serial = video["deviceSerial"]
picture_time = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
dir_name = time.strftime("%Y%m%d", time.localtime(time.time()))
file_name = f"{device_serial}_{picture_time}.jpg"
download_file(url, settings.FILE_SAVE_DIR, dir_name, file_name)
if settings.THUMBNAIL_COMPRESSION_RATIO > 1:
thumbnail_file_name = f"thumbnail_{file_name}"
create_thumbnail(settings.FILE_SAVE_DIR, dir_name, file_name, thumbnail_file_name,
settings.THUMBNAIL_COMPRESSION_RATIO)
file_name = thumbnail_file_name
d = {
'video': video,
'dir_name': dir_name,
'file_name': file_name
}
return d
def download_file(url: str, root_dir: str, save_dir: str, file_name: str):
"""
下载文件并保存
:param url: 文件url
:param root_dir: 根目录
:param save_dir: 保存目录
:param file_name: 文件名
:return:
"""
file_path = ""
try:
        resp = requests.get(url)
        resp.raise_for_status()  # surface HTTP errors instead of saving an error page
dir_path = os.path.join(root_dir, save_dir)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
file_path = os.path.join(dir_path, file_name)
with open(file_path, 'wb') as f:
f.write(resp.content)
    except requests.RequestException as ex:
        raise FileStoreError(f"request url {url} error: {ex}")
    except IOError as ex:
        raise FileStoreError(f"save file {file_path} error: {ex}")
def create_thumbnail(root_dir: str, file_dir: str, file_name: str, thumbnail_file_name: str, compression_ratio: int):
"""
创建缩略图
:param root_dir: 根目录
:param file_dir: 文件目录
:param file_name: 文件名称
:param thumbnail_file_name: 缩略图文件名称
:param compression_ratio: 缩略图压缩率
:return:
"""
file_path = os.path.join(root_dir, file_dir, file_name)
if not os.path.exists(file_path):
raise FileStoreError(f"生成缩略图原文件{file_path}不存在。")
thumbnail_file_path = os.path.join(root_dir, file_dir, thumbnail_file_name)
im = Image.open(file_path)
w, h = im.size
im.thumbnail((w // compression_ratio, h // compression_ratio))
im.save(thumbnail_file_path, 'jpeg')
```
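A usage sketch for `save_to_local`; the device serial, channel and snapshot URL are made-up values, and `settings.FILE_SAVE_DIR` / `settings.THUMBNAIL_COMPRESSION_RATIO` must already be configured:
```python
# Illustrative call only; the video dict and URL are invented values.
from ys7_snap.store import save_to_local

video = {"deviceSerial": "C12345678", "channelNo": 1}  # hypothetical camera
result = save_to_local(video, "https://example.com/snap.jpg")
print(result["dir_name"], result["file_name"])
```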
#### File: ys7_snap/ys7_snap/tasks.py
```python
import logging
from ys7_snap import aps, settings
from ys7_snap.remote import mqtt_public
from ys7_snap.store import save_to_local
from ys7_snap.ys7 import capture
log = logging.getLogger("ys7_snap")
@aps.timer(interval_seconds=settings.YS_SNAP_TIME)
def on_capture():
videos = settings.VIDEOS
if not videos:
return
for item in videos:
try:
url = capture(item["deviceSerial"], item["channelNo"])
d = save_to_local(item, url)
mqtt_public(settings.MQTT_TOPIC_TIME_SNAP_OUT, d)
except Exception as ex:
log.error(ex)
continue
``` |
{
"source": "9Knight9n/CodART",
"score": 3
} |
#### File: CodART/refactorings/move_class.py
```python
import argparse
import os
from antlr4 import *
from antlr4.TokenStreamRewriter import TokenStreamRewriter
from gen.javaLabeled.JavaLexer import JavaLexer
from gen.javaLabeled.JavaParserLabeled import JavaParserLabeled
from gen.javaLabeled.JavaParserLabeledListener import JavaParserLabeledListener
def log_error(title, message):
"""
log_error method is used for logging erros
:param title: title of the error
:param message: message of the error
:return: None
"""
if title == "Redundant":
print(f"[{title}]: Refactoring is not necessary")
else:
print(f"[{title}]: Refactoring is not allowed")
print(f"{message}")
class MoveClassPreConditionListener(JavaParserLabeledListener):
    """
    MoveClassPreConditionListener is used to check the pre-conditions of
    the move class refactoring
    """
    def __init__(self):
        # Move all classes of a file into file_classes list
        self.file_classes = []
# Enter a parse tree produced by CompilationUnitContext
def enterCompilationUnit(self, ctx: JavaParserLabeled.CompilationUnitContext):
for declaration in ctx.children:
if isinstance(declaration, JavaParserLabeled.TypeDeclarationContext):
if declaration.classDeclaration() is None:
continue
self.file_classes.append(declaration.classDeclaration().IDENTIFIER().getText())
# Exit a parse tree produced by FieldDeclarationContext
def exitFieldDeclaration(self, ctx: JavaParserLabeled.FieldDeclarationContext):
if ctx.typeType() is None:
return
field_type = ctx.typeType().getText()
if field_type in self.file_classes:
log_error("Forbidden", "This class has fields that dependent on other classes")
exit(0)
# Exit a parse tree produced by Expression0Context
def exitExpression0(self, ctx: JavaParserLabeled.Expression0Context):
if ctx.primary() is None:
return
expression = ctx.primary().getText()
if expression in self.file_classes:
log_error("Forbidden", "This class has dependencies on other classes")
exit(0)
# Exit a parse tree produced by LocalVariableDeclarationContext
def exitLocalVariableDeclaration(self, ctx: JavaParserLabeled.LocalVariableDeclarationContext):
if ctx.typeType() is None:
return
local_variable_type = ctx.typeType().getText()
if local_variable_type in self.file_classes:
log_error("Forbidden", "This class has local variables that dependent on other classes")
exit(0)
class MoveClassRefactoringListener(JavaParserLabeledListener):
"""
To implement the move class refactoring
a stream of tokens is sent to the listener, to build an object token_stream_rewriter
and we move all class methods and fields from the source package to the target package
"""
def __init__(self, common_token_stream: CommonTokenStream = None, class_identifier: str = None,
source_package: str = None, target_package: str = None, filename: str = None, dirname: str = None):
"""
:param common_token_stream: used to edit the content of the parsers
"""
self.enter_class = False
self.token_stream = common_token_stream
self.class_found = False
self.class_fields = []
self.class_methods = []
# Move all the tokens in the source code in a buffer, token_stream_rewriter.
if common_token_stream is not None:
self.token_stream_rewriter = TokenStreamRewriter(common_token_stream)
else:
raise TypeError('common_token_stream is None')
if class_identifier is not None:
self.class_identifier = class_identifier
else:
raise ValueError("class_identifier is None")
if filename is not None:
self.filename = filename
else:
raise ValueError("filename is None")
if dirname is not None:
self.dirname = dirname
else:
raise ValueError("dirname is None")
if source_package is not None:
self.source_package = source_package
else:
raise ValueError("source_package is None")
if target_package is not None:
self.target_package = target_package
else:
raise ValueError("target_package is None")
self.TAB = "\t"
self.NEW_LINE = "\n"
self.code = f"package {self.target_package};{self.NEW_LINE}{self.NEW_LINE}"
# Exit a parse tree produced by JavaParserLabeled#importDeclaration.
def exitImportDeclaration(self, ctx: JavaParserLabeled.ImportDeclarationContext):
text_to_replace = "import " + ctx.qualifiedName().getText() + ';'
if ctx.STATIC() is not None:
text_to_replace = text_to_replace.replace("import", "import static")
self.code += text_to_replace + self.NEW_LINE
# Enter a parse tree produced by JavaParserLabeled#packageDeclaration.
def enterPackageDeclaration(self, ctx: JavaParserLabeled.PackageDeclarationContext):
package_name = ctx.getText()[7:-1]
print(package_name)
if package_name != self.source_package:
raise ValueError(f"The package {package_name} in the file isn't equal to the source package!")
# Exit a parse tree produced by JavaParserLabeled#classBodyDeclaration2.
def exitClassBodyDeclaration2(self, ctx: JavaParserLabeled.ClassBodyDeclaration2Context):
self.enter_class = False
try:
if ctx.memberDeclaration().classDeclaration().IDENTIFIER().getText() != self.class_identifier:
return
except Exception:
return
self.class_found = True
start_index = ctx.start.tokenIndex
stop_index = ctx.stop.tokenIndex
# get the class body from the token_stream_rewriter
class_body = self.token_stream_rewriter.getText(
program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
start=start_index,
stop=stop_index
)
self.code += f"import {self.source_package}.*;"
self.code += self.NEW_LINE * 2
self.code += f"// Class \"{self.class_identifier}\" moved here " \
f"from package {self.source_package} by CodART" + self.NEW_LINE + \
f"{class_body}"
# delete class declaration from source class
self.token_stream_rewriter.delete(
program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
from_idx=start_index,
to_idx=stop_index
)
        with open(self.filename, 'w') as old_file:
            old_file.write(self.token_stream_rewriter.getDefaultText().replace("\r", ""))
print("----------------------------")
print("Class attributes: ", str(self.class_fields))
print("Class methods: ", str(self.class_methods))
print("----------------------------")
# Exit a parse tree produced by JavaParserLabeled#typeDeclaration.
def exitTypeDeclaration(self, ctx: JavaParserLabeled.TypeDeclarationContext):
if ctx.classDeclaration() is not None:
self.enter_class = False
if ctx.classDeclaration().IDENTIFIER().getText() != self.class_identifier:
return
self.enter_class = True
self.class_found = True
start_index = ctx.start.tokenIndex
stop_index = ctx.stop.tokenIndex
# get the class body from the token_stream_rewriter
class_body = self.token_stream_rewriter.getText(
program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
start=start_index,
stop=stop_index
)
self.code += f"import {self.source_package}.*;"
self.code += self.NEW_LINE * 2
self.code += f"// Class \"{self.class_identifier}\" moved here " \
f"from package {self.source_package} by CodART" + self.NEW_LINE + \
f"{class_body}"
# delete class declaration from source class
self.token_stream_rewriter.delete(
program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
from_idx=start_index,
to_idx=stop_index
)
print("----------------------------")
print("Class attributes: ", str(self.class_fields))
print("Class methods: ", str(self.class_methods))
print("----------------------------")
# Enter a parse tree produced by JavaParserLabeled#fieldDeclaration.
def enterFieldDeclaration(self, ctx: JavaParserLabeled.FieldDeclarationContext):
if not self.enter_class:
return
list_of_fields = ctx.variableDeclarators().getText().split(",")
for field in list_of_fields:
self.class_fields.append(field)
# Enter a parse tree produced by JavaParserLabeled#methodDeclaration.
def enterMethodDeclaration(self, ctx: JavaParserLabeled.MethodDeclarationContext):
if not self.enter_class:
return
method_name = ctx.IDENTIFIER().getText()
self.class_methods.append(method_name)
# Exit a parse tree produced by JavaParserLabeled#compilationUnit.
def exitCompilationUnit(self, ctx: JavaParserLabeled.CompilationUnitContext):
if not self.class_found:
raise ValueError(f"Class \"{self.class_identifier}\" NOT FOUND!")
file_address = self.dirname + '/' + self.target_package.replace('.',
'/') + '/' + self.class_identifier + '.java'
        with open(file_address, 'w') as new_file:
            new_file.write(self.code.replace("\r", ""))
print(f"The class \"{self.class_identifier}\" moved to the target package successfully!")
class ReplaceDependentObjectsListener(JavaParserLabeledListener):
"""
To implement the move class refactoring
a stream of tokens is sent to the listener, to build an object token_stream_rewriter
and we move all class methods and fields from the source package to the target package
"""
def __init__(self, common_token_stream: CommonTokenStream = None, class_identifier: str = None,
source_package: str = None, target_package: str = None, filename: str = None, has_import: bool = None):
"""
:param common_token_stream:
"""
self.token_stream = common_token_stream
# Move all the tokens in the source code in a buffer, token_stream_rewriter.
if common_token_stream is not None:
self.token_stream_rewriter = TokenStreamRewriter(common_token_stream)
else:
raise TypeError('common_token_stream is None')
if class_identifier is not None:
self.class_identifier = class_identifier
else:
raise ValueError("class_identifier is None")
if filename is not None:
self.filename = filename
else:
raise ValueError("filename is None")
if has_import is not None:
self.has_import = has_import
else:
raise ValueError("has_import is None")
if source_package is not None:
self.source_package = source_package
else:
raise ValueError("source_package is None")
if target_package is not None:
self.target_package = target_package
else:
raise ValueError("target_package is None")
self.need_import = False
self.TAB = "\t"
self.NEW_LINE = "\n"
self.code = ""
self.mul_imports = []
self.exact_imports = []
# Enter a parse tree produced by CompilationUnitContext
def enterCompilationUnit(self, ctx: JavaParserLabeled.CompilationUnitContext):
# Iterate over the declarations of context to save all import statements
# in exact_imports and mul_imports
for declaration in ctx.children:
if isinstance(declaration, JavaParserLabeled.ImportDeclarationContext):
imported_package = ""
mul = None
if declaration.qualifiedName() is not None:
imported_package += declaration.qualifiedName().getText()
if declaration.MUL() is not None:
mul = declaration.MUL().getText()
imported_package += ".*"
if mul is not None:
self.mul_imports.append(imported_package)
else:
self.exact_imports.append(imported_package)
# Exit a parse tree produced by JavaParserLabeled#importDeclaration.
def exitImportDeclaration(self, ctx: JavaParserLabeled.ImportDeclarationContext):
# extract the imported package from context
imported_package = ""
mul = None
if ctx.qualifiedName() is not None:
imported_package += ctx.qualifiedName().getText()
if ctx.MUL() is not None:
mul = ctx.MUL().getText()
imported_package += '.' + ctx.MUL().getText()
# return if the current import statement is not relevant to source package
if self.source_package not in imported_package:
return
start_index = ctx.start.tokenIndex
stop_index = ctx.stop.tokenIndex
target_exact_package = f"{self.target_package}.{self.class_identifier}"
target_exact_import = f"import {target_exact_package};"
        if ctx.STATIC() is not None:
            target_exact_import = target_exact_import.replace("import", "import static")
# handle import scenarios in files dependent on the moved class
if target_exact_package in self.exact_imports:
if mul is None:
self.token_stream_rewriter.delete(
program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
from_idx=start_index,
to_idx=stop_index + 1
)
else:
if mul is not None:
self.token_stream_rewriter.insertAfter(
program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
index=stop_index,
text=self.NEW_LINE + target_exact_import
)
else:
self.token_stream_rewriter.replace(
program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
from_idx=start_index,
to_idx=stop_index,
text=target_exact_import
)
self.exact_imports.append(target_exact_package)
# Exit a parse tree produced by JavaParserLabeled#classOrInterfaceType.
def exitClassOrInterfaceType(self, ctx: JavaParserLabeled.ClassOrInterfaceTypeContext):
if not self.has_import or not self.need_import:
if self.class_identifier in ctx.getText().split('.'):
self.need_import = True
# Exit a parse tree produced by JavaParserLabeled#createdName0.
def exitCreatedName0(self, ctx: JavaParserLabeled.CreatedName0Context):
if not self.has_import or not self.need_import:
if self.class_identifier in ctx.getText().split('.'):
self.need_import = True
# Exit a parse tree produced by JavaParserLabeled#expression1.
def exitExpression1(self, ctx: JavaParserLabeled.Expression1Context):
if not self.has_import or not self.need_import:
            if ctx.expression().getText() == self.class_identifier:
self.need_import = True
# Exit a parse tree produced by JavaParserLabeled#typeDeclaration.
def exitTypeDeclaration(self, ctx: JavaParserLabeled.TypeDeclarationContext):
if ctx.classDeclaration() is not None:
if not self.has_import or self.need_import:
index = ctx.start.tokenIndex
# return if the file has already imported the package
if (self.source_package + '.' + self.class_identifier not in self.exact_imports) \
or (self.target_package + '.' + self.class_identifier in self.exact_imports):
return
# delete class declaration from source class
self.token_stream_rewriter.insertBefore(
program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
index=index,
text="import " + self.target_package + '.' + self.class_identifier + ';' + self.NEW_LINE
)
filename = 'AlphaBetaPlayer.java'
class_identifier = 'AlphaBetaPlayer'
source_package = 'chess.player'
target_package = 'chess.test_pack'
directory = '/home/ali/Documents/dev/java-chess/src'
file_counter = 0
def move_class(token_stream, parse_tree, args):
"""
move_class method uses MoveClassRefactorListener to move the class to the target package
:param token_stream: common token stream
:param parse_tree: generated parse tree
:param args: file arguments
:return: None
"""
move_class_listener = MoveClassRefactoringListener(
common_token_stream=token_stream, source_package=source_package, target_package=target_package,
class_identifier=class_identifier, filename=args.file, dirname=directory
)
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=move_class_listener)
with open(args.file, mode='w', newline='') as f:
f.write(move_class_listener.token_stream_rewriter.getDefaultText().replace("\r", ""))
def post_move_class_propagation(token_stream, parse_tree, args):
"""
post_move_class_propagation method is used to propagate post-conditions after moving the class
:param token_stream: common token stream
:param parse_tree: generated parse tree
:param args: file arguments
:return: None
"""
has_import = False
has_exact_import = False
file_to_check = open(file=args.file, mode='r')
for line in file_to_check.readlines():
text_line = line.replace('\n', '').replace('\r', '').strip()
if (text_line.startswith('import') and text_line.endswith(source_package + '.' + class_identifier + ';')) \
or (text_line.startswith('import') and text_line.endswith(source_package + '.*;')):
has_import = True
break
if (text_line.startswith('import') and text_line.endswith(target_package + '.' + class_identifier + ';')) \
or (text_line.startswith('import') and text_line.endswith(target_package + '.*;')):
has_exact_import = True
break
if not has_exact_import:
print(f"Start checking file \"{file_to_check.name}\" *** {file_counter}/100")
replace_dependent_object_listener = ReplaceDependentObjectsListener(
common_token_stream=token_stream, source_package=source_package, target_package=target_package,
class_identifier=class_identifier, filename=args.file, has_import=has_import
)
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=replace_dependent_object_listener)
with open(args.file, mode='w', newline='') as f:
f.write(replace_dependent_object_listener.token_stream_rewriter.getDefaultText().replace("\r", ""))
print(f"Finish checking file \"{file_to_check.name}\" *** {file_counter}/100")
def get_argument_parser(file):
"""
get_argument_parser method used to parse the arguments
:param file: file name
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'-n', '--file',
help='Input source', default=file)
args = arg_parser.parse_args()
return args
def get_parse_tree_token_stream(args):
"""
returns parse tree and token stream base on the file stream
:param args: file arguments
"""
# Step 1: Load input source into stream
stream = FileStream(args.file, encoding='utf8')
# Step 2: Create an instance of AssignmentStLexer
lexer = JavaLexer(stream)
# Step 3: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create an instance of the AssignmentStParser
parser = JavaParserLabeled(token_stream)
parser.getTokenStream()
# Step 5: Create parse tree
parse_tree = parser.compilationUnit()
return parse_tree, token_stream
def recursive_walk(dir_path):
"""
walks through the specified directory files to implement refactoring
:param dir_path: directory path
"""
global filename
args = get_argument_parser(source_file_path)
parse_tree, token_stream = get_parse_tree_token_stream(args)
# check if the class has dependencies on other classes in the same class
pre_condition_listener = MoveClassPreConditionListener()
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=pre_condition_listener)
filename_without_extension, extension = os.path.splitext(filename)
if extension == '.java':
move_class(token_stream, parse_tree, args)
else:
raise ValueError(f"The filename format must be \".java\", but found {extension}!")
for dirname, dirs, files in os.walk(dir_path):
for file in files:
if file == filename or file == class_identifier + '.java':
continue
file_without_extension, extension = os.path.splitext(file)
if extension == '.java':
args = get_argument_parser(os.path.join(dirname, file))
parse_tree, token_stream = get_parse_tree_token_stream(args)
post_move_class_propagation(token_stream, parse_tree, args)
if __name__ == '__main__':
source_package_directory = os.path.join(directory, source_package.replace('.', '/'))
target_package_directory = os.path.join(directory, target_package.replace('.', '/'))
source_file_path = os.path.join(source_package_directory, filename)
target_file_path = os.path.join(target_package_directory, f"{class_identifier}.java")
if not os.path.exists(source_package_directory):
raise NotADirectoryError(f"The package \"{source_package}\" NOT FOUND!")
if not os.path.exists(target_package_directory):
raise NotADirectoryError(f"The package \"{target_package}\" NOT FOUND!")
if not os.path.isfile(source_file_path):
raise FileNotFoundError(f"The file \"{filename}\" NOT FOUND in package {source_package}!")
if os.path.isfile(target_file_path):
log_error("Redundant", f"The class \"{class_identifier}\" already exists in package \"{target_package}\"!")
exit(0)
# start refactoring
recursive_walk(directory)
``` |
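Every entry point above follows the same ANTLR recipe: character stream, lexer, token stream, parser, parse tree, walker. A minimal sketch of that recipe over an in-memory string instead of a file, reusing this project's generated grammar classes; the listener and sample source are illustrative:
```python
# Minimal sketch: parse Java from a string and walk it with a listener.
from antlr4 import InputStream, CommonTokenStream, ParseTreeWalker
from gen.javaLabeled.JavaLexer import JavaLexer
from gen.javaLabeled.JavaParserLabeled import JavaParserLabeled
from gen.javaLabeled.JavaParserLabeledListener import JavaParserLabeledListener

class ClassNamePrinter(JavaParserLabeledListener):
    def enterClassDeclaration(self, ctx):
        print("found class:", ctx.IDENTIFIER().getText())

source = "package demo; public class Hello {}"
tokens = CommonTokenStream(JavaLexer(InputStream(source)))
tree = JavaParserLabeled(tokens).compilationUnit()
ParseTreeWalker().walk(ClassNamePrinter(), tree)  # found class: Hello
```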
{
"source": "9Knight9n/crawler-legislation-uk",
"score": 3
} |
#### File: crawler-legislation-uk/extract/act_list.py
```python
import requests
from bs4 import BeautifulSoup
from extract import headers, base_url
from utils import fix_dir_name
def get_act_list_single_page(url, num):
f = requests.get(base_url + url + "?page=" + str(num), headers=headers)
soup = BeautifulSoup(f.content, 'lxml')
current_page = soup.select("li.currentPage strong")[0].get_text().replace("This is results page ", "")
if num != int(current_page):
return []
acts_query = soup.select("#content tbody tr")
acts = []
last = ["", ""]
for tr in acts_query:
act = {}
a = tr.select("td a")
act['title'] = fix_dir_name(a[0].get_text())
act['url'] = a[0]['href'][1:]
if len(a) > 1:
temp = a[1].get_text().split(" ")
last = temp
else:
temp = last
act['year'] = temp[0]
act['number'] = temp[1] if len(temp) > 1 else ''
acts.append(act)
return acts
# print(get_act_list_single_page('ukpga',1))
```
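`get_act_list_single_page` returns an empty list as soon as the site serves a different page than the one requested, so callers can paginate until exhaustion. A sketch of that loop, using the `ukpga` type from the commented example above:
```python
# Sketch: collect every act of one type by paging until the site
# stops serving new pages.
from extract.act_list import get_act_list_single_page

all_acts = []
page = 1
while True:
    acts = get_act_list_single_page('ukpga', page)
    if not acts:
        break
    all_acts.extend(acts)
    page += 1
print(len(all_acts), "acts collected")
```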
#### File: crawler-legislation-uk/extract/act.py
```python
import requests
from bs4 import BeautifulSoup
from db.models import Act
from extract import headers, base_url
from utils import convert_xht_to_txt_2
def already_added(url):
return Act.objects.filter(url=url).count() > 0
def get_act_details(act):
# url = base_url + p_id + "?view=extent"
# f = requests.get(url, headers=headers)
# soup = BeautifulSoup(f.content, 'lxml')
# title = soup.select("#pageTitle")[0].get_text()
temp = act['url'].split("/")
type_ = temp[0]
type_ = detect_type(type_, act['title'])
if type_ is None:
        print(f'Act "{act["url"]}" is not included in accepted types. Skipping...')
act['skipped'] = 'type'
return act
if already_added(act['url']):
print(f'Act "{act["url"]}" already loaded.skipping...')
act['skipped'] = 'duplicate'
return act
pdf_url = base_url + act['url'].replace("/contents", "") + "/data.pdf"
xht_url = base_url + act['url'].replace("/contents", "") + "/data.xht?view=snippet&wrap=true"
note_pdf_url = base_url + act['url'].replace("/contents", "") + "/note/data.pdf"
note_xht_url = base_url + act['url'].replace("/contents", "") + "/note/data.xht?view=snippet&wrap=true"
files = {'.pdf': pdf_url, '.xht': xht_url, '#note.pdf': note_pdf_url, '#note.xht': note_xht_url}
act['files'] = files
act['type'] = type_
return act
def get_txt(url):
f = requests.get(url, headers=headers)
soup = BeautifulSoup(f.content, 'lxml')
for script in soup(["script", "style", "head"]):
script.extract()
return soup
# def get_links(url):
# f = requests.get(url, headers=headers)
# soup = BeautifulSoup(f.content, 'lxml')
# links = []
# tags = soup.find_all("a")
# for tag in tags:
# if "href" in str(tag) and str(tag['href']).startswith(base_url):
# links.append(tag['href'])
# return links
def get_act_txt(url):
text = get_txt(url)
text = convert_xht_to_txt_2(str(text))
if len(text) == 0:
raise Exception(f"failed to load ({url})'s text")
return text
accepted_types = {
'uksi': 'UK Statutory Instruments',
# 'ukci': 'Church Instruments',
# 'uksro': 'Statutory Rules and Orders',
'ukpga': 'Public General Acts',
'ukla': 'Local Acts',
# 'ukcm': 'Church of England Measures',
}
uksi = [
"Order",
"Regulations",
"Rules",
"Scheme",
"Direction",
"Declaration",
]
def detect_type(_type, title):
if _type == "uksi":
return detect_uksi_type(title)
elif _type in accepted_types.keys():
return accepted_types[_type]
else:
return None
def detect_uksi_type(title: str):
title = title.lower()
    title = title.replace("order of council", "")  # strip it so the bare "Order" keyword is not matched
indices = []
for uksi_ in uksi:
indices.append(title.rfind(uksi_.lower()))
max_ = indices.index(max(indices))
if max(indices) == -1:
return "unknown"
return uksi[max_]
```
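To make the right-most-keyword rule concrete, here are a few illustrative titles (invented, not taken from the crawl) and what `detect_uksi_type` returns for them:
```python
# Illustrative classifications; the titles are hypothetical.
from extract.act import detect_uksi_type

print(detect_uksi_type("The Air Navigation Order 2016"))      # Order
print(detect_uksi_type("The Working Time Regulations 1998"))  # Regulations
print(detect_uksi_type("The Something Order of Council"))     # unknown
print(detect_uksi_type("An Untyped Instrument"))              # unknown
```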
#### File: 9Knight9n/crawler-legislation-uk/temp.py
```python
import sys
import os
sys.dont_write_bytecode = True
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
import django
django.setup()
# Import your models for use in your script
from db.models import *
import pandas as pd
from save import files_list_dir,txt_files_dir
from save.act import save_txt
def remove_files_with_type(type_:str):
    excel = pd.read_excel(files_list_dir)
docs = excel.loc[excel['type'] == type_]
count = 0
# print(docs["title"])
for doc in docs['title']:
# print(doc)
# print(doc['title'])
if os.path.exists(txt_files_dir+"/"+doc+".txt"):
os.remove(txt_files_dir+"/"+doc+".txt")
count += 1
print(f'{count} files deleted.')
def create_files():
for act in Act.objects.all():
save_txt(act.title,act.text)
if __name__ == '__main__':
create_files()
# remove_files_with_type("Church of England Measures")
# remove_files_with_type("Church Instruments")
``` |
{
"source": "9Knight9n/untitled-project",
"score": 2
} |
#### File: Back/chatroom/views.py
```python
from rest_framework.decorators import api_view, permission_classes
from rest_framework import status
from rest_framework.response import Response
from registeration.models import User
from .models import Chatroom
from join.models import Chatroom_User
from .serializers import ShowUChatroomProfileSerializer
@api_view(['POST'])
def createchatroom(request):
    data = dict(request.POST)
    user = User.objects.get(id=data['owner'][0])
    topic = data["selectedTopic"][0]
    # The three topics only differ in which optional fields they store,
    # so build the field dict once instead of repeating the whole flow.
    fields = {
        'owner': user,
        'selectedTopic': topic,
        'chatroomName': data['chatroomName'][0],
    }
    if topic == "PL":
        fields['Link'] = data['Link'][0]
        fields['selected'] = data['selected'][0]
    elif topic == "OS":
        fields['selected'] = data['selected'][0]
        fields['selectedSub'] = data['selectedSub'][0]
    elif topic == "App":
        fields['Link'] = data['Link'][0]
    else:
        return Response({'message': 'Unknown topic'}, status=status.HTTP_400_BAD_REQUEST)
    chatroom = Chatroom.objects.create(**fields)
    Chatroom_User.objects.create(chatroom=chatroom, user=user)
    if "Description" in request.data.keys():
        chatroom.Description = data['Description'][0]
    if "Base64" in request.data.keys():
        avatar = data['Base64'][0]
    else:
        with open('media/chatroom/image/default.txt', 'r') as src:
            avatar = src.read()
    avatar_path = 'media/chatroom/image/' + str(chatroom.id) + '.txt'
    with open(avatar_path, 'a') as dst:
        dst.write(avatar)
    chatroom.chatroomAvatar = avatar_path
    chatroom.save()
    user.numberOfChatrooms += 1
    user.save()
    return Response({'message': 'New chatroom created'}, status=status.HTTP_201_CREATED)
@api_view(['POST'])
def show_chatrooms(request):
user = User.objects.filter(id=request.data["user_id"])
chatroom_user = Chatroom_User.objects.filter(user=user[0])
data = []
print(chatroom_user)
for i in range(len(chatroom_user)):
# data.append(ChatroomSerializer(chatrooms[i]))
data.append({'id':chatroom_user[i].chatroom.id,'name':chatroom_user[i].chatroom.chatroomName})
# print(chatroom_user[i].chatroom.chatroomAvatar)
        with open(str(chatroom_user[i].chatroom.chatroomAvatar), 'r') as img:
            data[i]['Base64'] = img.read()
# print(data)
return Response(data , status=status.HTTP_200_OK)
@api_view(['POST', ])
@permission_classes([])
def ShowChatroomProfile(request):
chatroom = Chatroom.objects.filter(id=request.data['chatroomId'])
if list(chatroom) != []:
chatroom = chatroom[0]
serializer = ShowUChatroomProfileSerializer(chatroom)
data = serializer.data
filename = 'media/chatroom/image/' + str(chatroom.id) + '.txt'
        with open(filename, 'rb') as img:
            data['chatroom_profile_image'] = img.read()
data['chatroomLink'] = 'http://127.0.0.1:8000/ShowChatroomByLink/chatroom' + str(chatroom.id) + '/'
data['topicLink'] = chatroom.Link
if data['selectedTopic'] == "PL":
data['selectedTopic'] = "Programing Language(" + chatroom.selected + ")"
elif data['selectedTopic'] == "App":
data['selectedTopic'] = "Application"
elif data['selectedTopic'] == "OS":
data['selectedTopic'] = "Operating System(" + chatroom.selected + ")"
return Response(data)
return Response({'message': 'Chatroom not found'})
@api_view(['POST', ])
@permission_classes([])
def EditChatroomProfile(request):
chatroom = Chatroom.objects.filter(id=request.data['chatroomId'])
# print("////////////////////////////////////////////////hi" , request.data.keys())
if list(chatroom) != []:
chatroom = chatroom[0]
serializer = ShowUChatroomProfileSerializer(chatroom)
data = serializer.data
if 'chatroomName' in request.data.keys():
chatroom.chatroomName = request.data['chatroomName']
if 'Description' in request.data.keys():
chatroom.Description = request.data['Description']
if 'topicLink' in request.data.keys() and chatroom.selectedTopic != 'OS':
chatroom.Link = request.data['topicLink']
# print("////////////////////////////////////////////////bye" , request.data.keys())
if 'chatroom_profile_image' in request.data.keys():
filepath = 'media/chatroom/image/' + str(chatroom.id) + '.txt'
# print("//////////////////////////////////////////////////////////////////:::::" , filepath)
file = open(filepath, 'w')
file.write(request.POST['chatroom_profile_image'])
file.close()
chatroom.save()
return Response({'message':'edit successfully'})
return Response({'message': 'Chatroom not found'})
```
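A client-side sketch of the create-and-list round trip against the views above; the base URL and route paths are assumptions, since the project's URL configuration is not part of this excerpt:
```python
# Hypothetical client calls; field names mirror what createchatroom reads.
import requests

BASE = "http://127.0.0.1:8000"  # assumed dev server

requests.post(f"{BASE}/createchatroom/", data={  # route path assumed
    "owner": 1,
    "selectedTopic": "PL",
    "chatroomName": "Python Help",
    "Link": "https://python.org",
    "selected": "Python",
})
rooms = requests.post(f"{BASE}/show_chatrooms/", data={"user_id": 1}).json()  # route path assumed
print([r["name"] for r in rooms])
```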
#### File: Back/submittext/views.py
```python
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from django.db.models import Q
import datetime
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# import time
from .serializer import QuestionSerializer , AnswerSerializer , ShowUserProfileSerializer
from .models import Answer , Question , User_Question , User_Answer
from chatroom.models import Chatroom
from registeration.models import User
@api_view(['POST' , ])
def ShowQuestion(request):
chatroom = Chatroom.objects.filter(id=request.data['ChatroomID'])
requestUser = User.objects.filter(id=request.data['user_id'])
if list(chatroom) != []:
questions = Question.objects.filter(chatroom=chatroom[0])
data_list = []
for i in questions:
serializer = QuestionSerializer(i)
data = serializer.data
# print(usrequestUserer)
# print(i)
user_question = User_Question.objects.filter(user=requestUser[0] , question=i)
# print(user_question)
if list(user_question) == []:
data['sameProblem']=0
else:
data['sameProblem']=user_question[0].voteState
data['time']=i.time.ctime()
if data['file']!=None:
data['file'] = 'http://127.0.0.1:8000' + data['file']
if i.user == None:
data['user'] = 'User is not exist'
data['userid'] = 'no id'
else:
user = i.user
data['user'] = user.username
data['userid'] = user.id
data_list.append(data)
return Response(data_list)
return Response({'message' : 'Chatroom not found'})
@api_view(['POST' , ])
def ShowAnswer(request):
question = Question.objects.filter(id=request.data['QuestionID'])
if list(question) != []:
answers = Answer.objects.filter(question=question[0]).order_by('isAccepted' , 'vote')
answers = answers[::-1]
data_list = []
for i in answers:
serializer = AnswerSerializer(i)
data = serializer.data
user_answer = User_Answer.objects.filter(user=request.data['user_id'] , answer=i)
if list(user_answer) != []:
data["voteState"] = user_answer[0].voteState
else:
data["voteState"] = 0
data['time']=i.time.ctime()
if data['file']!=None:
data['file'] = 'http://127.0.0.1:8000' + data['file']
if i.user == None:
data['user'] = 'User is not exist'
data['userid'] = 'no id'
else:
user = i.user
data['user'] = user.username
data['userid'] = user.id
data_list.append(data)
return Response(data_list)
return Response({'message' : 'Question not found'})
@api_view(['POST' , ])
def ShowUserProfile(request):
user = User.objects.filter(id=request.data['user_id'])
if list(user) != []:
user = user[0]
serializer = ShowUserProfileSerializer(user)
data = serializer.data
filename = 'media/profile/image/' + str(user.id) + '.txt'
        with open(filename, 'rb') as img:
            data['user_profile_image'] = img.read()
# print(data)
return Response(data)
return Response({'message' : 'User not found'})
def calculateSearchOrder(searchText , QuestionText):
value = 0
for word in searchText:
if word in QuestionText:
value += 1
return value
def Sort(sub_li):
sub_li.sort(key = lambda x: x[1])
return sub_li[::-1]
def DetectStopWords(text):
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(text)
filtered_sentence = [w for w in word_tokens if not w in stop_words]
return filtered_sentence
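# Note: DetectStopWords depends on NLTK data that is not bundled with the
# library; run nltk.download('stopwords') and nltk.download('punkt') once
# beforehand, or stopwords.words / word_tokenize will raise a LookupError.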
def TimeFilter(index):
time = datetime.datetime.now()
if index == 1:
time -= datetime.timedelta(days=730)
elif index == 2:
time -= datetime.timedelta(days=365)
elif index == 3:
time -= datetime.timedelta(days=180)
elif index == 4:
time -= datetime.timedelta(days=90)
elif index == 5:
time -= datetime.timedelta(days=30)
elif index == 6:
time -= datetime.timedelta(days=7)
return time
@api_view(['POST' , ])
def GeneralSearch(request):
# advance filter
# print(request.data)
time_list = []
query = Q()
if 'timePeriod' in request.data.keys():
if int(request.data['timePeriod'][0]) != 0:
time_filter = TimeFilter(int(request.data['timePeriod'][0]))
query = query & Q(time__gte=time_filter)
if 'isAnswered' in request.data.keys():
if request.data['isAnswered'][0] == '1':
query = query & Q(isAnswered=True)
if 'chatroomID' in request.data.keys():
query = query & Q(chatroom=request.data['chatroomID'])
no_member_dic = {0:10 , 1:100 , 2:1000 , 3:5000 , 4:10000 , 5:0}
queryset = []
if 'sort' in request.data.keys():
if request.data['sort'] == '0':
# newest
queryset = Question.objects.filter(query).order_by('time')
elif request.data['sort'] == '1':
#oldest
queryset = Question.objects.filter(query).order_by('time').reverse()
elif request.data['sort'] == '2':
# vote
queryset = Question.objects.filter(query).order_by('vote')
else:
queryset = Question.objects.filter(query)
valuelist = []
searchText = request.data["searchText"]
searchText = DetectStopWords(searchText)
user = User.objects.filter(id=request.data['user_id'])
if list(user) == []:
return Response({'message':'user not found'})
for q in range(len(queryset)):
numberOfUser = 0
if queryset[q].chatroom != None:
numberOfUser = queryset[q].chatroom.numberOfUser
if 'chatroomMember' in request.data.keys():
if int(request.data['chatroomMember']) > 5:
return Response({'message':'index of chatroomMember is not valid'})
if numberOfUser >= no_member_dic[int(request.data['chatroomMember'])]:
                value = calculateSearchOrder(searchText, queryset[q].text)
                if value > 0:
                    valuelist.append([q, value])
valuelist = Sort(valuelist)
data_list = []
for i in valuelist:
if i[1] > 0:
data = QuestionSerializer(queryset[i[0]]).data
data['chatroom'] = queryset[i[0]].chatroom
data['time']=queryset[i[0]].time.ctime()
user_question = User_Question.objects.filter(user=user[0], question=queryset[i[0]])
if list(user_question) == []:
data['voteState'] = 0
else:
data['voteState'] = user_question[0].voteState
if queryset[i[0]].user != None:
data['user'] = queryset[i[0]].user.username
data['userid'] = queryset[i[0]].user.id
else:
data['user'] = 'user does not exist'
data['userid'] = 'user does not exist'
data['time']=queryset[i[0]].time.ctime()
data_list.append(data)
chatroom_id_list = []
chatroom_list = []
for i in data_list:
if not i['chatroom'] in chatroom_id_list:
chatroom_id_list.append(i['chatroom'])
if i["chatroom"] != None:
chatroom_data = {}
chatroom_data["ChatroomID"] = i['chatroom'].id
chatroom_data["name"] = i['chatroom'].chatroomName
chatroom_data["image"] = open( str(i["chatroom"].chatroomAvatar), 'r').read()
chatroom_list.append(chatroom_data)
if i["chatroom"] == None:
i["chatroom"] = "not exist"
else:
i["chatroom"] = i["chatroom"].id
return Response({"questions": data_list , "chatrooms": chatroom_list})
#document :
# realtime search (only on chatrooms)
# advanced filters :
#   chatroom
#   period of time
#   chatrooms with more members
#   questions with an accepted answer
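# A hypothetical example payload for GeneralSearch, with field names taken
# from the handler below (all values are illustrative):
# {
#     "searchText": "exam schedule",
#     "user_id": 3,
#     "timePeriod": ["4"],      # last 90 days
#     "isAnswered": ["1"],      # only questions marked answered
#     "chatroomMember": "2",    # chatrooms with at least 1000 members
#     "sort": "2"               # most votes first
# }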
@api_view(['POST'])
def SeggestionChatroomSreach(request):
searchText = request.data["searchText"]
searchTextlist = DetectStopWords(searchText)
# print(searchText)
chatroom_value_list = []
number_of_chatroom = 0
for chatroom in Chatroom.objects.all():
x = min(len(searchText), len(chatroom.chatroomName))
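        # Levenshtein distance over equal-length prefixes; a distance below 3
        # counts as a chatroom-name match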
similarty_of_chatroom_name = nltk.edit_distance(searchText[:x], chatroom.chatroomName[:x])
# print(similarty_of_chatroom_name)
if similarty_of_chatroom_name < 3:
chatroom_value_list.append([chatroom , 0])
number_of_chatroom += 1
for question in Question.objects.filter(chatroom=chatroom):
chatroom_value_list[-1][1] += calculateSearchOrder(searchTextlist , question.text)
chatroom_value_list = Sort(chatroom_value_list)
if number_of_chatroom > 10:
chatroom_value_list = chatroom_value_list[:10]
data_list = []
for chatroom in chatroom_value_list:
data = {}
data['chatroom_id'] = chatroom[0].id
data['chatroom_name'] = chatroom[0].chatroomName
data_list.append(data)
return Response(data_list)
@api_view(['POST'])
def AddQuestion(request):
data = dict(request.POST)
chatroom = Chatroom.objects.filter(id=data['chatroom'][0])
user = User.objects.filter(id=request.data['user_id'])
if list(user) != []:
question = Question.objects.create(
user=user[0],
chatroom=chatroom[0],
text=data['text'][0],
time=datetime.datetime.now(),
)
if 'file' in request.FILES.keys():
question.file = request.FILES['file']
question.save()
user[0].askedQuestions += 1
user[0].save()
return Response({'message': 'New question created'}, status=status.HTTP_201_CREATED)
return Response({'message': 'User not found'})
@api_view(['POST'])
def AddAnswer(request):
data = dict(request.POST)
question = Question.objects.filter(id=data['question'][0])
user = User.objects.filter(id=request.data['user_id'])
if list(user) != []:
answer = Answer.objects.create(
user=user[0],
question=question[0],
text=data['text'][0],
time=datetime.datetime.now(),
)
if 'file' in request.FILES.keys():
answer.file = request.FILES['file']
answer.save()
user[0].save()
return Response({'message': 'New answer created'}, status=status.HTTP_201_CREATED)
return Response({'message': 'User not found'})
@api_view(['POST'])
def EditQuestion(request):
data = dict(request.POST)
chatroom = Chatroom.objects.filter(id=data['chatroom'][0])
user = User.objects.filter(id=data['user_id'][0])
question = Question.objects.filter(id=data['id'][0] , user=user[0] , chatroom=chatroom[0])
if list(question) != []:
if 'text' in data.keys():
question[0].text = data['text'][0]
        if 'isAnswered' in data.keys():
            # POST values arrive as lists of strings; coerce to a real boolean
            question[0].isAnswered = data['isAnswered'][0].lower() in ('true', '1')
        if 'file' in request.FILES.keys():
            question[0].file = request.FILES['file']
question[0].save()
return Response({'message':'edit complete'})
else:
        return Response({'message': "you can't edit"})
@api_view(['POST'])
def DeleteQuestion(request):
data = dict(request.POST)
chatroom = Chatroom.objects.filter(id=data['chatroom'][0])
user = User.objects.filter(id=data['user_id'][0])
question = Question.objects.filter(id=data['id'][0] , user=user[0] , chatroom=chatroom[0])
if list(question) != []:
question.delete()
if list(user) != []:
user[0].askedQuestions -= 1
user[0].save()
return Response({'message':'delete complete'})
else:
        return Response({'message': "you can't delete"})
@api_view(['POST'])
def VoteQuestion(request):
data = dict(request.POST)
question = Question.objects.filter(id=data['question_id'][0])
user = User.objects.filter(id=data['user_id'][0])
if (list(user) != []) and (list(question) != []):
user_question = User_Question.objects.filter(user=user[0] , question=question[0])
else:
return Response({"message": 'user or question not exists'})
if list(user_question) != []:
if user_question[0].voteState == int(data['voteState'][0]):
            return Response({'message':'this user cannot vote the same way twice'})
else:
if list(question) != []:
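                # adjust the aggregate vote count by the delta between the new and previous vote state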
question[0].vote += int(data['voteState'][0]) - user_question[0].voteState
question[0].save()
user_question[0].voteState = int(data['voteState'][0])
user_question[0].save()
else:
user_question = User_Question.objects.create(user=user[0] , question=question[0] , voteState=int(data['voteState'][0]))
if list(question) != []:
question[0].vote += int(data['voteState'][0])
question[0].save()
    return Response({'message':'done'})
@api_view(['POST'])
def ShowvoteQuestion(request):
data = dict(request.POST)
question = Question.objects.filter(id=data['question_id'][0])
user = User.objects.filter(id=data['user_id'][0])
user_question = User_Question.objects.filter(user=user[0] , question=question[0])
if list(user_question) != []:
return Response({'message': False})
else:
return Response({'message': True})
@api_view(['POST'])
def EditAnswer(request):
data = dict(request.POST)
print(data)
question = Question.objects.get(id=data['question'][0])
user = User.objects.filter(id=data['user_id'][0])
answer = Answer.objects.filter(id=data['id'][0] )
print(question)
print(user)
print(answer)
if list(answer) != []:
if 'text' in data.keys():
answer[0].text = data['text'][0]
        answerOwner = None
        if 'isAccepted' in data.keys():
            answerOwner = User.objects.get(id=answer[0].user.id)
            if request.data['isAccepted'] == 'true':
                answerOwner.answeredQuestions += 1
                question.isAnswered = True
                print(1,question.isAnswered)
                data['isAccepted'] = True
            else:
                # use a separate name so the `answer` queryset being edited is not overwritten
                accepted_answers = Answer.objects.filter(question=question,isAccepted=True)
                print(2,accepted_answers)
                if len(accepted_answers) < 2:
                    answerOwner.answeredQuestions -= 1
                    question.isAnswered = False
                    print(3,question.isAnswered)
                data['isAccepted'] = False
            answer[0].isAccepted = data['isAccepted']
        if 'file' in request.FILES.keys():
            answer[0].file = request.FILES['file']
        answer[0].save()
        question.save()
        if answerOwner:
            answerOwner.save()
print(4,question.isAnswered)
return Response({'message':'edit complete'})
else:
        return Response({'message': "you can't edit"})
@api_view(['POST'])
def DeleteAnswer(request):
data = dict(request.POST)
question = Question.objects.filter(id=data['question'][0])
user = User.objects.filter(id=data['user_id'][0])
answer = Answer.objects.filter(id=request.data['id'] , user=user[0] , question=question[0])
if list(answer) != []:
answer.delete()
return Response({'message':'delete complete'})
else:
        return Response({'message': "you can't delete"})
@api_view(['POST'])
def VoteAnswer(request):
data = dict(request.POST)
    answer = Answer.objects.filter(id=data['answer_id'][0])
    user = User.objects.filter(id=data['user_id'][0])
    if (list(user) != []) and (list(answer) != []):
        user_answer = User_Answer.objects.filter(user=user[0] , answer=answer[0])
    else:
        return Response({"message": 'user or answer not exists'})
if list(user_answer) != []:
if user_answer[0].voteState == int(data['voteState'][0]):
            return Response({'message':'this user cannot vote the same way twice'})
else:
if list(answer) != []:
answer[0].vote += int(data['voteState'][0]) - user_answer[0].voteState
answer[0].save()
user_answer[0].voteState = int(data['voteState'][0])
user_answer[0].save()
else:
user_answer = User_Answer.objects.create(user=user[0] , answer=answer[0] , voteState=int(data['voteState'][0]))
if list(answer) != []:
answer[0].vote += int(data['voteState'][0])
answer[0].save()
    return Response({'message':'done'})
@api_view(['POST'])
def ShowVoteAnswer(request):
data = dict(request.POST)
answer = Answer.objects.filter(id=data['answer_id'][0])
    user = User.objects.filter(id=data['user_id'][0])
user_answer = User_Answer.objects.filter(user=user[0] , answer=answer[0])
if list(user_answer) != []:
        return Response({'message': user_answer[0].voteState})
else:
return Response({'message':0})
```
{
"source": "9lash/spotting_gunshots",
"score": 3
} |
#### File: spotting_gunshots/src/lstm_single_layer.py
```python
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (12,8)
import numpy as np
import tensorflow as tf
import keras
import pandas as pd
from keras_tqdm import TQDMNotebookCallback
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization, Dropout
from keras.layers import LSTM
from keras import regularizers
from keras.utils import plot_model
def data_generator(batch_size, tfrecord, start_frac=0, end_frac=1):
'''
Shuffles the Audioset training data and returns a generator of training data and boolean gunshot labels
batch_size: batch size for each set of training data and labels
tfrecord: filestring of the tfrecord file to train on
start_frac: the starting point of the data set to use, as a fraction of total record length (used for CV)
end_frac: the ending point of the data set to use, as a fraction of total record length (used for CV)
'''
max_len=10
#tfrecord holds data in binary sequence string.
records = list(tf.python_io.tf_record_iterator(tfrecord)) #records holds the array of the tfrecord file
print("Total audioframes in training dataset:", len(records))
records = records[int(start_frac*len(records)):int(end_frac*len(records))] # Make train_set & CV_set
print("After fractioning:")
print("Total audioframes in training dataset:", len(records))
rec_len = len(records) # this is your train set, rest is CV_set
shuffle = np.random.permutation(range(rec_len))
num_batches = rec_len//batch_size - 1
j = 0
gun_labels = [426,427,428,429,430,431]
while True:
X = []
y = []
for idx in shuffle[j*batch_size:(j+1)*batch_size]:
example = records[idx]
tf_seq_example = tf.train.SequenceExample.FromString(example)
example_label = list(np.asarray(tf_seq_example.context.feature['labels'].int64_list.value))
value_x = any((True for x in example_label if x in gun_labels))
            if value_x:
y.append(1)
else:
y.append(0)
n_frames = len(tf_seq_example.feature_lists.feature_list['audio_embedding'].feature)
audio_frame = []
for i in range(n_frames):
# audio_frame gets 128 8 bit numbers on each for loop iteration
audio_frame.append(np.frombuffer(tf_seq_example.feature_lists.feature_list['audio_embedding'].
feature[i].bytes_list.value[0],np.uint8).astype(np.float32))
pad = [np.zeros([128], np.float32) for i in range(max_len-n_frames)]
# if clip is less than 10 sec, audio_frame is padded with zeros for
#rest of the secs to make it to 10 sec.
audio_frame += pad
X.append(audio_frame)
j += 1
if j >= num_batches:
shuffle = np.random.permutation(range(rec_len))
j = 0
X = np.array(X)
yield X, np.array(y)
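# Minimal usage sketch (illustrative path; the generator yields X of shape
# (batch_size, 10, 128) and y of shape (batch_size,)):
#   gen = data_generator(32, 'train.tfrecord', start_frac=0, end_frac=0.9)
#   X_batch, y_batch = next(gen)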
#Trainer
def lstm_trainer(train_tfrecord, train_lr, train_epochs):
# Building the model
lstm_model = Sequential()
lstm_model.add(BatchNormalization(input_shape=(None, 128)))
lstm_model.add(Dropout(0.5))
lstm_model.add(LSTM(128, activation='relu',
kernel_regularizer=regularizers.l2(0.01),
activity_regularizer=regularizers.l2(0.01)))
lstm_model.add(Dense(1, activation='sigmoid'))
# try using different optimizers and different optimizer configs
lstm_model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
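    # note: train_lr only appears in the output filenames below; the Adam
    # optimizer above runs with its default learning rate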
# Fitting the LSTM model
batch_size=32
CV_frac = 0.1
train_gen = data_generator(batch_size, train_tfrecord, 0, 1-CV_frac)
val_gen = data_generator(20,train_tfrecord, 1-CV_frac, 1)
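    # number of examples in the training tfrecord, hard-coded rather than
    # reused from data_generator's record count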
rec_len = 17662
lstm_h = lstm_model.fit_generator(train_gen,steps_per_epoch=int(rec_len*(1-CV_frac))//batch_size, epochs=train_epochs,
validation_data=val_gen, validation_steps=int(rec_len*CV_frac)//20,
verbose=1, callbacks=[TQDMNotebookCallback()])
# Plot the model architecture
    plot_model(lstm_model, to_file='../results/LSTM/LSTM_Loss=BinCE_Epochs{}_lr={}.png'.format(train_epochs,train_lr))
# Save the lstm model
lstm_model.save('../models/1LayerLSTM__Loss=BinCE_lr={}_Epochs={}.h5'.format(train_lr, train_epochs))
return lstm_h
#Main function
if __name__ == "__main__":
#setting hyperparameters
train_path = '../data/preprocessed/bal_gunspotting_in_school_subset.tfrecord'
epochs = 20
learning_rate = 0.001 #0.001 lr is the trick that works.
print("Training Logistic Regression:")
#train logistic regression with learn rate = 0.1 and epochs 10
lstm_h = lstm_trainer(train_path, learning_rate, epochs)
#Plotting the training performance of the LSTM
plt.plot(lstm_h.history['acc'], 'o-', label='train_acc')
plt.plot(lstm_h.history['val_acc'], 'x-', label='val_acc')
plt.xlabel('Epochs({})'.format(epochs), size=20)
plt.ylabel('Accuracy', size=20)
plt.legend()
plt.savefig('../results/LSTM/LSTM__Loss=BinCE_{}Epochs_lr={}_performance.png'.format(epochs,learning_rate), dpi = 300)
# Metrics for the LSTM
print("Epochs = {}".format(epochs))
print("Average Training loss =", sum(lstm_h.history['loss'])/len(lstm_h.history['loss']))
print("Average Training accuracy=", sum(lstm_h.history['acc'])/len(lstm_h.history['acc'])*100)
print("Average validation loss =", sum(lstm_h.history['val_loss'])/len(lstm_h.history['val_loss']))
print("Average validation accuracy=", sum(lstm_h.history['val_acc'])/len(lstm_h.history['val_acc'])*100)
```
{
"source": "9LKQ7ZLC82/cccatalog",
"score": 3
} |
#### File: providers/api/ClevelandMuseum.py
```python
from modules.etlMods import *
LIMIT = 1000
DELAY = 5.0 #time delay (in seconds)
FILE = 'clevelandmuseum_{}.tsv'.format(int(time.time()))
logging.basicConfig(format='%(asctime)s: [%(levelname)s - Cleveland Museum API] =======> %(message)s', level=logging.INFO)
def getMetaData(_data):
license = 'CC0'
version = '1.0'
imgInfo = ''
imgURL = ''
width = ''
height = ''
foreignID = ''
foreignURL = ''
title = ''
creator = ''
metaData = {}
key = None
#verify the license
if (not ('share_license_status' in _data)) or (str(_data['share_license_status']).upper() != 'CC0'):
logging.warning('CC0 license not detected!')
return None
#get the landing page
foreignURL = _data.get('url', None)
#get the image url and dimension
imgInfo = _data.get('images')
if imgInfo and imgInfo.get('web'):
imgURL = imgInfo.get('web', {}).get('url', None)
key = 'web'
elif imgInfo and imgInfo.get('print'):
imgURL = imgInfo.get('print', {}).get('url', None)
key = 'print'
elif imgInfo and imgInfo.get('full'):
imgURL = imgInfo.get('full', {}).get('url', None)
key = 'full'
if (not imgInfo) or (not imgURL):
logging.warning('Image not detected in url {}'.format(foreignURL))
return None
if imgURL and key:
width = imgInfo[key]['width']
height = imgInfo[key]['height']
#provider identifier for the artwork
foreignID = _data.get('id', imgURL)
#title
title = sanitizeString(_data.get('title', ''))
if not foreignURL:
logging.warning('Landing page not detected')
return None
#get creator info
creatorInfo = _data.get('creators', {})
creatorName = None
if creatorInfo:
creatorName = creatorInfo[0].get('description', '')
if creatorName:
creator = sanitizeString(creatorName)
#get additional meta data
metaData['accession_number'] = sanitizeString(_data.get('accession_number', ''))
metaData['technique'] = sanitizeString(_data.get('technique', ''))
metaData['date'] = sanitizeString(_data.get('creation_date', ''))
metaData['credit_line'] = sanitizeString(_data.get('creditline', ''))
metaData['medium'] = sanitizeString(_data.get('technique', ''))
metaData['classification'] = sanitizeString(_data.get('type', ''))
metaData['culture'] = sanitizeString(','.join(list(filter(None, _data.get('culture', '')))))
metaData['tombstone'] = sanitizeString(_data.get('tombstone', ''))
#No description of artwork. The digital_description and wall_description are null.
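    # Tab-separated row layout, inferred from the values below ('\N' = null):
    # foreign_id, landing_url, image_url, thumbnail, width, height, filesize,
    # license, license_version, creator, creator_url, title, meta_data, tags,
    # watermarked ('f'), provider, source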
return [
str(foreignID),
foreignURL,
imgURL,
'\\N',
str(int(float(width))) if width else '\\N',
str(int(float(height))) if height else '\\N',
'\\N',
license,
str(version),
creator if creator else '\\N',
'\\N',
title if title else '\\N',
'\\N' if not metaData else json.dumps(metaData),
'\\N',
'f',
'clevelandmuseum',
'clevelandmuseum'
]
def main():
logging.info('Begin: Cleveland Museum API requests')
offset = 0
isValid = True
while isValid:
startTime = time.time()
endpoint = 'http://openaccess-api.clevelandart.org/api/artworks/?cc0=1&limit={0}&skip={1}'.format(LIMIT, offset)
batch = requestContent(endpoint)
if batch and ('data' in batch):
extracted = batch['data']
if extracted:
result = map(lambda data: getMetaData(data), extracted)
writeToFile(list(result), FILE)
offset += LIMIT
delayProcessing(startTime, DELAY)
else:
isValid = False
break
logging.info('Terminated!')
if __name__ == '__main__':
main()
```
#### File: providers/commoncrawl/RawPixel.py
```python
from Provider import *
logging.basicConfig(format='%(asctime)s - %(name)s: [%(levelname)s - RawPixel] =======> %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
class RawPixel(Provider):
def __init__(self, _name, _domain, _cc_index):
Provider.__init__(self, _name, _domain, _cc_index)
def getMetaData(self, _html, _url):
"""
Parameters
------------------
_html: string
            The HTML page that was extracted from Common Crawl's WARC file.
_url: string
The url for the webpage.
Returns
------------------
        A tab-separated string containing the metadata extracted from the HTML.
"""
soup = BeautifulSoup(_html, 'html.parser')
otherMetaData = {}
src = None
license = None
version = None
imageURL = None
tags = None
extracted = []
imgInfo = soup.find_all('figure', {'class': re.compile('^(rowgrid-image teaser-item relative lazyload photo_grid)|(rowgrid-image rawpixel-image teaser-item lazyload relative photo_grid)$|()')})
for info in imgInfo:
self.clearFields()
self.provider = self.name
self.source = 'commoncrawl'
#verify the licenses
licenseInfo = info.findChild('a', {'rel': re.compile('^license'), 'href': True})
if licenseInfo:
ccURL = urlparse(licenseInfo.attrs['href'])
license, version = self.getLicense(ccURL.netloc, ccURL.path, _url)
if not license:
logging.warning('License not detected in url: {}'.format(_url))
continue
self.license = license
self.licenseVersion = version
#extract the image
imgInfo = info.findChild('img', {'class': 'lazyload'})
if imgInfo:
self.url = imgInfo.get('data-pin-media')
self.width = imgInfo.get('width')
self.height = imgInfo.get('height')
tmpthumbInfo = imgInfo.get('data-srcset', '')
imgList = tmpthumbInfo.split(', ')
if len(imgList) > 1:
self.thumbnail = imgList[-1].strip()
if not self.url:
logging.warning('Image not detected in url: {}'.format(_url))
continue
#get image ID, landing page and title
self.foreignIdentifier = info.get('data-node')
titleInfo = info.findChild('a', {'class': 'img-link', 'href': True})
if titleInfo:
self.title = self.validateContent('', titleInfo, 'title')
self.foreignLandingURL = self.sanitizeString(self.validateContent(_url, titleInfo, 'href'))
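            # formatOutput is assumed to be defined on the Provider base class,
            # flattening the fields set above into one tab-separated row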
extracted.extend(self.formatOutput)
return extracted
```
{
"source": "9Mad-Max5/PlexPlaylistImporter",
"score": 2
} |
#### File: PlexPlaylistImporter/Importers/PTL.py
```python
import Importers
def _ImportMe(PlaylistPath, PlaylistEncoding):
PlaylistFiles=[]
with open(PlaylistPath, 'r', encoding=PlaylistEncoding) as PlaylistFileHandle:
for LineStr in PlaylistFileHandle:
PlaylistFiles.append(LineStr.rstrip('\n'))
return PlaylistFiles
Importers.ImportHandlers['ptl']={'Name':"Plain Text List", 'ImportFunc':_ImportMe}
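# Usage sketch (assumed framework behavior): the importer registry maps file
# extensions to handlers, so a caller would resolve and invoke this as e.g.
#   Importers.ImportHandlers['ptl']['ImportFunc']('/path/list.ptl', 'utf-8')
# which returns the playlist entries as a list of path strings.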
```
{
"source": "9Mad-Max5/RSScrawler",
"score": 2
} |
#### File: RSScrawler/rsscrawler/crawler.py
```python
import traceback
from logging import handlers
import logging
import multiprocessing
import os
import random
import re
import signal
import sys
import time
from docopt import docopt
from requests.packages.urllib3 import disable_warnings as disable_request_warnings
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from rsscrawler import common
from rsscrawler import files
from rsscrawler import version
from rsscrawler.common import Unbuffered
from rsscrawler.common import add_decrypt
from rsscrawler.common import is_device
from rsscrawler.common import longest_substr
from rsscrawler.common import readable_time
from rsscrawler.config import RssConfig
from rsscrawler.db import RssDb
from rsscrawler.myjd import get_device
from rsscrawler.myjd import get_if_one_device
from rsscrawler.myjd import get_info
from rsscrawler.myjd import hoster_check
from rsscrawler.myjd import move_to_downloads
from rsscrawler.myjd import remove_from_linkgrabber
from rsscrawler.myjd import retry_decrypt
from rsscrawler.notifiers import notify
from rsscrawler.ombi import ombi
from rsscrawler.sites.bl import BL
from rsscrawler.sites.dd import DD
from rsscrawler.sites.dj import DJ
from rsscrawler.sites.sf import SF
from rsscrawler.sites.sj import SJ
from rsscrawler.url import check_url
from rsscrawler.web import start
version = "v." + version.get_version()
def crawler(configfile, dbfile, device, rsscrawler, log_level, log_file, log_format):
sys.stdout = Unbuffered(sys.stdout)
logger = logging.getLogger('rsscrawler')
logger.setLevel(log_level)
console = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(log_format)
console.setLevel(log_level)
logfile = logging.handlers.RotatingFileHandler(log_file)
logfile.setFormatter(formatter)
logfile.setLevel(logging.INFO)
logger.addHandler(logfile)
logger.addHandler(console)
if log_level == 10:
logfile_debug = logging.handlers.RotatingFileHandler(
log_file.replace("RSScrawler.log", "RSScrawler_DEBUG.log"))
logfile_debug.setFormatter(formatter)
logfile_debug.setLevel(10)
logger.addHandler(logfile_debug)
disable_request_warnings(InsecureRequestWarning)
log_debug = logger.debug
ombi_first_launch = True
crawltimes = RssDb(dbfile, "crawltimes")
arguments = docopt(__doc__, version='RSScrawler')
if not arguments['--testlauf']:
while True:
try:
if not device or not is_device(device):
device = get_device(configfile)
scraper = check_url(configfile, dbfile)
start_time = time.time()
crawltimes.update_store("active", "True")
crawltimes.update_store("start_time", start_time * 1000)
log_debug("--------Alle Suchfunktion gestartet.--------")
requested_movies = 0
requested_shows = 0
ombi_string = ""
if device:
ombi_results = ombi(configfile, dbfile, device, log_debug, ombi_first_launch)
device = ombi_results[0]
ombi_results = ombi_results[1]
requested_movies = ombi_results[0]
requested_shows = ombi_results[1]
ombi_first_launch = False
if requested_movies or requested_shows:
ombi_string = " - Ombi suchte: "
if requested_movies:
ombi_string = ombi_string + str(requested_movies) + " Filme"
if requested_shows:
ombi_string = ombi_string + " und "
if requested_shows:
ombi_string = ombi_string + str(requested_shows) + " Serien"
for task in search_pool(configfile, dbfile, device, logger, scraper):
name = task._INTERNAL_NAME
try:
file = " - Liste: " + task.filename
except AttributeError:
file = ""
log_debug("-----------Suchfunktion (" + name + file + ") gestartet!-----------")
device = task.periodical_task()
log_debug("-----------Suchfunktion (" + name + file + ") ausgeführt!-----------")
end_time = time.time()
total_time = end_time - start_time
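                # wait one full interval plus up to 25% random jitter to avoid
                # predictable request timing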
interval = int(rsscrawler.get('interval')) * 60
random_range = random.randrange(0, interval // 4)
wait = interval + random_range
next_start = end_time + wait
log_debug(
"-----Alle Suchfunktion ausgeführt (Dauer: " + readable_time(
total_time) + ")! Wartezeit bis zum nächsten Suchlauf: " + readable_time(wait) + ombi_string)
print(time.strftime("%Y-%m-%d %H:%M:%S") +
u" - Alle Suchfunktion ausgeführt (Dauer: " + readable_time(
total_time) + u")! Wartezeit bis zum nächsten Suchlauf: " + readable_time(wait) + ombi_string)
crawltimes.update_store("end_time", end_time * 1000)
crawltimes.update_store("total_time", readable_time(total_time))
crawltimes.update_store("next_start", next_start * 1000)
crawltimes.update_store("active", "False")
time.sleep(wait)
log_debug("-------------Wartezeit verstrichen-------------")
except Exception:
traceback.print_exc()
time.sleep(10)
else:
try:
if not device or not is_device(device):
device = get_device(configfile)
scraper = check_url(configfile, dbfile)
start_time = time.time()
log_debug("--------Testlauf gestartet.--------")
requested_movies = 0
requested_shows = 0
ombi_string = ""
if device:
ombi_results = ombi(configfile, dbfile, device, log_debug, ombi_first_launch)
device = ombi_results[0]
ombi_results = ombi_results[1]
requested_movies = ombi_results[0]
requested_shows = ombi_results[1]
if requested_movies or requested_shows:
ombi_string = " - Ombi suchte: "
if requested_movies:
ombi_string = ombi_string + str(requested_movies) + " Filme"
if requested_shows:
ombi_string = ombi_string + " und "
if requested_shows:
ombi_string = ombi_string + str(requested_shows) + " Serien"
for task in search_pool(configfile, dbfile, device, logger, scraper):
name = task._INTERNAL_NAME
try:
file = " - Liste: " + task.filename
except AttributeError:
file = ""
log_debug("-----------Suchfunktion (" + name + file + ") gestartet!-----------")
task.periodical_task()
log_debug("-----------Suchfunktion (" + name + file + ") ausgeführt!-----------")
end_time = time.time()
total_time = end_time - start_time
log_debug(
"---Testlauf ausgeführt (Dauer: " + readable_time(total_time) + ")!---" + ombi_string)
print(time.strftime("%Y-%m-%d %H:%M:%S") +
u" - Testlauf ausgeführt (Dauer: " + readable_time(total_time) + ")!" + ombi_string)
except Exception:
traceback.print_exc()
time.sleep(10)
def web_server(port, local_address, docker, configfile, dbfile, log_level, log_file, log_format, device):
start(port, local_address, docker, configfile, dbfile, log_level, log_file, log_format, device)
def crawldog(configfile, dbfile):
disable_request_warnings(InsecureRequestWarning)
crawljobs = RssConfig('Crawljobs', configfile)
autostart = crawljobs.get("autostart")
db = RssDb(dbfile, 'crawldog')
grabber_was_collecting = False
device = False
while True:
try:
if not device or not is_device(device):
device = get_device(configfile)
myjd_packages = get_info(configfile, device)
grabber_collecting = myjd_packages[2]
if grabber_was_collecting or grabber_collecting:
grabber_was_collecting = grabber_collecting
time.sleep(5)
else:
packages_in_downloader_decrypted = myjd_packages[4][0]
packages_in_linkgrabber_decrypted = myjd_packages[4][1]
offline_packages = myjd_packages[4][2]
encrypted_packages = myjd_packages[4][3]
try:
watched_titles = db.retrieve_all_titles()
except:
watched_titles = False
notify_list = []
if packages_in_downloader_decrypted or packages_in_linkgrabber_decrypted or offline_packages or encrypted_packages:
if watched_titles:
for title in watched_titles:
if packages_in_downloader_decrypted:
for package in packages_in_downloader_decrypted:
if title[0] in package['name'] or title[0].replace(".", " ") in package['name']:
check = hoster_check(configfile, device, [package], title[0], [0])
device = check[0]
if device:
db.delete(title[0])
if packages_in_linkgrabber_decrypted:
for package in packages_in_linkgrabber_decrypted:
if title[0] in package['name'] or title[0].replace(".", " ") in package['name']:
check = hoster_check(configfile, device, [package], title[0], [0])
device = check[0]
episode = RssDb(dbfile, 'episode_remover').retrieve(title[0])
if episode:
filenames = package['filenames']
if len(filenames) > 1:
fname_episodes = []
for fname in filenames:
try:
if re.match(r'.*S\d{1,3}E\d{1,3}.*', fname,
flags=re.IGNORECASE):
fname = re.findall(r'S\d{1,3}E(\d{1,3})', fname,
flags=re.IGNORECASE).pop()
else:
fname = fname.replace("hddl8", "").replace("dd51",
"").replace(
"264", "").replace("265",
"")
except:
fname = fname.replace("hddl8", "").replace("dd51", "").replace(
"264", "").replace("265", "")
fname_episode = "".join(re.findall(r'\d+', fname.split(".part")[0]))
try:
fname_episodes.append(str(int(fname_episode)))
except:
pass
replacer = longest_substr(fname_episodes)
new_fname_episodes = []
for new_ep_fname in fname_episodes:
try:
new_fname_episodes.append(
str(int(new_ep_fname.replace(replacer, ""))))
except:
pass
replacer = longest_substr(new_fname_episodes)
newer_fname_episodes = []
for new_ep_fname in new_fname_episodes:
try:
newer_fname_episodes.append(
str(int(re.sub(replacer, "", new_ep_fname, 1))))
except:
pass
replacer = longest_substr(newer_fname_episodes)
even_newer_fname_episodes = []
for newer_ep_fname in newer_fname_episodes:
try:
even_newer_fname_episodes.append(
str(int(re.sub(replacer, "", newer_ep_fname, 1))))
except:
pass
if even_newer_fname_episodes:
fname_episodes = even_newer_fname_episodes
elif newer_fname_episodes:
fname_episodes = newer_fname_episodes
elif new_fname_episodes:
fname_episodes = new_fname_episodes
delete_linkids = []
pos = 0
for delete_id in package['linkids']:
if str(episode) != str(fname_episodes[pos]):
delete_linkids.append(delete_id)
pos += 1
if delete_linkids:
delete_uuids = [package['uuid']]
RssDb(dbfile, 'episode_remover').delete(title[0])
device = remove_from_linkgrabber(configfile, device, delete_linkids,
delete_uuids)
if autostart:
device = move_to_downloads(configfile, device, package['linkids'],
[package['uuid']])
if device:
db.delete(title[0])
if offline_packages:
for package in offline_packages:
if title[0] in package['name'] or title[0].replace(".", " ") in package['name']:
notify_list.append("[Offline] - " + title[0])
print((u"[Offline] - " + title[0]))
db.delete(title[0])
if encrypted_packages:
for package in encrypted_packages:
if title[0] in package['name'] or title[0].replace(".", " ") in package['name']:
if title[1] == 'added':
if retry_decrypt(configfile, dbfile, device, package['linkids'],
[package['uuid']],
package['urls']):
db.delete(title[0])
db.store(title[0], 'retried')
else:
add_decrypt(package['name'], package['url'], "", dbfile)
device = remove_from_linkgrabber(configfile, device, package['linkids'],
[package['uuid']])
notify_list.append("[Click'n'Load notwendig] - " + title[0])
print(u"[Click'n'Load notwendig] - " + title[0])
db.delete(title[0])
else:
if not grabber_collecting:
db.reset()
if notify_list:
notify(notify_list, configfile)
time.sleep(30)
except Exception:
traceback.print_exc()
time.sleep(30)
def search_pool(configfile, dbfile, device, logger, scraper):
return [
SJ(configfile, dbfile, device, logger, scraper, filename='SJ_Serien', internal_name='SJ'),
SJ(configfile, dbfile, device, logger, scraper, filename='SJ_Serien_Regex', internal_name='SJ'),
SJ(configfile, dbfile, device, logger, scraper, filename='SJ_Staffeln_Regex', internal_name='SJ'),
SJ(configfile, dbfile, device, logger, scraper, filename='MB_Staffeln', internal_name='MB'),
DJ(configfile, dbfile, device, logger, scraper, filename='DJ_Dokus', internal_name='DJ'),
DJ(configfile, dbfile, device, logger, scraper, filename='DJ_Dokus_Regex', internal_name='DJ'),
SF(configfile, dbfile, device, logger, scraper, filename='SJ_Serien', internal_name='SJ'),
SF(configfile, dbfile, device, logger, scraper, filename='SJ_Serien_Regex', internal_name='SJ'),
SF(configfile, dbfile, device, logger, scraper, filename='SJ_Staffeln_Regex', internal_name='SJ'),
SF(configfile, dbfile, device, logger, scraper, filename='MB_Staffeln', internal_name='MB'),
BL(configfile, dbfile, device, logger, scraper, filename='MB_Regex'),
BL(configfile, dbfile, device, logger, scraper, filename='IMDB'),
BL(configfile, dbfile, device, logger, scraper, filename='MB_Filme'),
BL(configfile, dbfile, device, logger, scraper, filename='MB_Staffeln'),
BL(configfile, dbfile, device, logger, scraper, filename='MB_3D'),
DD(configfile, dbfile, device, logger, scraper)
]
def main():
arguments = docopt(__doc__, version='RSScrawler')
print(u"┌──────────────────────────────────────────────┐")
print(u" RSScrawler " + version + " von RiX")
print(u" Mad-Max Testedition")
print(u" https://github.com/rix1337/RSScrawler")
print(u"└──────────────────────────────────────────────┘")
if arguments['--docker']:
configpath = "/config"
else:
configpath = files.config(arguments['--config'])
configfile = os.path.join(configpath, "RSScrawler.ini")
dbfile = os.path.join(configpath, "RSScrawler.db")
print(u"Nutze das Verzeichnis " + configpath + u" für Einstellungen/Logs")
log_level = logging.__dict__[
arguments['--log-level']] if arguments['--log-level'] in logging.__dict__ else logging.INFO
log_file = os.path.join(configpath, 'RSScrawler.log')
log_format = '%(asctime)s - %(message)s'
hostnames = RssConfig('Hostnames', configfile)
def clean_up_hostname(host, string):
if '/' in string:
string = string.replace('https://', '').replace('http://', '')
string = re.findall(r'([a-z-.]*\.[a-z]*)', string)[0]
hostnames.save(host, string)
        if re.match(r'.*[A-Z].*', string):
            string = string.lower()
            hostnames.save(host, string)
if not string:
print(u'Kein Hostname gesetzt: ' + host.upper() + ' (Seite wird ignoriert!)')
return string
set_hostnames = {}
list_names = ['sj', 'dj', 'sf', 'mb', 'hw', 'hs', 'fx', 'nk', 'fc']
for name in list_names:
hostname = clean_up_hostname(name, hostnames.get(name))
if hostname:
set_hostnames[name] = hostname
if not arguments['--testlauf'] and not set_hostnames:
print(u'Keine Hostnamen in der RSScrawler.ini gefunden! Beende RSScrawler!')
time.sleep(10)
sys.exit(1)
disable_request_warnings(InsecureRequestWarning)
if arguments['--testlauf']:
device = False
else:
if not os.path.exists(configfile):
if arguments['--docker']:
if arguments['--jd-user'] and arguments['--jd-pass']:
device = files.myjd_input(configfile, arguments['--port'], arguments['--jd-user'],
arguments['--jd-pass'], arguments['--jd-device'])
else:
device = False
else:
device = files.myjd_input(configfile, arguments['--port'], arguments['--jd-user'],
arguments['--jd-pass'],
arguments['--jd-device'])
else:
rsscrawler = RssConfig('RSScrawler', configfile)
user = rsscrawler.get('myjd_user')
password = rsscrawler.get('myjd_pass')
if user and password:
device = get_device(configfile)
if not device:
device = get_if_one_device(user, password)
if device:
print(u"Gerätename " + device + " automatisch ermittelt.")
rsscrawler.save('myjd_device', device)
device = get_device(configfile)
else:
device = files.myjd_input(configfile, arguments['--port'], arguments['--jd-user'],
arguments['--jd-pass'], arguments['--jd-device'])
if not device and not arguments['--testlauf']:
print(u'My JDownloader Zugangsdaten fehlerhaft! Beende RSScrawler!')
time.sleep(10)
sys.exit(1)
else:
print(u"Erfolgreich mit My JDownloader verbunden. Gerätename: " + device.name)
rsscrawler = RssConfig('RSScrawler', configfile)
port = int(rsscrawler.get("port"))
docker = False
if arguments['--docker']:
port = int('9090')
docker = True
elif arguments['--port']:
port = int(arguments['--port'])
if rsscrawler.get("prefix"):
prefix = '/' + rsscrawler.get("prefix")
else:
prefix = ''
local_address = 'http://' + common.check_ip() + ':' + str(port) + prefix
if not arguments['--docker']:
print(u'Der Webserver ist erreichbar unter ' + local_address)
if arguments['--keep-cdc']:
print(u"CDC-Tabelle nicht geleert!")
else:
RssDb(dbfile, 'cdc').reset()
p = multiprocessing.Process(target=web_server,
args=(port, local_address, docker, configfile, dbfile, log_level, log_file, log_format,
device))
p.start()
if not arguments['--testlauf']:
c = multiprocessing.Process(target=crawler,
args=(configfile, dbfile, device, rsscrawler, log_level, log_file, log_format))
c.start()
w = multiprocessing.Process(target=crawldog, args=(configfile, dbfile))
w.start()
print(u'Drücke [Strg] + [C] zum Beenden')
        def signal_handler(signum=None, frame=None):
print(u'Beende RSScrawler...')
p.terminate()
c.terminate()
w.terminate()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
try:
while True:
signal.pause()
except AttributeError:
while True:
time.sleep(1)
else:
crawler(configfile, dbfile, device, rsscrawler, log_level, log_file, log_format)
p.terminate()
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: RSScrawler/rsscrawler/fakefeed.py
```python
import json
import re
import feedparser
from bs4 import BeautifulSoup
from rsscrawler.common import rreplace
from rsscrawler.config import RssConfig
from rsscrawler.url import get_url
from rsscrawler.url import get_urls_async
class FakeFeedParserDict(dict):
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
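# Example: FakeFeedParserDict({"title": "x"}).title == "x"; a missing key
# raises AttributeError, mimicking feedparser's attribute-style access.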
def fx_content_to_soup(content):
content = BeautifulSoup(content, 'lxml')
return content
def fx_download_links(content, title, configfile):
hostnames = RssConfig('Hostnames', configfile)
fc = hostnames.get('fc').replace('www.', '').split('.')[0]
try:
try:
content = BeautifulSoup(content, 'lxml')
except:
content = BeautifulSoup(str(content), 'lxml')
try:
download_links = [content.find("a", text=re.compile(r".*" + title + r".*"))['href']]
except:
if not fc:
fc = '^unmatchable$'
print(u"FC Hostname nicht gesetzt. FX kann keine Links finden!")
download_links = re.findall(r'"(https://.+?' + fc + '.+?)"', str(content))
except:
return False
return download_links
def fx_feed_enricher(feed, configfile):
hostnames = RssConfig('Hostnames', configfile)
fc = hostnames.get('fc').replace('www.', '').split('.')[0]
if not fc:
fc = '^unmatchable$'
print(u"FC Hostname nicht gesetzt. FX kann keine Links finden!")
feed = BeautifulSoup(feed, 'lxml')
articles = feed.findAll("article")
entries = []
for article in articles:
try:
article = BeautifulSoup(str(article), 'lxml')
titles = article.findAll("a", href=re.compile(fc))
for title in titles:
title = title.text.encode("ascii", errors="ignore").decode().replace("/", "")
if title:
if "download" in title.lower():
try:
title = str(article.find("strong", text=re.compile(r".*Release.*")).nextSibling)
except:
continue
published = ""
dates = article.findAll("time")
for date in dates:
published = date["datetime"]
entries.append(FakeFeedParserDict({
"title": title,
"published": published,
"content": [
FakeFeedParserDict({
"value": str(article)
})]
}))
except:
print(u"FX hat den Feed angepasst. Parsen teilweise nicht möglich!")
continue
feed = {"entries": entries}
feed = FakeFeedParserDict(feed)
return feed
def fx_search_results(content, configfile, dbfile, scraper):
hostnames = RssConfig('Hostnames', configfile)
fc = hostnames.get('fc').replace('www.', '').split('.')[0]
if not fc:
fc = '^unmatchable$'
print(u"FC Hostname nicht gesetzt. FX kann keine Links finden!")
articles = content.find("main").find_all("article")
result_urls = []
for article in articles:
url = article.find("a")["href"]
if url:
result_urls.append(url)
items = []
if result_urls:
results = []
for url in result_urls:
results.append(get_url(url, configfile, dbfile, scraper))
for result in results:
article = BeautifulSoup(str(result), 'lxml')
titles = article.find_all("a", href=re.compile(fc))
for title in titles:
link = article.find("link", rel="canonical")["href"]
title = title.text.encode("ascii", errors="ignore").decode().replace("/", "")
if title:
if "download" in title.lower():
try:
title = str(content.find("strong", text=re.compile(r".*Release.*")).nextSibling)
except:
continue
items.append([title, link + "|" + title])
return items
def hs_feed_enricher(feed, configfile, dbfile, scraper):
feed = feedparser.parse(feed)
async_results = []
for post in feed.entries:
try:
async_results.append(post.links[0].href)
except:
pass
async_results = get_urls_async(async_results, configfile, dbfile, scraper)[0]
entries = []
if async_results:
for result in async_results:
try:
content = []
details = result
title = BeautifulSoup(details, 'lxml').find("h2", {"class": "entry-title"}).text
published = BeautifulSoup(details, 'lxml').find("p", {"class": "blog-post-meta"}).contents[0]
data = BeautifulSoup(details, 'lxml').find("div", {"class": "entry-content"}).contents[2]
content.append(str(data).replace("\n", ""))
content = "".join(content)
entries.append(FakeFeedParserDict({
"title": title,
"published": published,
"content": [FakeFeedParserDict({
"value": content})]
}))
except:
pass
feed = {"entries": entries}
feed = FakeFeedParserDict(feed)
return feed
def hs_search_results(url):
content = []
search = BeautifulSoup(url, 'lxml')
if search:
results = search.find_all("item")
if results:
for r in results:
try:
title = r.title.next
link = r.find("comments").text
content.append((title, link))
except:
break
return content
def hs_search_to_feedparser_dict(beautifulsoup_object_list):
entries = []
for beautifulsoup_object in beautifulsoup_object_list:
title = beautifulsoup_object["key"]
# TODO: this is entirely broken
item_head = beautifulsoup_object["value"].find_all("p", {"class": "blog-post-meta"})
item_download = beautifulsoup_object["value"].find_all("div", {"class": "entry-content"})
i = 0
for item in item_head:
contents = item_download[i].contents
published = item.contents[0]
content = []
data = contents[2]
content.append(str(data).replace("\n", ""))
content = "".join(content)
entries.append(FakeFeedParserDict({
"title": title,
"published": published,
"content": [FakeFeedParserDict({
"value": content})]
}))
i += 1
feed = {"entries": entries}
feed = FakeFeedParserDict(feed)
return feed
def hs_search_to_soup(url, configfile, dbfile, scraper):
content = []
search = BeautifulSoup(get_url(url, configfile, dbfile, scraper), 'lxml')
if search:
results = search.find_all("item")
if results:
async_results = []
for r in results:
try:
async_results.append(r.link.next)
except:
pass
async_results = get_urls_async(async_results, configfile, dbfile, scraper)[0]
# TODO: This is a bug, if async results is ordered differently than results
i = 0
for r in results:
try:
title = r.title.next
details = BeautifulSoup(async_results[i], 'lxml')
content.append({
"key": title,
"value": details
})
except:
pass
i += 1
return hs_search_to_feedparser_dict(content)
def nk_feed_enricher(content, base_url, configfile, dbfile, scraper):
content = BeautifulSoup(content, 'lxml')
posts = content.findAll("a", {"class": "btn"}, href=re.compile("/release/"))
async_results = []
for post in posts:
try:
async_results.append(base_url + post['href'])
except:
pass
async_results = get_urls_async(async_results, configfile, dbfile, scraper)[0]
entries = []
if async_results:
for result in async_results:
try:
content = []
details = BeautifulSoup(result, 'lxml').find("div", {"class": "article"})
title = details.find("span", {"class": "subtitle"}).text
published = details.find("p", {"class": "meta"}).text
content.append("mkv ")
try:
imdb = details.find("a", href=re.compile("imdb.com"))["href"]
content.append('<a href="' + imdb + '" 9,9</a>')
except:
pass
links = details.find_all("a", href=re.compile("/go/"))
for link in links:
content.append('href="' + base_url + link["href"] + '"' + link.text + '<')
content = "".join(content)
entries.append(FakeFeedParserDict({
"title": title,
"published": published,
"content": [FakeFeedParserDict({
"value": content})]
}))
except:
pass
feed = {"entries": entries}
feed = FakeFeedParserDict(feed)
return feed
def nk_search_results(content, base_url):
content = BeautifulSoup(content, 'lxml')
posts = content.findAll("a", {"class": "btn"}, href=re.compile("/release/"))
results = []
for post in posts:
try:
title = post.parent.parent.parent.find("span", {"class": "subtitle"}).text
link = base_url + post['href']
results.append([title, link])
except:
pass
return results
def j_releases_to_feedparser_dict(releases, list_type, base_url, check_seasons_or_episodes):
releases = json.loads(releases)
entries = []
for release in releases:
if check_seasons_or_episodes:
try:
if list_type == 'seasons' and release['episode']:
continue
elif list_type == 'episodes' and not release['episode']:
continue
except:
continue
title = release['name']
series_url = base_url + '/serie/' + release["_media"]['slug']
published = release['createdAt']
entries.append(FakeFeedParserDict({
"title": title,
"series_url": series_url,
"published": published
}))
feed = {"entries": entries}
feed = FakeFeedParserDict(feed)
return feed
def sf_releases_to_feedparser_dict(releases, list_type, base_url, check_seasons_or_episodes):
content = BeautifulSoup(releases, 'lxml')
releases = content.findAll("div", {"class": "row"}, style=re.compile("order"))
entries = []
for release in releases:
a = release.find("a", href=re.compile("/"))
title = a.text
is_episode = re.match(r'.*(S\d{1,3}E\d{1,3}).*', title)
if check_seasons_or_episodes:
try:
if list_type == 'seasons' and is_episode:
continue
elif list_type == 'episodes' and not is_episode:
continue
except:
continue
series_url = rreplace(base_url + '/api/v1' + a['href'], '/', '/season/', 1)
published = release.find("div", {"class": "datime"}).text
entries.append(FakeFeedParserDict({
"title": title,
"series_url": series_url,
"published": published
}))
feed = {"entries": entries}
feed = FakeFeedParserDict(feed)
return feed
```
#### File: RSScrawler/rsscrawler/files.py
```python
import os
import sys
from rsscrawler.config import RssConfig
from rsscrawler.myjd import get_device
from rsscrawler.myjd import get_if_one_device
def config(configpath):
configfile = "RSScrawler.conf"
if configpath:
f = open(configfile, "w")
f.write(configpath)
f.close()
elif os.path.exists(configfile):
f = open(configfile, "r")
configpath = f.readline()
else:
print(u"Wo sollen Einstellungen und Logs abgelegt werden? Leer lassen, um den aktuellen Pfad zu nutzen.")
configpath = input("Pfad angeben:")
if len(configpath) > 0:
f = open(configfile, "w")
f.write(configpath)
f.close()
if len(configpath) == 0:
configpath = os.path.dirname(sys.argv[0])
configpath = configpath.replace("\\", "/")
configpath = configpath[:-1] if configpath.endswith('/') else configpath
f = open(configfile, "w")
f.write(configpath)
f.close()
configpath = configpath.replace("\\", "/")
configpath = configpath[:-1] if configpath.endswith('/') else configpath
if not os.path.exists(configpath):
os.makedirs(configpath)
return configpath
def myjd_input(configfile, port, user, password, device):
if user and password and device:
print(u"Zugangsdaten aus den Parametern übernommen.")
elif user and password and not device:
device = get_if_one_device(user, password)
if device:
print(u"Gerätename " + device + " automatisch ermittelt.")
else:
print(u"Bitte die Zugangsdaten für My JDownloader angeben:")
user = input("Nutzername/Email:")
password = input("<PASSWORD>:")
device = get_if_one_device(user, password)
if device:
print(u"Gerätename " + device + " automatisch ermittelt.")
else:
device = input(u"Gerätename:")
if not port:
port = '9090'
sections = ['RSScrawler', 'MB', 'SJ', 'DD', 'Notifications', 'Crawljobs']
for section in sections:
RssConfig(section, configfile)
if port:
RssConfig('RSScrawler', configfile).save("port", port)
RssConfig('RSScrawler', configfile).save("myjd_user", user)
RssConfig('RSScrawler', configfile).save("myjd_pass", password)
RssConfig('RSScrawler', configfile).save("myjd_device", device)
device = get_device(configfile)
if device:
return device
else:
return False
```
#### File: RSScrawler/rsscrawler/ombi.py
```python
import json
import re
import requests
from bs4 import BeautifulSoup
from rsscrawler import search
from rsscrawler.common import decode_base64
from rsscrawler.common import encode_base64
from rsscrawler.common import sanitize
from rsscrawler.config import RssConfig
from rsscrawler.db import RssDb
from rsscrawler.url import get_url_headers
def get_imdb(url, configfile, dbfile, scraper):
result = get_url_headers(url, configfile, dbfile,
scraper=scraper,
headers={'Accept-Language': 'de'}
)
output = result[0].text
scraper = result[1]
return output, scraper
def get_title(input):
try:
raw_title = re.findall(
r"<title>(.*) \((?:.*(?:19|20)\d{2})\) - IMDb</title>", input)[0]
except:
raw_title = re.findall(r'<meta name="title" content="(.*) \((?:.*(?:19|20)\d{2}).*\) - IMDb"', input)[0]
return sanitize(raw_title)
def get_year(input):
try:
raw_year = re.findall(r"<title>(?:.*) \((.*(?:19|20)\d{2})\) - IMDb</title>", input)[0]
except:
raw_year = re.findall(r'<meta name="title" content="(?:.*) \((.*(?:19|20)\d{2}).*\) - IMDb"', input)[0]
return sanitize(raw_year)
def imdb_movie(imdb_id, configfile, dbfile, scraper):
try:
result = get_imdb('https://www.imdb.com/title/' +
imdb_id, configfile, dbfile, scraper)
output = result[0]
scraper = result[1]
title = get_title(output)
year = get_year(output)
return title + " " + year, scraper
except:
print(u"[Ombi] - Fehler beim Abruf der IMDb für: " + imdb_id)
return False, False
def imdb_show(ombi_imdb_id, configfile, dbfile, scraper):
try:
result = get_imdb('https://www.imdb.com/title/' + ombi_imdb_id, configfile, dbfile, scraper)
output = result[0]
scraper = result[1]
title = get_title(output)
eps = {}
soup = BeautifulSoup(output, 'lxml')
imdb_id = soup.find_all("meta", property="pageId")[0]["content"]
seasons = soup.find_all("a", href=re.compile(r'.*/title/' + imdb_id + r'/episodes\?season=.*'))
if not seasons:
episode_guide = soup.find_all("a", {"class": "np_episode_guide"})[0]["href"]
result = get_imdb("https://www.imdb.com/" + episode_guide, configfile, dbfile, scraper)
output = result[0]
scraper = result[1]
soup = BeautifulSoup(output, 'lxml')
imdb_id = soup.find_all("meta", property="pageId")[0]["content"]
seasons = soup.find_all("a", href=re.compile(r'.*/title/' + imdb_id + r'/episodes\?season=.*'))
latest_season = int(seasons[0].text)
total_seasons = list(range(1, latest_season + 1))
for sn in total_seasons:
result = get_imdb("https://www.imdb.com/title/" + imdb_id + "/episodes?season=" + str(sn), configfile,
dbfile, scraper)
output = result[0]
scraper = result[1]
ep = []
soup = BeautifulSoup(output, 'lxml')
episodes = soup.find_all("meta", itemprop="episodeNumber")
for e in episodes:
ep.append(int(e['content']))
eps[sn] = ep
return title, eps, scraper
except:
print(u"[Ombi] - Fehler beim Abruf der IMDb für: " + ombi_imdb_id)
return False, False, False
def generate_reg_title(title, counter, quality):
title = title.replace(':', '').replace(' -', '').replace('!', '').replace(
' ', '.').replace("'", '').replace('(', '').replace(')', '')
title += '\..*.'
title += counter
title += '\..*.'
title += quality
title += '.*'
return title
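# Example (hypothetical input):
#   generate_reg_title("Some Show!", "S01", "1080p")
#   -> 'Some.Show\..*.S01\..*.1080p.*'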
def generate_api_title(title, counter):
title = title.replace(':', '').replace(' -', '').replace('!', '').replace(
' ', '.').replace("'", '').replace('(', '').replace(')', '')
title += ','
title += counter
return title
def ombi(configfile, dbfile, device, log_debug, first_launch):
db = RssDb(dbfile, 'Ombi')
    # lists of the active movie/series searches
mbstlist = RssDb(dbfile, 'MB_Staffeln')
mblist = RssDb(dbfile, 'MB_Filme')
sjlist = RssDb(dbfile, 'SJ_Serien')
    # regex series lists for a better search
sjregexdb = RssDb(dbfile, 'SJ_Serien_Regex')
mbregexdb = RssDb(dbfile, 'MB_Regex')
# Settings for Regex search
sjfilter = RssConfig('SJ', configfile)
sjquality = sjfilter.get('quality')
sjquality = sjquality[:-1]
sjregex = sjfilter.get('regex')
mbfilter = RssConfig('MB', configfile)
mbquality = mbfilter.get('seasonsquality')
mbquality = mbquality[:-1]
mbregex = mbfilter.get('regex')
mbseasons = mbfilter.get('seasonpacks')
config = RssConfig('Ombi', configfile)
url = config.get('url')
api = config.get('api')
if not url or not api:
return [device, [0, 0]]
english = RssConfig('RSScrawler', configfile).get('english')
try:
requested_movies = requests.get(
url + '/api/v1/Request/movie', headers={'ApiKey': api})
requested_movies = json.loads(requested_movies.text)
requested_shows = requests.get(
url + '/api/v1/Request/tv', headers={'ApiKey': api})
requested_shows = json.loads(requested_shows.text)
len_movies = len(requested_movies)
len_shows = len(requested_shows)
if first_launch:
log_debug("Erfolgreich mit Ombi verbunden.")
print(u"Erfolgreich mit Ombi verbunden.")
except:
log_debug("Ombi ist nicht erreichbar!")
print(u"Ombi ist nicht erreichbar!")
return [False, [0, 0]]
scraper = False
for r in requested_movies:
if bool(r.get("approved")):
imdb_id = r.get("imdbId")
            # take the title from Ombi and strip special characters
movie_tit = r.get("title")
movie_tit = movie_tit.replace(':', '').replace(
' -', '').replace(' ', '.')
if not bool(r.get("available")):
# Neue Struktur der DB
if db.retrieve('movie_' + str(imdb_id)) == 'added':
db.delete('movie_' + str(imdb_id))
db.store('movie_' + str(imdb_id), 'search')
elif not db.retrieve('movie_' + str(imdb_id)) == 'search':
response = imdb_movie(imdb_id, configfile, dbfile, scraper)
title = response[0]
if title:
scraper = response[1]
best_result = search.best_result_bl(
title, configfile, dbfile)
print(u"Film: " + title + u" durch Ombi hinzugefügt.")
if best_result:
search.download_bl(
best_result, device, configfile, dbfile)
if english:
title = r.get('title')
best_result = search.best_result_bl(
title, configfile, dbfile)
print(u"Film: " + title +
u"durch Ombi hinzugefügt.")
if best_result:
search.download_bl(
best_result, device, configfile, dbfile)
db.store('movie_' + str(imdb_id), 'search')
else:
log_debug(
"Titel für IMDB-ID nicht abrufbar: " + imdb_id)
elif bool(r.get("available")):
            # migrate existing entries from 'added' to 'available' to match the new DB values
if db.retrieve('movie_' + str(imdb_id)) == 'added':
db.delete('movie_' + str(imdb_id))
db.store('movie_' + str(imdb_id), 'available')
if db.retrieve('movie_' + str(imdb_id)) == 'search':
db.delete('movie_' + str(imdb_id))
db.store('movie_' + str(imdb_id), 'available')
if not db.retrieve('movie_' + str(imdb_id)) == 'available':
db.store('movie_' + str(imdb_id), 'available')
if mblist.retrieve_key(str(movie_tit)):
mblist.delete(str(movie_tit))
for r in requested_shows:
imdb_id = r.get("imdbId")
show_tit = r.get("title")
show_tit = show_tit.replace(':', '').replace(
' -', '').replace('!', '').replace('(', '').replace(')', '').replace("'", '')
infos = None
child_requests = r.get("childRequests")
for cr in child_requests:
if bool(cr.get("approved")):
if not bool(cr.get("available")):
details = cr.get("seasonRequests")
for season in details:
# counter for episodes
searchepisodes = 0
sn = season.get("seasonNumber")
eps = []
episodes = season.get("episodes")
s = str(sn)
if len(s) == 1:
s = "0" + s
s = "S" + s
show_tits = generate_reg_title(
show_tit, s, sjquality)
mbshow_tits = generate_reg_title(
show_tit, s, mbquality)
for episode in episodes:
if not bool(episode.get("available")):
searchepisodes += 1
enr = episode.get("episodeNumber")
e = str(enr)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
if db.retrieve('show_' + str(imdb_id) + '_' + se) == 'added':
db.delete(
'show_' + str(imdb_id) + '_' + se)
db.store('show_' + str(imdb_id) +
'_' + se, 'search')
eps.append(enr)
elif not db.retrieve('show_' + str(imdb_id) + '_' + se) == 'search':
db.store('show_' + str(imdb_id) +
'_' + se, 'search')
eps.append(enr)
if db.retrieve('show_' + str(imdb_id) + '_' + se) == 'search':
show_titse = generate_reg_title(
show_tit, se, sjquality)
show_tit_search = generate_api_title(
show_tit, s)
if sjregex == True:
if not sjregexdb.retrieve_key(show_titse):
sjregexdb.store_key(show_titse)
print(u"Episode " + show_titse +
u" zu Regex hinzugefuegt.")
elif bool(episode.get("available")):
enr = episode.get("episodeNumber")
e = str(enr)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
if db.retrieve('show_' + str(imdb_id) + '_' + se) == 'added':
db.delete(
'show_' + str(imdb_id) + '_' + se)
db.store('show_' + str(imdb_id) +
'_' + se, 'available')
elif db.retrieve('show_' + str(imdb_id) + '_' + se) == 'search':
db.delete(
'show_' + str(imdb_id) + '_' + se)
db.store('show_' + str(imdb_id) +
'_' + se, 'available')
elif not db.retrieve('show_' + str(imdb_id) + '_' + se) == 'available':
db.store('show_' + str(imdb_id) +
'_' + se, 'available')
if db.retrieve('show_' + str(imdb_id) + '_' + se) == 'available':
show_titse = generate_reg_title(
show_tit, se, sjquality)
if sjregex == True:
if sjregexdb.retrieve_key(show_titse):
sjregexdb.delete(show_titse)
print(u"Episode " + show_titse +
u" von Regex entfernt.")
if searchepisodes < 2:
if sjregex == True:
if sjregexdb.retrieve_key(show_tits):
sjregexdb.delete(show_tits)
print(u"Staffel " + show_tits +
u" von SJ Regex entfernt.")
if mbregex == True and mbseasons == True:
if mbregexdb.retrieve_key(mbshow_tits):
mbregexdb.delete(mbshow_tits)
print(u"Staffel " + mbshow_tits +
u" von MB Regex entfernt.")
if mbstlist.retrieve_key(str(show_tit)):
mbstlist.delete(str(show_tit))
if sjlist.retrieve_key(str(show_tit)):
sjlist.delete(str(show_tit))
elif searchepisodes > 3:
if sjregex == True:
if not sjregexdb.retrieve_key(show_tits):
sjregexdb.store_key(show_tits)
print(u"Staffel " + show_tits +
u" zu SJ-Regex hinzugefuegt.")
if mbregex == True and mbseasons == True:
if not mbregexdb.retrieve_key(mbshow_tits):
mbregexdb.store_key(mbshow_tits)
print(u"Staffel " + mbshow_tits +
u" zu MB-Regex hinzugefuegt.")
searchepisodes = 0
if eps:
if not infos:
infos = imdb_show(
imdb_id, configfile, dbfile, scraper)
if infos:
title = infos[0]
all_eps = infos[1]
scraper = infos[2]
check_sn = False
if all_eps:
check_sn = all_eps.get(sn)
if check_sn:
sn_length = len(eps)
check_sn_length = len(check_sn)
if check_sn_length > sn_length:
for ep in eps:
e = str(ep)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
payload = search.best_result_sj(
title, configfile, dbfile)
if payload:
payload = decode_base64(
payload).split("|")
payload = encode_base64(
payload[0] + "|" + payload[1] + "|" + se)
added_episode = search.download_sj(
payload, configfile, dbfile)
if not added_episode:
payload = decode_base64(
payload).split("|")
payload = encode_base64(
payload[0] + "|" + payload[1] + "|" + s)
add_season = search.download_sj(
payload, configfile, dbfile)
for e in eps:
e = str(e)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
if db.retrieve('show_' + str(imdb_id) + '_' + se) == 'added':
db.delete(
'show_' + str(imdb_id) + '_' + se)
db.store(
'show_' + str(imdb_id) + '_' + se, 'search')
elif not db.retrieve('show_' + str(imdb_id) + '_' + se) == 'search':
db.store(
'show_' + str(imdb_id) + '_' + se, 'search')
if not add_season:
                                log_debug(
                                    u"Konnte kein Release für " + title + " " + se + " finden.")
break
if not db.retrieve('show_' + str(imdb_id) + '_' + se) == 'search':
db.store(
'show_' + str(imdb_id) + '_' + se, 'search')
else:
payload = search.best_result_sj(
title, configfile, dbfile)
if payload:
payload = decode_base64(
payload).split("|")
payload = encode_base64(
payload[0] + "|" + payload[1] + "|" + s)
search.download_sj(
payload, configfile, dbfile)
for ep in eps:
e = str(ep)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
if db.retrieve('show_' + str(imdb_id) + '_' + se) == 'added':
db.delete(
'show_' + str(imdb_id) + '_' + se)
db.store(
'show_' + str(imdb_id) + '_' + se, 'search')
elif not db.retrieve('show_' + str(imdb_id) + '_' + se) == 'search':
db.store(
'show_' + str(imdb_id) + '_' + se, 'search')
print(u"Serie/Staffel/Episode: " +
title + u" durch Ombi hinzugefügt.")
else:
details = cr.get("seasonRequests")
for season in details:
searchepisodes = 0
sn = season.get("seasonNumber")
eps = []
episodes = season.get("episodes")
s = str(sn)
if len(s) == 1:
s = "0" + s
s = "S" + s
show_tits = generate_reg_title(
show_tit, s, sjquality)
mbshow_tits = generate_reg_title(
show_tit, s, mbquality)
for episode in episodes:
                # database extension ok
if bool(episode.get("available")):
searchepisodes += 1
enr = episode.get("episodeNumber")
e = str(enr)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
if db.retrieve('show_' + str(imdb_id) + '_' + se) == 'added':
db.delete(
'show_' + str(imdb_id) + '_' + se)
db.store('show_' + str(imdb_id) +
'_' + se, 'available')
elif db.retrieve('show_' + str(imdb_id) + '_' + se) == 'search':
db.delete(
'show_' + str(imdb_id) + '_' + se)
db.store('show_' + str(imdb_id) +
'_' + se, 'available')
elif not db.retrieve('show_' + str(imdb_id) + '_' + se) == 'available':
db.store('show_' + str(imdb_id) +
'_' + se, 'available')
if db.retrieve('show_' + str(imdb_id) + '_' + se) == 'available':
show_titse = generate_reg_title(
show_tit, se, sjquality)
if sjregex == True:
if sjregexdb.retrieve_key(show_titse):
sjregexdb.delete(show_titse)
print(u"Episode " + show_titse +
u" von Regex entfernt.")
if searchepisodes > 3:
if sjregex == True:
if sjregexdb.retrieve_key(show_tits):
sjregexdb.delete(show_tits)
print(u"Staffel " + show_tits +
u" von SJ Regex entfernt.")
if mbregex == True and mbseasons == True:
if mbregexdb.retrieve_key(mbshow_tits):
mbregexdb.delete(mbshow_tits)
print(u"Staffel " + mbshow_tits +
u" von MB Regex entfernt.")
if mbstlist.retrieve_key(str(show_tit)):
mbstlist.delete(str(show_tit))
if sjlist.retrieve_key(str(show_tit)):
sjlist.delete(str(show_tit))
elif searchepisodes < 2:
if sjregex == True:
if not sjregexdb.retrieve_key(show_tits):
sjregexdb.store_key(show_tits)
print(u"Staffel " + show_tits +
u" zu SJ-Regex hinzugefuegt.")
if mbregex == True and mbseasons == True:
if not mbregexdb.retrieve_key(mbshow_tits):
mbregexdb.store_key(mbshow_tits)
print(u"Staffel " + mbshow_tits +
u" zu MB-Regex hinzugefuegt.")
return [device, [len_movies, len_shows]]
``` |
{
"source": "9mat/wiki-link",
"score": 3
} |
#### File: wiki-link/wikilink/wiki_link.py
```python
from re import compile
from requests import get, HTTPError
from bs4 import BeautifulSoup
from wikilink.db.connection import Connection
from wikilink.db.page import Page
from wikilink.db.link import Link
class WikiLink:
def __init__(self):
pass
def setup_db(self, db, name, password, ip, port):
"""Setting up database
Args:
db(str): Database engine, currently support "mysql" and "postgresql"
name(str): database username
password(str): database password
ip(str): IP address of database
            port(str): port that the database is running on
Returns:
None
"""
self.db = Connection(db, name, password, ip, port)
def min_link(self, source_url, dest_url, limit=6):
"""return minimum number of link
Args:
db(str): Database engine, currently support "mysql" and "postgresql"
name(str): database username
password(str): database password
ip(str, optional): IP address of database. Default to "127.0.0.1"
port(str): port that databse is running on
Returns:
int: minimum number of sepration between startinga nd ending urls
"""
# update page for both starting and ending url
source_id = self.insert_url(source_url.split("/wiki/")[-1])
dest_id = self.insert_url(dest_url.split("/wiki/")[-1])
separation = self.db.session.query(Link.number_of_separation).filter(Link.from_page_id == source_id, \
Link.to_page_id == dest_id).all()
# check if the link already exists
        if separation is not None and len(separation) != 0:
return separation[0][0]
number_of_separation = 0
queue = [source_id]
already_seen = set(queue)
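        # breadth-first search over the link graph: each pass over `queue`
        # expands the frontier by one more degree of separation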
while number_of_separation <= limit and len(queue) > 0:
number_of_separation += 1
temporary_queue = queue
queue = []
# find outbound links from current url
for url_id in temporary_queue:
self.update_url(url_id)
neighbors = self.db.session.query(Link).filter(Link.from_page_id == url_id, \
Link.number_of_separation == 1).all()
for n in neighbors:
if n.to_page_id == dest_id:
self.insert_link(source_id, dest_id, number_of_separation)
return number_of_separation
if n.to_page_id not in already_seen:
already_seen.add(n.to_page_id)
queue.append(n.to_page_id)
if number_of_separation > limit:
print("No solution within limit! Consider to increase the limit.")
return
else:
print("there is no path from {} to {}".format(starting_url, ending_url))
def update_url(self, url_id):
""" Scrap urls from given url id and insert into database
Args:
starting_id: the stripped starting url
ending_id: the stripped ending url
number_of_separation:
Returns:
None
Raises:
HTTPError: if An HTTP error occurred
"""
# retrieve url from id
url = self.db.session.query(Page.url).filter(Page.id == url_id).first()
# handle exception where page not found or server down or url mistyped
try:
response = get('https://en.wikipedia.org/wiki/' + str(url[0]))
html = response.text
except HTTPError:
return
else:
if html is None:
return
else:
soup = BeautifulSoup(html, "html.parser")
# update all wiki links with tag 'a' and attribute 'href' start with '/wiki/'
# (?!...) : match if ... does not match next
links = soup.findAll("a", href=compile("(/wiki/)((?!:).)*$"))
for link in links:
# only insert link starting with /wiki/ and update Page if not exist
inserted_url = link.attrs['href'].split("/wiki/")[-1]
inserted_id = self.insert_url(inserted_url)
# update links table with starting page if it not exists
self.insert_link(url_id, inserted_id, 1)
def insert_url(self, url):
""" insert into table Page if not exist and return the url id
Args:
url(str): wiki url to update
Returns:
int: url id
"""
page_list = self.db.session.query(Page).filter(Page.url == url).all()
if len(page_list) == 0:
page = Page(url=url)
self.db.session.add(page)
self.db.session.commit()
url_id = self.db.session.query(Page.id).filter(Page.url == url).all()[0][0]
self.insert_link(url_id, url_id, 0)
return url_id
else:
return self.db.session.query(Page.id).filter(Page.url == url).all()[0][0]
def insert_link(self, from_page_id, to_page_id, no_of_separation):
""" insert link into database if link is not existed
Args:
from_page_id: id of "from" page
to_page_id: id of "to" page
no_of_separation:
Returns:
None
"""
link_between_2_pages = self.db.session.query(Link).filter(Link.from_page_id == from_page_id,
Link.to_page_id == to_page_id).all()
if len(link_between_2_pages) == 0:
link = Link(from_page_id=from_page_id, to_page_id=to_page_id, number_of_separation=no_of_separation)
self.db.session.add(link)
self.db.session.commit()
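# Illustrative usage sketch (not part of the original module). The engine name
# and credentials below are placeholders -- substitute your own settings:
#
#   wl = WikiLink()
#   wl.setup_db("mysql", "user", "password", "127.0.0.1", "3306")
#   print(wl.min_link("https://en.wikipedia.org/wiki/Cat",
#                     "https://en.wikipedia.org/wiki/Dog", limit=6))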
``` |
{
"source": "9names/bl_iot_sdk_autoformatted",
"score": 2
} |
#### File: bl_iot_sdk_autoformatted/image_conf/flash_build.py
```python
import os
import sys
import fdt
import struct
import shutil
import binascii
import hashlib
import lzma
import toml
import random
import getopt
import ecdsa
import traceback
import itertools
import bootheader_cfg_keys as B_CFG_KEYS
import efuse_cfg_keys as E_CFG_KEYS
from configobj import ConfigObj
from Cryptodome.Hash import SHA256
from Cryptodome.Cipher import AES
from Cryptodome.Util import Counter
app_path = ""
chip_name = ""
bl_factory_params_file_prefix = "bl_factory_params_"
bin_build_out_path = "build_out"
default_conf_path = ""
efuse_mask_file = ""
efuse_file = ""
eflash_loader_cfg_org = ""
eflash_loader_cfg = ""
dict_xtal = {"24M": "1", "32M": "2", "38.4M": "3", "40M": "4", "26M": "5", "RC32M": "6"}
def bl_find_file_list(key_val, endswith):
file_path_list = []
conf_path = os.path.join(app_path, "img_conf")
if os.path.exists(conf_path):
files = os.listdir(conf_path)
for f in files:
if key_val in f and f.endswith(endswith):
find_file = os.path.join(conf_path, f)
file_path_list.append(find_file)
# return find_file
if file_path_list != []:
return file_path_list
conf_path = os.path.join(os.path.abspath(".."), "image_conf", default_conf_path)
files = os.listdir(conf_path)
for f in files:
if key_val in f and f.endswith(endswith):
find_file = os.path.join(conf_path, f)
file_path_list.append(find_file)
# return find_file
return file_path_list
def bl_find_file(key_val, endswith):
conf_path = os.path.join(app_path, "img_conf")
if os.path.exists(conf_path):
files = os.listdir(conf_path)
for f in files:
if key_val in f and f.endswith(endswith):
find_file = os.path.join(conf_path, f)
return find_file
conf_path = os.path.join(os.path.abspath(".."), "image_conf", default_conf_path)
files = os.listdir(conf_path)
for f in files:
if key_val in f and f.endswith(endswith):
find_file = os.path.join(conf_path, f)
return find_file
class bl_efuse_boothd_gen:
def __init__(self):
self.utils = bl_utils()
def bootheader_update_flash_pll_crc(self, bootheader_data):
flash_cfg_start = 8
flash_cfg_len = 4 + 84 + 4
# magic+......+CRC32
flash_cfg = bootheader_data[
flash_cfg_start + 4 : flash_cfg_start + flash_cfg_len - 4
]
crcarray = self.utils.get_crc32_bytearray(flash_cfg)
bootheader_data[
flash_cfg_start + flash_cfg_len - 4 : flash_cfg_start + flash_cfg_len
] = crcarray
pll_cfg_start = flash_cfg_start + flash_cfg_len
pll_cfg_len = 4 + 8 + 4
# magic+......+CRC32
pll_cfg = bootheader_data[pll_cfg_start + 4 : pll_cfg_start + pll_cfg_len - 4]
crcarray = self.utils.get_crc32_bytearray(pll_cfg)
bootheader_data[
pll_cfg_start + pll_cfg_len - 4 : pll_cfg_start + pll_cfg_len
] = crcarray
return bootheader_data
def get_int_mask(self, pos, length):
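        # builds a 32-bit mask with `length` zero bits starting at bit `pos`,
        # e.g. get_int_mask(4, 2) == 0xFFFFFFCF (bits 4..5 cleared)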
ones = "1" * 32
zeros = "0" * 32
mask = ones[0 : 32 - pos - length] + zeros[0:length] + ones[0:pos]
return int(mask, 2)
def update_data_from_cfg(self, config_keys, config_file, section):
cfg = BFConfigParser()
cfg.read(config_file)
# get finally data len
filelen = 0
for key in config_keys:
offset = int(config_keys.get(key)["offset"], 10)
if offset > filelen:
filelen = offset
filelen += 4
data = bytearray(filelen)
data_mask = bytearray(filelen)
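        # pack each configured value into its 32-bit little-endian word:
        # shift it to bit position `pos` and merge with bits already set there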
for key in cfg.options(section):
if config_keys.get(key) == None:
print(key + " not exist")
continue
val = cfg.get(section, key)
if val.startswith("0x"):
val = int(val, 16)
else:
val = int(val, 10)
offset = int(config_keys.get(key)["offset"], 10)
pos = int(config_keys.get(key)["pos"], 10)
bitlen = int(config_keys.get(key)["bitlen"], 10)
oldval = self.utils.bytearray_to_int(
self.utils.bytearray_reverse(data[offset : offset + 4])
)
oldval_mask = self.utils.bytearray_to_int(
self.utils.bytearray_reverse(data_mask[offset : offset + 4])
)
newval = (oldval & self.get_int_mask(pos, bitlen)) + (val << pos)
if val != 0:
newval_mask = oldval_mask | (~self.get_int_mask(pos, bitlen))
else:
newval_mask = oldval_mask
data[offset : offset + 4] = self.utils.int_to_4bytearray_l(newval)
data_mask[offset : offset + 4] = self.utils.int_to_4bytearray_l(newval_mask)
return data, data_mask
def bootheader_create_do(
self, chipname, chiptype, config_file, section, output_file=None, if_img=False
):
efuse_bootheader_path = os.path.join(app_path, bin_build_out_path)
try:
# sub_module = __import__("bootheader_cfg_keys", fromlist=[chiptype])
bh_data, tmp = self.update_data_from_cfg(
B_CFG_KEYS.bootheader_cfg_keys, config_file, section
)
# bh_data, tmp = self.update_data_from_cfg(sub_module.bootheader_cfg_keys, config_file, section)
bh_data = self.bootheader_update_flash_pll_crc(bh_data)
if output_file == None:
fp = open(
efuse_bootheader_path
+ "/"
+ section.lower().replace("_cfg", ".bin"),
"wb+",
)
else:
fp = open(output_file, "wb+")
if section == "BOOTHEADER_CFG" and chiptype == "bl60x":
final_data = bytearray(8 * 1024)
# add sp core feature
# halt
bh_data[118] = bh_data[118] | (1 << 2)
final_data[0:176] = bh_data
final_data[4096 + 0 : 4096 + 176] = bh_data
# change magic
final_data[4096 + 2] = 65
# change waydis to 0xf
final_data[117] = final_data[117] | (15 << 4)
# change crc and hash ignore
final_data[4096 + 118] = final_data[4096 + 118] | 0x03
bh_data = final_data
if if_img == True:
# clear flash magic
bh_data[8:12] = bytearray(4)
# clear clock magic
bh_data[100:104] = bytearray(4)
fp.write(bh_data[0:176])
else:
fp.write(bh_data)
fp.close()
fp = open(efuse_bootheader_path + "/flash_para.bin", "wb+")
fp.write(bh_data[12 : 12 + 84])
fp.close()
except Exception as err:
print("bootheader_create_do fail!!")
print(err)
traceback.print_exc(limit=5, file=sys.stdout)
def bootheader_create_process(
self,
chipname,
chiptype,
config_file,
output_file1=None,
output_file2=None,
if_img=False,
):
fp = open(config_file, "r")
data = fp.read()
fp.close()
if "BOOTHEADER_CFG" in data:
self.bootheader_create_do(
chipname, chiptype, config_file, "BOOTHEADER_CFG", output_file1, if_img
)
if "BOOTHEADER_CPU0_CFG" in data:
self.bootheader_create_do(
chipname,
chiptype,
config_file,
"BOOTHEADER_CPU0_CFG",
output_file1,
if_img,
)
if "BOOTHEADER_CPU1_CFG" in data:
self.bootheader_create_do(
chipname,
chiptype,
config_file,
"BOOTHEADER_CPU1_CFG",
output_file2,
if_img,
)
def efuse_create_process(self, chipname, chiptype, config_file, output_file=None):
efuse_file = os.path.join(app_path, bin_build_out_path, "efusedata.bin")
# sub_module = __import__("efuse_cfg_keys.py", fromlist=[chiptype])
efuse_data, mask = self.update_data_from_cfg(
E_CFG_KEYS.efuse_cfg_keys, config_file, "EFUSE_CFG"
)
# efuse_data, mask = self.update_data_from_cfg(sub_module.efuse_cfg_keys, config_file, "EFUSE_CFG")
if output_file == None:
fp = open(efuse_file, "wb+")
else:
fp = open(output_file, "wb+")
fp.write(efuse_data)
fp.close()
efuse_mask_file = os.path.join(
app_path, bin_build_out_path, "efusedata_mask.bin"
)
if output_file == None:
fp = open(efuse_mask_file, "wb+")
else:
fp = open(output_file.replace(".bin", "_mask.bin"), "wb+")
fp.write(mask)
fp.close()
def efuse_boothd_create_process(self, chipname, chiptype, config_file):
self.bootheader_create_process(chipname, chiptype, config_file)
self.efuse_create_process(chipname, chiptype, config_file)
class bl_utils:
# 12345678->0x12,0x34,0x56,0x78
def hexstr_to_bytearray_b(self, hexstring):
return bytearray.fromhex(hexstring)
def hexstr_to_bytearray(self, hexstring):
return bytearray.fromhex(hexstring)
def hexstr_to_bytearray_l(self, hexstring):
b = bytearray.fromhex(hexstring)
b.reverse()
return b
def int_to_2bytearray_l(self, intvalue):
return struct.pack("<H", intvalue)
def int_to_2bytearray_b(self, intvalue):
return struct.pack(">H", intvalue)
def int_to_4bytearray_l(self, intvalue):
src = bytearray(4)
src[3] = (intvalue >> 24) & 0xFF
src[2] = (intvalue >> 16) & 0xFF
src[1] = (intvalue >> 8) & 0xFF
src[0] = (intvalue >> 0) & 0xFF
return src
    def int_to_4bytearray_b(self, intvalue):
        val = self.int_to_4bytearray_l(intvalue)
        val.reverse()
        return val
def bytearray_reverse(self, a):
l = len(a)
b = bytearray(l)
i = 0
while i < l:
b[i] = a[l - i - 1]
i = i + 1
return b
def bytearray_to_int(self, b):
return int(binascii.hexlify(b), 16)
def string_to_bytearray(self, string):
return bytes(string, encoding="utf8")
def bytearray_to_str(self, bytesarray):
return str(bytesarray)
    def get_random_hexstr(self, n_bytes):
        hexstring = ""
        i = 0
        while i < n_bytes:
            # format each random byte as two hex digits (binascii.hexlify
            # cannot take an int, which the previous version passed it)
            hexstring = hexstring + "%02x" % random.randint(0, 255)
            i = i + 1
        return hexstring
def get_crc32_bytearray(self, data):
crc = binascii.crc32(data)
return self.int_to_4bytearray_l(crc)
def copyfile(self, srcfile, dstfile):
if os.path.isfile(srcfile):
fpath, fname = os.path.split(dstfile)
if not os.path.exists(fpath):
os.makedirs(fpath)
shutil.copyfile(srcfile, dstfile)
else:
print("Src file not exists")
sys.exit()
def enable_udp_send_log(self, server, local_echo):
global udp_send_log, udp_socket_server, upd_log_local_echo
udp_send_log = True
upd_log_local_echo = local_echo
udp_socket_server = server
def add_udp_client(self, tid, upd_client):
udp_clinet_dict[tid] = upd_client
def remove_udp_client(self, tid):
del udp_clinet_dict[tid]
def Update_Cfg(self, cfg, section, key, value):
if cfg.has_option(section, key):
cfg.set(section, key, str(value))
else:
# print key," not found,adding it"
cfg.set(section, key, str(value))
    def get_byte_array(self, string):
        return string.encode("utf-8")
# class BFConfigParser(configparser.ConfigParser):
# def __init__(self, defaults=None):
# configparser.ConfigParser.__init__(self, defaults=defaults)
#
# def optionxform(self, optionstr):
# return optionstr
class BFConfigParser:
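    # minimal stand-in for configparser.ConfigParser backed by ConfigObj,
    # implementing just the methods this script needs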
cfg_infile = None
cfg_obj = ConfigObj()
def __init__(self, file=None):
self.cfg_infile = file
self.cfg_obj = ConfigObj(self.cfg_infile)
def read(self, file=None):
self.cfg_infile = file
self.cfg_obj = ConfigObj(self.cfg_infile, encoding="UTF8")
return self.cfg_obj
def get(self, section, key):
ret = self.cfg_obj[section][key]
if ret == '""':
return ""
else:
return ret
def set(self, section, key, value):
self.cfg_obj[section][key] = str(value)
    def sections(self):
        return self.cfg_obj.keys()
def delete_section(self, section):
del self.cfg_obj[section]
def update_section_name(self, oldsection, newsection):
_sections = self.cfg_obj.keys()
for _section in _sections:
print(_section)
if _section == oldsection:
print(self.cfg_obj[_section])
self.cfg_obj[newsection] = self.cfg_obj[oldsection]
self.delete_section(oldsection)
def options(self, section):
return self.cfg_obj[section]
def has_option(self, section, key):
_sections = self.cfg_obj.keys()
for _section in _sections:
if _section == section:
for _key in self.cfg_obj[_section]:
if _key == key:
return True
else:
continue
else:
continue
return False
def write(self, outfile=None, flag=None):
if outfile == None:
self.cfg_obj.filename = self.cfg_infile
else:
self.cfg_obj.filename = outfile
self.cfg_obj.write()
class PtCreater(bl_utils):
def __init__(self, config_file):
# if not os.path.exists(config_file):
# config_file = os.path.join(default_conf_path, "partition_cfg_2M.toml")
# config_file = bl_find_file("partition_cfg_", ".toml")
self.parsed_toml = toml.load(config_file)
self.entry_max = 16
self.pt_new = False
def __create_pt_table_do(self, lists, file):
entry_table = bytearray(36 * self.entry_max)
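        # each partition entry occupies 36 bytes: type at offset 0, a
        # NUL-terminated name (<8 chars) at offset 3, address0/address1 at
        # offsets 12/16 and size0/size1 at offsets 20/24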
entry_cnt = 0
for item in lists:
entry_type = item["type"]
entry_name = item["name"]
entry_device = item["device"]
entry_addr0 = item["address0"]
entry_addr1 = item["address1"]
entry_maxlen0 = item["size0"]
entry_maxlen1 = item["size1"]
entry_len = item["len"]
entry_table[36 * entry_cnt + 0] = self.int_to_2bytearray_l(entry_type)[0]
if len(entry_name) >= 8:
print("Entry name is too long!")
return False
entry_table[
36 * entry_cnt + 3 : 36 * entry_cnt + 3 + len(entry_name)
] = bytearray(entry_name, "utf-8") + bytearray(0)
entry_table[
36 * entry_cnt + 12 : 36 * entry_cnt + 16
] = self.int_to_4bytearray_l(entry_addr0)
entry_table[
36 * entry_cnt + 16 : 36 * entry_cnt + 20
] = self.int_to_4bytearray_l(entry_addr1)
entry_table[
36 * entry_cnt + 20 : 36 * entry_cnt + 24
] = self.int_to_4bytearray_l(entry_maxlen0)
entry_table[
36 * entry_cnt + 24 : 36 * entry_cnt + 28
] = self.int_to_4bytearray_l(entry_maxlen1)
entry_cnt += 1
# partition table header
# 0x54504642
pt_table = bytearray(16)
pt_table[0] = 0x42
pt_table[1] = 0x46
pt_table[2] = 0x50
pt_table[3] = 0x54
pt_table[6:8] = self.int_to_2bytearray_l(int(entry_cnt))
pt_table[12:16] = self.get_crc32_bytearray(pt_table[0:12])
entry_table[36 * entry_cnt : 36 * entry_cnt + 4] = self.get_crc32_bytearray(
entry_table[0 : 36 * entry_cnt]
)
data = pt_table + entry_table[0 : 36 * entry_cnt + 4]
fp = open(file, "wb+")
fp.write(data)
fp.close()
return True
def create_pt_table(self, file):
self.pt_new = True
return self.__create_pt_table_do(self.parsed_toml["pt_entry"], file)
def get_pt_table_addr(self):
addr0 = self.parsed_toml["pt_table"]["address0"]
addr1 = self.parsed_toml["pt_table"]["address1"]
return addr0, addr1
def construct_table(self):
parcel = {}
if self.pt_new == True:
parcel["pt_new"] = True
else:
parcel["pt_new"] = False
parcel["pt_addr0"] = self.parsed_toml["pt_table"]["address0"]
parcel["pt_addr1"] = self.parsed_toml["pt_table"]["address1"]
for tbl_item in self.parsed_toml["pt_entry"]:
if tbl_item["name"] == "factory":
parcel["conf_addr"] = tbl_item["address0"]
if tbl_item["name"] == "FW_CPU0":
parcel["fw_cpu0_addr"] = tbl_item["address0"]
if tbl_item["name"] == "FW":
parcel["fw_addr"] = tbl_item["address0"]
if tbl_item["name"] == "media":
parcel["media_addr"] = tbl_item["address0"]
if tbl_item["name"] == "mfg":
parcel["mfg_addr"] = tbl_item["address0"]
return parcel
class bl_img_create_do(bl_utils):
    def __init__(self):
        # the methods below read these values through bare (module-global)
        # names, so publish them as globals rather than discarding them as
        # locals of __init__
        global cfg
        global keyslot0, keyslot1, keyslot2, keyslot3, keyslot4, keyslot5, keyslot6
        global wr_lock_key_slot_4_l, wr_lock_key_slot_5_l, wr_lock_boot_mode
        global wr_lock_dbg_pwd, wr_lock_sw_usage_0, wr_lock_wifi_mac
        global wr_lock_key_slot_0, wr_lock_key_slot_1, wr_lock_key_slot_2
        global wr_lock_key_slot_3, wr_lock_key_slot_4_h, wr_lock_key_slot_5_h
        global rd_lock_dbg_pwd, rd_lock_key_slot_0, rd_lock_key_slot_1
        global rd_lock_key_slot_2, rd_lock_key_slot_3, rd_lock_key_slot_4
        global rd_lock_key_slot_5
        cfg = BFConfigParser()
        keyslot0 = 28
        keyslot1 = keyslot0 + 16
        keyslot2 = keyslot1 + 16
        keyslot3 = keyslot2 + 16
        keyslot4 = keyslot3 + 16
        keyslot5 = keyslot4 + 16
        keyslot6 = keyslot5 + 16
        wr_lock_key_slot_4_l = 13
        wr_lock_key_slot_5_l = 14
        wr_lock_boot_mode = 15
        wr_lock_dbg_pwd = 16
        wr_lock_sw_usage_0 = 17
        wr_lock_wifi_mac = 18
        wr_lock_key_slot_0 = 19
        wr_lock_key_slot_1 = 20
        wr_lock_key_slot_2 = 21
        wr_lock_key_slot_3 = 22
        wr_lock_key_slot_4_h = 23
        wr_lock_key_slot_5_h = 24
        rd_lock_dbg_pwd = 25
        rd_lock_key_slot_0 = 26
        rd_lock_key_slot_1 = 27
        rd_lock_key_slot_2 = 28
        rd_lock_key_slot_3 = 29
        rd_lock_key_slot_4 = 30
        rd_lock_key_slot_5 = 31
#####################update efuse info##########################################
def img_update_efuse(
self, sign, pk_hash, flash_encryp_type, flash_key, sec_eng_key_sel, sec_eng_key
):
fp = open(cfg.get("Img_Cfg", "efuse_file"), "rb")
efuse_data = bytearray(fp.read()) + bytearray(0)
fp.close()
fp = open(cfg.get("Img_Cfg", "efuse_mask_file"), "rb")
efuse_mask_data = bytearray(fp.read()) + bytearray(0)
fp.close()
mask_4bytes = bytearray.fromhex("FFFFFFFF")
efuse_data[0] |= flash_encryp_type
efuse_data[0] |= sign << 2
if flash_encryp_type > 0:
efuse_data[0] |= 0x80
efuse_mask_data[0] |= 0xFF
rw_lock = 0
if pk_hash != None:
efuse_data[keyslot0:keyslot2] = pk_hash
efuse_mask_data[keyslot0:keyslot2] = mask_4bytes * 8
rw_lock |= 1 << wr_lock_key_slot_0
rw_lock |= 1 << wr_lock_key_slot_1
if flash_key != None:
if flash_encryp_type == 1:
# aes 128
efuse_data[keyslot2:keyslot4] = flash_key
efuse_mask_data[keyslot2:keyslot4] = mask_4bytes * 8
elif flash_encryp_type == 2:
# aes 192
efuse_data[keyslot2:keyslot4] = flash_key
efuse_mask_data[keyslot2:keyslot4] = mask_4bytes * 8
elif flash_encryp_type == 3:
# aes 256
efuse_data[keyslot2:keyslot4] = flash_key
efuse_mask_data[keyslot2:keyslot4] = mask_4bytes * 8
rw_lock |= 1 << wr_lock_key_slot_2
rw_lock |= 1 << wr_lock_key_slot_3
rw_lock |= 1 << rd_lock_key_slot_2
rw_lock |= 1 << rd_lock_key_slot_3
if sec_eng_key != None:
if flash_encryp_type == 0:
if sec_eng_key_sel == 0:
efuse_data[keyslot2:keyslot3] = sec_eng_key[16:32]
efuse_data[keyslot3:keyslot4] = sec_eng_key[0:16]
efuse_mask_data[keyslot2:keyslot4] = mask_4bytes * 8
rw_lock |= 1 << wr_lock_key_slot_2
rw_lock |= 1 << wr_lock_key_slot_3
rw_lock |= 1 << rd_lock_key_slot_2
rw_lock |= 1 << rd_lock_key_slot_3
if sec_eng_key_sel == 1:
efuse_data[keyslot3:keyslot4] = sec_eng_key[16:32]
efuse_data[keyslot2:keyslot3] = sec_eng_key[0:16]
efuse_mask_data[keyslot2:keyslot4] = mask_4bytes * 8
rw_lock |= 1 << wr_lock_key_slot_2
rw_lock |= 1 << wr_lock_key_slot_3
rw_lock |= 1 << rd_lock_key_slot_2
rw_lock |= 1 << rd_lock_key_slot_3
if flash_encryp_type == 1:
if sec_eng_key_sel == 0:
efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16]
efuse_mask_data[keyslot4:keyslot5] = mask_4bytes * 4
rw_lock |= 1 << wr_lock_key_slot_4_l
rw_lock |= 1 << wr_lock_key_slot_4_h
rw_lock |= 1 << rd_lock_key_slot_4
if sec_eng_key_sel == 1:
efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16]
efuse_mask_data[keyslot4:keyslot5] = mask_4bytes * 4
rw_lock |= 1 << wr_lock_key_slot_4_l
rw_lock |= 1 << wr_lock_key_slot_4_h
rw_lock |= 1 << rd_lock_key_slot_4
# set read write lock key
efuse_data[124:128] = self.int_to_4bytearray_l(rw_lock)
efuse_mask_data[124:128] = self.int_to_4bytearray_l(rw_lock)
fp = open(cfg.get("Img_Cfg", "efuse_file"), "wb+")
fp.write(efuse_data)
fp.close()
fp = open(cfg.get("Img_Cfg", "efuse_mask_file"), "wb+")
fp.write(efuse_mask_data)
fp.close()
####################get sign and encrypt info##########################################
def img_create_get_sign_encrypt_info(self, bootheader_data):
sign = bootheader_data[116] & 0x3
encrypt = (bootheader_data[116] >> 2) & 0x3
key_sel = (bootheader_data[116] >> 4) & 0x3
return sign, encrypt, key_sel
####################get hash ignore ignore##########################################
def img_create_get_hash_ignore(self, bootheader_data):
return (bootheader_data[118] >> 1) & 0x1
####################get crc ignore ignore##########################################
def img_create_get_crc_ignore(self, bootheader_data):
return bootheader_data[118] & 0x1
#####################update boot header info##########################################
def img_create_update_bootheader(self, bootheader_data, hash, seg_cnt):
# update segment count
bootheader_data[120:124] = self.int_to_4bytearray_l(seg_cnt)
# update hash
sign, encrypt, key_sel = self.img_create_get_sign_encrypt_info(bootheader_data)
if self.img_create_get_hash_ignore(bootheader_data) == 1 and sign == 0:
# do nothing
pass
else:
bootheader_data[132:164] = hash
# update header crc
if self.img_create_get_crc_ignore(bootheader_data) == 1:
# do nothing
pass
else:
hd_crcarray = self.get_crc32_bytearray(bootheader_data[0 : 176 - 4])
bootheader_data[176 - 4 : 176] = hd_crcarray
print("Header crc: ", binascii.hexlify(hd_crcarray))
return bootheader_data[0:176]
#####################update segment header according segdata#########################
def img_create_update_segheader(self, segheader, segdatalen, segdatacrc):
segheader[4:8] = segdatalen
segheader[8:12] = segdatacrc
return segheader
#####################do hash of image################################################
def img_create_sha256_data(self, data_bytearray):
hashfun = SHA256.new()
hashfun.update(data_bytearray)
return self.hexstr_to_bytearray(hashfun.hexdigest())
#####################encrypt image, mainly segdata#####################################
def img_create_encrypt_data(
self, data_bytearray, key_bytearray, iv_bytearray, flash_img
):
if flash_img == 0:
cryptor = AES.new(key_bytearray, AES.MODE_CBC, iv_bytearray)
ciphertext = cryptor.encrypt(data_bytearray)
else:
# iv = Crypto.Util.Counter.new(128, initial_value = long(binascii.hexlify(iv_bytearray),16))
iv = Counter.new(128, initial_value=int(binascii.hexlify(iv_bytearray), 16))
cryptor = AES.new(key_bytearray, AES.MODE_CTR, counter=iv)
ciphertext = cryptor.encrypt(data_bytearray)
return ciphertext
#####################sign image(hash code)#####################################
def img_create_sign_data(
self, data_bytearray, privatekey_file_uecc, publickey_file
):
sk = ecdsa.SigningKey.from_pem(open(privatekey_file_uecc).read())
vk = ecdsa.VerifyingKey.from_pem(open(publickey_file).read())
pk_data = vk.to_string()
pk_hash = self.img_create_sha256_data(pk_data)
signature = sk.sign(
data_bytearray,
hashfunc=hashlib.sha256,
sigencode=ecdsa.util.sigencode_string,
)
# return len+signature+crc
len_array = self.int_to_4bytearray_l(len(signature))
sig_field = len_array + signature
crcarray = self.get_crc32_bytearray(sig_field)
return pk_data, pk_hash, sig_field + crcarray
######################## read one file and append crc if needed#####################
def img_create_read_file_append_crc(self, file, crc):
fp = open(file, "rb")
read_data = bytearray(fp.read())
crcarray = bytearray(0)
if crc:
crcarray = self.get_crc32_bytearray(read_data)
fp.close()
return read_data + crcarray
def img_creat_process(self, flash_img):
encrypt_blk_size = 16
padding = bytearray(encrypt_blk_size)
data_tohash = bytearray(0)
ret = "OK"
cfg_section = "Img_Cfg"
# get segdata to deal with
segheader_file = []
if flash_img == 0:
for files in cfg.get(cfg_section, "segheader_file").split(" "):
segheader_file.append(str(files))
segdata_file = []
for files in cfg.get(cfg_section, "segdata_file").split(" "):
segdata_file.append(str(files))
if flash_img == 1:
break
# get bootheader
boot_header_file = cfg.get(cfg_section, "boot_header_file")
bootheader_data = self.img_create_read_file_append_crc(boot_header_file, 0)
# decide encrypt and sign
encrypt = 0
sign, encrypt, key_sel = self.img_create_get_sign_encrypt_info(bootheader_data)
aesiv_data = bytearray(0)
pk_data = bytearray(0)
if sign != 0:
publickey_file = cfg.get(cfg_section, "publickey_file")
privatekey_file_uecc = cfg.get(cfg_section, "privatekey_file_uecc")
if encrypt != 0:
encrypt_key_org = self.hexstr_to_bytearray(
cfg.get(cfg_section, "aes_key_org")
)
global encrypt_key
if encrypt == 1:
encrypt_key = encrypt_key_org[0:16]
elif encrypt == 2:
encrypt_key = encrypt_key_org[0:32]
elif encrypt == 3:
encrypt_key = encrypt_key_org[0:24]
encrypt_iv = self.hexstr_to_bytearray(cfg.get(cfg_section, "aes_iv"))
iv_crcarray = self.get_crc32_bytearray(encrypt_iv)
aesiv_data = encrypt_iv + iv_crcarray
data_tohash = data_tohash + aesiv_data
# decide seg_cnt values
seg_cnt = len(segheader_file)
if flash_img == 0 and seg_cnt != len(segdata_file):
print("Segheader count and segdata count not match")
return "FAIL", data_tohash
data_toencrypt = bytearray(0)
if flash_img == 0:
i = 0
seg_header_list = []
seg_data_list = []
while i < seg_cnt:
# read seg data and calculate crcdata
seg_data = self.img_create_read_file_append_crc(segdata_file[i], 0)
padding_size = 0
if len(seg_data) % encrypt_blk_size != 0:
padding_size = encrypt_blk_size - len(seg_data) % encrypt_blk_size
seg_data += padding[0:padding_size]
segdata_crcarray = self.get_crc32_bytearray(seg_data)
seg_data_list.append(seg_data)
# read seg header and replace segdata's CRC
seg_header = self.img_create_read_file_append_crc(segheader_file[i], 0)
                seg_header = self.img_create_update_segheader(
                    seg_header,
                    self.int_to_4bytearray_l(len(seg_data)),
                    segdata_crcarray,
                )
segheader_crcarray = self.get_crc32_bytearray(seg_header)
seg_header = seg_header + segheader_crcarray
seg_header_list.append(seg_header)
i = i + 1
# get all data to encrypt
i = 0
while i < seg_cnt:
                # appended in encrypted form, since the download tool takes
                # the segdata length from bootrom
data_toencrypt += seg_header_list[i]
data_toencrypt += seg_data_list[i]
i += 1
else:
seg_data = self.img_create_read_file_append_crc(segdata_file[0], 0)
padding_size = 0
if len(seg_data) % encrypt_blk_size != 0:
padding_size = encrypt_blk_size - len(seg_data) % encrypt_blk_size
seg_data += padding[0:padding_size]
data_toencrypt += seg_data
seg_cnt = len(data_toencrypt)
# do encrypt
if encrypt != 0:
            data_toencrypt = self.img_create_encrypt_data(
                data_toencrypt, encrypt_key, encrypt_iv, flash_img
            )
# get fw data
fw_data = bytearray(0)
data_tohash += data_toencrypt
fw_data = data_toencrypt
# hash fw img
hash = self.img_create_sha256_data(data_tohash)
# update boot header and recalculate crc
bootheader_data = self.img_create_update_bootheader(
bootheader_data, hash, seg_cnt
)
        # add signature
signature = bytearray(0)
pk_hash = None
if sign == 1:
            pk_data, pk_hash, signature = self.img_create_sign_data(
                data_tohash, privatekey_file_uecc, publickey_file
            )
pk_data = pk_data + self.get_crc32_bytearray(pk_data)
# write whole image
if flash_img == 1:
bootinfo_file_name = cfg.get(cfg_section, "bootinfo_file")
fp = open(bootinfo_file_name, "wb+")
bootinfo = bootheader_data + pk_data + signature + aesiv_data
fp.write(bootinfo)
fp.close()
fw_file_name = cfg.get(cfg_section, "img_file")
fp = open(fw_file_name, "wb+")
fp.write(fw_data)
fp.close()
# update efuse
if encrypt != 0:
if encrypt == 1:
# AES 128
                    self.img_update_efuse(
sign,
pk_hash,
1,
encrypt_key + bytearray(32 - len(encrypt_key)),
key_sel,
None,
)
if encrypt == 2:
# AES 256
                    self.img_update_efuse(
sign,
pk_hash,
3,
encrypt_key + bytearray(32 - len(encrypt_key)),
key_sel,
None,
)
if encrypt == 3:
# AES 192
                    self.img_update_efuse(
sign,
pk_hash,
2,
encrypt_key + bytearray(32 - len(encrypt_key)),
key_sel,
None,
)
else:
self.img_update_efuse(sign, pk_hash, encrypt, None, key_sel, None)
else:
whole_img_file_name = cfg.get(cfg_section, "whole_img_file")
fp = open(whole_img_file_name, "wb+")
img_data = bootheader_data + pk_data + signature + aesiv_data + fw_data
fp.write(img_data)
fp.close()
# update efuse
if encrypt != 0:
if encrypt == 1:
# AES 128
                    self.img_update_efuse(
sign,
pk_hash,
1,
None,
key_sel,
encrypt_key + bytearray(32 - len(encrypt_key)),
)
if encrypt == 2:
# AES 256
                    self.img_update_efuse(
sign,
pk_hash,
3,
None,
key_sel,
encrypt_key + bytearray(32 - len(encrypt_key)),
)
if encrypt == 3:
# AES 192
                    self.img_update_efuse(
sign,
pk_hash,
2,
None,
key_sel,
encrypt_key + bytearray(32 - len(encrypt_key)),
)
else:
                    self.img_update_efuse(sign, pk_hash, 0, None, key_sel, bytearray(32))
return "OK", data_tohash
    def usage(self):
        print(sys.argv[0], "\n")
        print("-i/--img_type= :image type:media or if")
        print("-h/--help :helper")
#######################################################################
def img_create_do(self, options, img_dir_path=None, config_file=None):
print("Image create path: ", img_dir_path)
if config_file == None:
config_file = img_dir_path + "/img_create_cfg.ini"
cfg.read(config_file)
print("Config file: ", config_file)
img_type = "media"
signer = "none"
ret = "OK"
data_tohash = bytearray(0)
try:
opts, args = getopt.getopt(
options, "i:s:Hh", ["img_type=", "signer=", "help"]
)
for option, value in opts:
if option in ["-h", "-H"]:
                    self.usage()
if option in ["-i", "--img_type"]:
img_type = value
if option in ["-s", "--signer"]:
signer = value
        except getopt.GetoptError as err:
            # will print something like "option -a not recognized"
            print(err)
            self.usage()
if img_type == "media":
flash_img = 1
else:
flash_img = 0
# deal image creation
ret, data_tohash = self.img_creat_process(flash_img)
if ret != "OK":
print("Fail to create images!")
return
def create_sp_media_image(self, config, cpu_type=None):
global cfg
cfg = BFConfigParser()
cfg.read(config)
self.img_creat_process(1)
class bl_img_create(bl_img_create_do):
def img_create(
self,
options,
chipname="bl60x",
chiptype="bl60x",
img_dir=None,
config_file=None,
):
img_dir_path = os.path.join(app_path, chipname, "img_create")
if img_dir is None:
self.img_create_do(options, img_dir_path, config_file)
else:
self.img_create_do(options, img_dir, config_file)
def create_sp_media_image_file(self, config, chiptype="bl60x", cpu_type=None):
self.create_sp_media_image(config, cpu_type)
class bl_device_tree:
def bl_dts2dtb(self, src_addr="", dest_addr=""):
if "" == src_addr or "" == dest_addr:
print("bl_dts2dtb please check arg.")
return
with open(src_addr, "r") as f:
tmp1_dts = f.read()
tmp2_dtb = fdt.parse_dts(tmp1_dts)
dest_addr = os.path.join(app_path, bin_build_out_path, dest_addr)
with open(dest_addr, "wb") as f:
f.write(tmp2_dtb.to_dtb(version=17))
def bl_ro_params_device_tree(self, in_dts_config, out_bin_file):
dts_config = in_dts_config
bin_file = out_bin_file
self.bl_dts2dtb(dts_config, bin_file)
class bl_whole_img_generate:
def bl_create_flash_default_data(self, length):
datas = bytearray(length)
for i in range(length):
datas[i] = 0xFF
return datas
def bl_get_largest_addr(self, addrs, files):
maxlen = 0
datalen = 0
for i in range(len(addrs)):
if int(addrs[i], 16) > maxlen:
maxlen = int(addrs[i], 16)
datalen = os.path.getsize(files[i])
return maxlen + datalen
def bl_get_file_data(self, files):
datas = []
for file in files:
with open(file, "rb") as fp:
data = fp.read()
datas.append(data)
return datas
def bl_write_flash_img(self, d_addrs, d_files, flash_size):
whole_img_len = self.bl_get_largest_addr(d_addrs, d_files)
whole_img_data = self.bl_create_flash_default_data(whole_img_len)
filedatas = self.bl_get_file_data(d_files)
for i in range(len(d_addrs)):
start_addr = int(d_addrs[i], 16)
whole_img_data[start_addr : start_addr + len(filedatas[i])] = filedatas[i]
# dst_file = os.path.join(app_path, bin_build_out_path, "whole_flash_data.bin")
dst_file = os.path.join(
app_path, bin_build_out_path, "whole_{}.bin".format(file_finally_name)
)
fp = open(dst_file, "wb+")
fp.write(whole_img_data)
print("Generating BIN File to %s" % (dst_file))
fp.close()
def bl_image_gen_cfg(
self, raw_bin_name, bintype, key=None, iv=None, cfg_ini=None, cpu_type=None
):
cfg = BFConfigParser()
if cfg_ini in [None, ""]:
f_org = bl_find_file("img_create_cfg", ".conf")
f = os.path.join(app_path, bin_build_out_path, "img_create_cfg.ini")
# if os.path.isfile(f) == False:
shutil.copy(f_org, f)
else:
f = cfg_ini
cfg.read(f)
if bintype == "fw":
if cpu_type == None:
bootinfo_file = os.path.join(
app_path, bin_build_out_path, "bootinfo.bin"
)
img_file = os.path.join(app_path, bin_build_out_path, "img.bin")
else:
bootinfo_file = os.path.join(
app_path,
bin_build_out_path,
"bootinfo_{0}.bin".format(cpu_type.lower()),
)
img_file = os.path.join(
app_path, bin_build_out_path, "img_{0}.bin".format(cpu_type.lower())
)
else:
bootinfo_file = os.path.join(
app_path, bin_build_out_path, "bootinfo_{0}.bin".format(bintype)
)
img_file = os.path.join(
app_path, bin_build_out_path, "img_{0}.bin".format(bintype)
)
if cpu_type != None:
img_section_name = "Img_" + cpu_type + "_Cfg"
else:
if "Img_CPU0_Cfg" in cfg.sections():
img_section_name = "Img_CPU0_Cfg"
else:
img_section_name = "Img_Cfg"
bh_file = os.path.join(app_path, bin_build_out_path, "bootheader.bin")
efuse_file = os.path.join(app_path, bin_build_out_path, "efusedata.bin")
efuse_mask_file = os.path.join(
app_path, bin_build_out_path, "efusedata_mask.bin"
)
cfg.set(img_section_name, "boot_header_file", bh_file)
cfg.set(img_section_name, "efuse_file", efuse_file)
cfg.set(img_section_name, "efuse_mask_file", efuse_mask_file)
cfg.set(img_section_name, "segdata_file", raw_bin_name)
cfg.set(img_section_name, "bootinfo_file", bootinfo_file)
cfg.set(img_section_name, "img_file", img_file)
if key:
cfg.set(img_section_name, "aes_key_org", key)
if iv:
cfg.set(img_section_name, "aes_iv", iv)
cfg.write(f, "w")
return f
def bl_image_gen(self, bintype, raw_bin_name, key=None, iv=None, cfg_ini=None):
# python bflb_img_create.py -c np -i media -s none
f = self.bl_image_gen_cfg(raw_bin_name, bintype)
# exe_genitor(['bflb_img_create.exe', '-c', 'np', '-i', 'media', '-s', 'none'])
img_create = bl_img_create()
img_create.create_sp_media_image_file(f)
def bl_fw_boot_head_gen(
self,
boot2,
xtal,
config,
encrypt=False,
chipname="bl60x",
chiptype="bl60x",
cpu_type=None,
):
cfg = BFConfigParser()
cfg.read(config)
if cpu_type != None:
bootheader_section_name = "BOOTHEADER_" + cpu_type + "_CFG"
else:
if "BOOTHEADER_CPU0_CFG" in cfg.sections():
bootheader_section_name = "BOOTHEADER_CPU0_CFG"
else:
bootheader_section_name = "BOOTHEADER_CFG"
if boot2 == True:
cfg.set(bootheader_section_name, "img_start", "0x2000")
cfg.set(bootheader_section_name, "cache_enable", "1")
cfg.set(bootheader_section_name, "crc_ignore", "1")
cfg.set(bootheader_section_name, "hash_ignore", "1")
# cfg.set(bootheader_section_name,'sfctrl_clk_delay', '0')
if cpu_type != None:
cfg.set(bootheader_section_name, "halt_cpu1", "1")
cfg.set(bootheader_section_name, "key_sel", "0")
if encrypt:
cfg.set(bootheader_section_name, "encrypt_type", "1")
else:
cfg.set(bootheader_section_name, "encrypt_type", "0")
cfg.set(bootheader_section_name, "xtal_type", dict_xtal[xtal])
cfg.write(config)
create = bl_efuse_boothd_gen()
create.efuse_boothd_create_process(chipname, chiptype, config)
def bl_whole_flash_bin_create(
self, bin_file, boot2, ro_params, pt_parcel, media, mfg, flash_opt="1M"
):
d_files = []
d_addrs = []
if pt_parcel == None:
return False
if boot2 == True:
d_files.append(
os.path.join(app_path, bin_build_out_path, "bootinfo_boot2.bin")
)
d_addrs.append("00000000")
d_files.append(os.path.join(app_path, bin_build_out_path, "img_boot2.bin"))
d_addrs.append("00002000")
if pt_parcel != None and len(pt_parcel) > 0 and pt_parcel["pt_new"] == True:
d_files.append(os.path.join(app_path, bin_build_out_path, "partition.bin"))
d_addrs.append(hex(pt_parcel["pt_addr0"])[2:])
d_files.append(os.path.join(app_path, bin_build_out_path, "partition.bin"))
d_addrs.append(hex(pt_parcel["pt_addr1"])[2:])
if bin_file == True and "fw_addr" in pt_parcel:
d_files.append(os.path.join(app_path, bin_build_out_path, "bootinfo.bin"))
d_addrs.append(hex(pt_parcel["fw_addr"])[2:])
d_files.append(os.path.join(app_path, bin_build_out_path, "img.bin"))
d_addrs.append(hex(pt_parcel["fw_addr"] + 0x1000)[2:])
if ro_params != None and len(ro_params) > 0 and pt_parcel["conf_addr"] != None:
bl_ro_device_tree = bl_device_tree()
dtb_file = os.path.join(app_path, bin_build_out_path, "ro_params.dtb")
bl_ro_device_tree.bl_ro_params_device_tree(ro_params, dtb_file)
d_files.append(os.path.join(app_path, bin_build_out_path, "ro_params.dtb"))
d_addrs.append(hex(pt_parcel["conf_addr"])[2:])
if media == True and pt_parcel["media_addr"] != None:
d_files.append(os.path.join(app_path, bin_build_out_path, "media.bin"))
d_addrs.append(hex(pt_parcel["media_addr"])[2:])
if mfg == True:
d_files.append(
os.path.join(app_path, bin_build_out_path, "bootinfo_mfg.bin")
)
d_addrs.append(hex(pt_parcel["mfg_addr"])[2:])
d_files.append(os.path.join(app_path, bin_build_out_path, "img_mfg.bin"))
d_addrs.append(hex(pt_parcel["mfg_addr"] + 0x1000)[2:])
if len(d_files) > 0 and len(d_addrs) > 0:
cfg = BFConfigParser()
cfg.read(eflash_loader_cfg)
self.bl_write_flash_img(d_addrs, d_files, flash_opt)
files_str = " ".join(d_files)
addrs_str = " ".join(d_addrs)
cfg.set("FLASH_CFG", "file", files_str)
cfg.set("FLASH_CFG", "address", addrs_str)
cfg.write(eflash_loader_cfg, "w")
return True
else:
return False
class bl_img_ota:
def bl_mfg_ota_header(self, file_bytearray, use_xz):
ota_conf = bl_find_file("ota", ".toml")
parsed_toml = toml.load(ota_conf)
header_len = 512
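        # OTA header layout (512 bytes total): magic "BL60X_OTA_Ver1.0" (16)
        # + type "XZ"/"RAW" (4) + file length (4) + pad (8) + hardware
        # version (16) + software version (16) + SHA256 of payload (32),
        # padded with 0xFF up to 512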
header = bytearray()
file_len = len(file_bytearray)
m = hashlib.sha256()
# 16 Bytes header
data = b"BL60X_OTA_Ver1.0"
for b in data:
header.append(b)
# 4 Byte ota file type
if use_xz:
data = b"XZ "
else:
data = b"RAW "
for b in data:
header.append(b)
# 4 Bytes file length
file_len_bytes = file_len.to_bytes(4, byteorder="little")
for b in file_len_bytes:
header.append(b)
# 8 Bytes pad
header.append(0x01)
header.append(0x02)
header.append(0x03)
header.append(0x04)
header.append(0x05)
header.append(0x06)
header.append(0x07)
header.append(0x08)
# 16 Bytes Hardware version
data = bytearray(parsed_toml["ota"]["version_hardware"].encode())
data_len = 16 - len(data)
for b in data:
header.append(b)
while data_len > 0:
header.append(0x00)
data_len = data_len - 1
# 16 Bytes firmware version
data = bytearray(parsed_toml["ota"]["version_software"].encode())
data_len = 16 - len(data)
for b in data:
header.append(b)
while data_len > 0:
header.append(0x00)
data_len = data_len - 1
# 32 Bytes SHA256
m.update(file_bytearray)
hash_bytes = m.digest()
for b in hash_bytes:
header.append(b)
header_len = header_len - len(header)
while header_len > 0:
header.append(0xFF)
header_len = header_len - 1
return header
def bl_mfg_ota_xz_gen(self, chipname="bl60x", chiptype="bl60x", cpu_type=None):
bl60x_xz_filters = [{"id": lzma.FILTER_LZMA2, "dict_size": 32768}]
fw_ota_bin = bytearray()
fw_ota_bin_xz = bytearray()
if cpu_type == None:
FW_OTA_path = os.path.join(
app_path,
bin_build_out_path,
"ota/{}/FW_OTA.bin".format(file_finally_name),
)
else:
FW_OTA_path = os.path.join(
app_path,
bin_build_out_path,
"ota/{}/".format(file_finally_name) + cpu_type + "_OTA.bin",
)
with open(FW_OTA_path, mode="rb") as bin_f:
file_bytes = bin_f.read()
for b in file_bytes:
fw_ota_bin.append(b)
if cpu_type == None:
FW_OTA_path = os.path.join(
app_path,
bin_build_out_path,
"ota/{}/FW_OTA.bin.xz".format(file_finally_name),
)
else:
FW_OTA_path = os.path.join(
app_path,
bin_build_out_path,
"ota/{}/".format(file_finally_name) + cpu_type + "_OTA.bin.xz",
)
with lzma.open(
FW_OTA_path, mode="wb", check=lzma.CHECK_CRC32, filters=bl60x_xz_filters
) as xz_f:
xz_f.write(fw_ota_bin)
print("Generating BIN File to %s" % (FW_OTA_path))
with open(FW_OTA_path, mode="rb") as f:
file_bytes = f.read()
for b in file_bytes:
fw_ota_bin_xz.append(b)
fw_ota_bin_xz_ota = self.bl_mfg_ota_header(fw_ota_bin_xz, use_xz=1)
for b in fw_ota_bin_xz:
fw_ota_bin_xz_ota.append(b)
if cpu_type == None:
FW_OTA_path = os.path.join(
app_path,
bin_build_out_path,
"ota/{}/FW_OTA.bin.xz.ota".format(file_finally_name),
)
else:
FW_OTA_path = os.path.join(
app_path,
bin_build_out_path,
"ota/{}/".format(file_finally_name) + cpu_type + "_OTA.bin.xz.ota",
)
with open(FW_OTA_path, mode="wb") as f:
f.write(fw_ota_bin_xz_ota)
print("Generating BIN File to %s" % (FW_OTA_path))
def bl_mfg_ota_bin_gen(self, chipname="bl60x", chiptype="bl60x", cpu_type=None):
fw_header_len = 4096
fw_ota_bin = bytearray()
ota_path = os.path.join(app_path, bin_build_out_path)
if os.path.isdir(ota_path) == False:
os.mkdir(ota_path)
if cpu_type == None:
bootinfo_fw_path = os.path.join(
app_path, bin_build_out_path, "bootinfo.bin"
)
else:
bootinfo_fw_path = os.path.join(
app_path, bin_build_out_path, "bootinfo_" + cpu_type.lower() + ".bin"
)
with open(bootinfo_fw_path, mode="rb") as f:
file_bytes = f.read(4096)
for b in file_bytes:
fw_ota_bin.append(b)
i = fw_header_len - len(fw_ota_bin)
while i > 0:
fw_ota_bin.append(0xFF)
i = i - 1
if cpu_type == None:
img_fw_path = os.path.join(app_path, bin_build_out_path, "img.bin")
else:
img_fw_path = os.path.join(
app_path, bin_build_out_path, "img_" + cpu_type.lower() + ".bin"
)
with open(img_fw_path, mode="rb") as f:
file_bytes = f.read()
for b in file_bytes:
fw_ota_bin.append(b)
fw_ota_bin_header = self.bl_mfg_ota_header(fw_ota_bin, use_xz=0)
FW_OTA_path = os.path.join(app_path, bin_build_out_path, "ota")
if not os.path.exists(FW_OTA_path):
os.makedirs(FW_OTA_path)
FW_OTA_path = os.path.join(FW_OTA_path, file_finally_name)
if not os.path.exists(FW_OTA_path):
os.makedirs(FW_OTA_path)
if cpu_type == None:
FW_OTA_path = os.path.join(FW_OTA_path, "FW_OTA.bin")
else:
FW_OTA_path = os.path.join(FW_OTA_path, cpu_type + "_OTA.bin")
with open(FW_OTA_path, mode="wb") as f:
f.write(fw_ota_bin)
print("Generating BIN File to %s" % (FW_OTA_path))
for b in fw_ota_bin:
fw_ota_bin_header.append(b)
if cpu_type == None:
FW_OTA_path = os.path.join(
app_path,
bin_build_out_path,
"ota/{}/FW_OTA.bin.ota".format(file_finally_name),
)
else:
FW_OTA_path = os.path.join(
app_path,
bin_build_out_path,
"ota/{}/".format(file_finally_name) + cpu_type + "_OTA.bin.ota",
)
with open(FW_OTA_path, mode="wb") as f:
f.write(fw_ota_bin_header)
print("Generating BIN File to %s" % (FW_OTA_path))
self.bl_mfg_ota_xz_gen(chipname, chiptype, cpu_type)
class bl_flash_select:
def get_suitable_file_name(self, cfg_dir, flash_id):
conf_files = []
for home, dirs, files in os.walk(cfg_dir):
for filename in files:
if filename.split("_")[-1] == flash_id + ".conf":
conf_files.append(filename)
if len(conf_files) > 1:
for i in range(len(conf_files)):
tmp = conf_files[i].split(".")[0]
print("%d:%s" % (i + 1, tmp))
return conf_files[i]
elif len(conf_files) == 1:
return conf_files[0]
else:
return ""
def update_flash_cfg_do(
self, chipname, chiptype, flash_id, file=None, create=False, section=None
):
cfg_dir = os.path.join(os.getcwd(), chiptype, "flash_select")
conf_name = self.get_suitable_file_name(cfg_dir, flash_id)
print(os.path.join(cfg_dir, conf_name))
value_key = []
if os.path.isfile(os.path.join(cfg_dir, conf_name)) == False:
return False
fp = open(os.path.join(cfg_dir, conf_name), "r")
for line in fp.readlines():
value = line.split("=")[0].strip()
if value == "[FLASH_CFG]":
continue
value_key.append(value)
cfg1 = BFConfigParser()
cfg1.read(os.path.join(cfg_dir, conf_name))
cfg2 = BFConfigParser()
cfg2.read(file)
for i in range(len(value_key)):
if cfg1.has_option("FLASH_CFG", value_key[i]) and cfg2.has_option(
section, value_key[i]
):
tmp_value = cfg1.get("FLASH_CFG", value_key[i])
bflb_utils = bl_utils()
bflb_utils.Update_Cfg(cfg2, section, value_key[i], tmp_value)
cfg2.write(file, "w+")
def bl_flash_loader_list(self, chipname, chiptype, bh_cfg_file):
eflash_loader_cfg = os.path.join(
app_path, bin_build_out_path, "eflash_loader_cfg.ini"
)
cfg = BFConfigParser()
cfg.read(eflash_loader_cfg)
if cfg.has_option("FLASH_CFG", "flash_id"):
flash_id_str = cfg.get("FLASH_CFG", "flash_id")
if type(flash_id_str) is str:
flash_id_list = flash_id_str.split(",")
return flash_id_list
elif type(flash_id_str) is list:
return flash_id_str
# for flash_id in flash_id_list:
# print("========= chip flash id: %s =========" % flash_id)
# if chiptype == "bl602":
# if self.update_flash_cfg_do(chipname, chiptype, flash_id, bh_cfg_file, False, "BOOTHEADER_CFG") == False:
# error = "flash_id:" + flash_id + " do not support"
# return error
# elif chiptype == "bl60x":
# if self.update_flash_cfg_do(chipname, chiptype, flash_id, bh_cfg_file, False, "BOOTHEADER_CPU0_CFG") == False:
# error = "flash_id:" + flash_id + " do not support"
# return error
else:
error = "Do not find flash_id in eflash_loader_cfg.ini"
return error
def bl_flash_update(self, chipname, chiptype, bh_cfg_file, flash_id):
print("========= chip flash id: %s =========" % flash_id)
if chiptype == "bl602":
if (
self.update_flash_cfg_do(
chipname, chiptype, flash_id, bh_cfg_file, False, "BOOTHEADER_CFG"
)
== False
):
error = "flash_id:" + flash_id + " do not support"
return error
elif chiptype == "bl60x":
if (
self.update_flash_cfg_do(
chipname,
chiptype,
flash_id,
bh_cfg_file,
False,
"BOOTHEADER_CPU0_CFG",
)
== False
):
error = "flash_id:" + flash_id + " do not support"
return error
def bl_flash_loader(self, chipname, chiptype, bh_cfg_file):
eflash_loader_cfg = os.path.join(
app_path, bin_build_out_path, "eflash_loader_cfg.ini"
)
cfg = BFConfigParser()
cfg.read(eflash_loader_cfg)
if cfg.has_option("FLASH_CFG", "flash_id"):
flash_id_str = cfg.get("FLASH_CFG", "flash_id")
flash_id_list = flash_id_str.split(",")
print("++++++++")
print(flash_id_list, type(flash_id_list))
for flash_id in flash_id_list:
print(flash_id)
print("========= chip flash id: %s =========" % flash_id)
if chiptype == "bl602":
if (
self.update_flash_cfg_do(
chipname,
chiptype,
flash_id,
bh_cfg_file,
False,
"BOOTHEADER_CFG",
)
== False
):
error = "flash_id:" + flash_id + " do not support"
return error
elif chiptype == "bl60x":
if (
self.update_flash_cfg_do(
chipname,
chiptype,
flash_id,
bh_cfg_file,
False,
"BOOTHEADER_CPU0_CFG",
)
== False
):
error = "flash_id:" + flash_id + " do not support"
return error
else:
error = "Do not find flash_id in eflash_loader_cfg.ini"
return error
if __name__ == "__main__":
abs_path = os.path.abspath("..")
app_path = os.path.join(abs_path, "customer_app", sys.argv[1])
demo_name = sys.argv[1]
chip_name = sys.argv[2].lower()
default_conf_path = chip_name
eflash_loader_cfg_org = bl_find_file("eflash_loader_cfg", ".conf")
eflash_loader_cfg = os.path.join(
app_path, bin_build_out_path, "eflash_loader_cfg.ini"
)
shutil.copy(eflash_loader_cfg_org, eflash_loader_cfg)
    # locate the efuse_bootheader_cfg file
f_org = bl_find_file("efuse_bootheader_cfg", ".conf")
f = os.path.join(app_path, bin_build_out_path, "efuse_bootheader_cfg.ini")
# if os.path.isfile(f) == False:
shutil.copy(f_org, f)
    # select the flash model
flash_sele = bl_flash_select()
flashid_list = flash_sele.bl_flash_loader_list(chip_name, chip_name, f)
pt_file_list = bl_find_file_list("partition_cfg_", ".toml")
ro_list = bl_find_file_list(bl_factory_params_file_prefix, ".dts")
img_boot2_file_list = bl_find_file_list("blsp_boot2_", ".bin")
arrange_group_list = list(
itertools.product(pt_file_list, ro_list, img_boot2_file_list, flashid_list)
)
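    # build one whole-flash image for every (partition table, device tree,
    # boot2 binary, flash id) combination found in the config folders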
for group in arrange_group_list:
        # locate the partition table config
# pt_file = bl_find_file("partition_cfg_", ".toml")
pt_file = group[0]
pt_name = pt_file.split("partition_cfg_")
pt_name = pt_name[1].split(".toml")
pt_name = "pt{}".format(pt_name[0])
pt_helper = PtCreater(pt_file)
pt_helper.create_pt_table(
os.path.join(app_path, bin_build_out_path, "partition.bin")
)
pt_parcel = pt_helper.construct_table()
# flashid
flash_sele.bl_flash_update(chip_name, chip_name, f, group[3])
flash_id_name = group[3]
        # locate the device tree
# ro = bl_find_file(bl_factory_params_file_prefix, ".dts")
ro = group[1]
xtal = ro.split("IoTKitA_")
xtal = xtal[1].split(".dts")
xtal = xtal[0]
dts_name = "dts{}".format(xtal)
img_gen = bl_whole_img_generate()
img_gen.bl_fw_boot_head_gen(True, xtal, f, False, chip_name, chip_name)
        # locate the boot2 image
# img_boot2_file = bl_find_file("blsp_boot2_", ".bin")
img_boot2_file = group[2]
boot2_name = img_boot2_file.split("blsp_boot2_")
boot2_name = boot2_name[1].split(".bin")
boot2_name = "boot2{}".format(boot2_name[0])
file_finally_name = "{}_{}_{}_{}".format(
dts_name, pt_name, boot2_name, flash_id_name
)
img_gen.bl_image_gen("boot2", img_boot2_file)
img_gen.bl_fw_boot_head_gen(False, xtal, f, False, chip_name, chip_name)
img_gen.bl_image_gen(
"fw", os.path.join(app_path, "build_out", demo_name + ".bin")
)
img_ota = bl_img_ota()
img_ota.bl_mfg_ota_bin_gen(chip_name, chip_name, None)
img_gen.bl_whole_flash_bin_create(True, True, ro, pt_parcel, None, None, "2M")
``` |
{
"source": "9nix00/django-qiniu",
"score": 2
} |
#### File: django-qiniu/django_qiniu/backends.py
```python
from qiniustorage import backends
from django.utils.encoding import filepath_to_uri
from six.moves.urllib_parse import urljoin
from django.conf import settings
class QiniuStorage(backends.QiniuStorage):
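    # overrides url() so links come back as signed private download urls
    # with a configurable expiry instead of plain public urls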
def url(self, name):
name = self._normalize_name(self._clean_name(name))
name = filepath_to_uri(name)
expire = 3600 if not hasattr(settings, 'QINIU_PREVIEW_EXPIRE') else settings.QINIU_PREVIEW_EXPIRE
protocol = 'https://' if self.secure_url else 'http://'
url = urljoin(protocol + self.bucket_domain, name)
return self.auth.private_download_url(url, expires=expire)
```
#### File: django-qiniu/django_qiniu/utils.py
```python
from account_helper.middleware import get_current_user_id
from django.utils import timezone
from django.conf import settings
from hashlib import sha1
import os
def user_upload_dir(instance, filename):
name_struct = os.path.splitext(filename)
current_user_id = get_current_user_id()
expire = 3600 if not hasattr(settings, 'QINIU_PREVIEW_EXPIRE') else settings.QINIU_PREVIEW_EXPIRE
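    # resulting object key layout:
    #   <expire>/<user_id>/<YYYY-MM-DD-HH-MM>/<sha1(filename)><ext>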
return '{4}/{0}/{3}/{1}{2}'.format(current_user_id,
sha1(filename.encode('utf-8')).hexdigest(),
name_struct[-1] if len(name_struct) > 1 else '',
timezone.now().strftime('%Y-%m-%d-%H-%M'),
expire)
``` |
{
"source": "9nix00/fantasy",
"score": 2
} |
#### File: fantasy/fantasy_command/__init__.py
```python
import click
from flask import current_app as app
from flask.cli import with_appcontext
@click.group()
def ff():
"""
Fantasy toolbox.
"""
pass
``` |
{
"source": "9nnnice/python_training",
"score": 3
} |
#### File: python_training/test/test_modify_group.py
```python
from model.group import Group
import random
def test_modify_group(app, db, check_ui):
if app.group.count() == 0:
app.group.create(Group(name="test"))
old_groups = db.get_group_list()
group = random.choice(old_groups)
new_group = Group(id=group.id, name="Patchname")
app.group.modify_group_by_id(new_group.id, new_group)
new_groups = db.get_group_list()
index = old_groups.index(group)
old_groups[index] = new_group
assert sorted(old_groups, key=Group.id_or_max) == sorted(
new_groups, key=Group.id_or_max)
if check_ui:
assert sorted(new_groups, key=Group.id_or_max) == sorted(
app.group.get_group_list(), key=Group.id_or_max)
``` |
{
"source": "9oelM/rdsa",
"score": 4
} |
#### File: rdsa/data-structures/doubly_linked_list.py
```python
from typing import List
class DoublyLinkedListNode:
def __init__(self, x):
self.item = x
self.prev = None
self.next = None
def later_node(self, i):
if i == 0: return self
assert self.next
return self.next.later_node(i - 1)
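    # later_node(i) walks i steps forward recursively, so the index-based
    # accessors below cost O(i) time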
class DoublyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def __iter__(self):
node = self.head
while node:
yield node.item
node = node.next
def __str__(self):
return '-'.join([('(%s)' % x) for x in self])
def build(self, X: List[DoublyLinkedListNode]) -> None:
for a in X:
self.insert_last(a)
    def get_at(self, i: int):
node = self.head.later_node(i)
return node.item
def set_at(self, i, x) -> None:
node = self.head.later_node(i)
node.item = x
def insert_first(self, x) -> None:
###########################
# Part (a): Implement me! #
###########################
pass
def insert_last(self, x) -> None:
###########################
# Part (a): Implement me! #
###########################
pass
    def delete_first(self):
x = None
###########################
# Part (a): Implement me! #
###########################
return x
def delete_last(self) -> None:
x = None
###########################
# Part (a): Implement me! #
###########################
return x
def remove(self, x1, x2) -> None:
L2 = DoublyLinkedList()
###########################
# Part (b): Implement me! #
###########################
return L2
def splice(self, x, L2) -> None:
###########################
# Part (c): Implement me! #
###########################
pass
```
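One possible sketch of the Part (a) method bodies, for reference only (an assumption about one workable approach, not the graded solution; the Part (b)/(c) methods stay as exercises):
```python
# Hypothetical Part (a) bodies as they might sit on DoublyLinkedList:
def insert_first(self, x) -> None:
    node = DoublyLinkedListNode(x)
    node.next = self.head
    if self.head:
        self.head.prev = node
    else:
        self.tail = node  # list was empty
    self.head = node

def insert_last(self, x) -> None:
    node = DoublyLinkedListNode(x)
    node.prev = self.tail
    if self.tail:
        self.tail.next = node
    else:
        self.head = node  # list was empty
    self.tail = node
```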
#### File: rdsa/data-structures/linked_list.py
```python
class Node:
nxt = None
def __init__(self, data):
self.data = data
def __str__(self) -> str:
nxt_notation = 'Node' if self.nxt is not None else 'None'
return f'Node {{ data: {self.data}, nxt: {nxt_notation} }}'
"""
Singly linked list.
"""
class LinkedList:
length = 0
head = None
tail = None
def __init__(self, *args):
self.length = len(args)
current_node = None
for _, data in enumerate(args):
node = Node(data)
if self.head is None:
self.head = node
current_node = node
else:
current_node.nxt = node
current_node = node
self.tail = current_node
def delete(self, index):
"""
Takes O(n) because it needs searching and deleting.
But the deletion itself just takes O(1) time
"""
if index >= self.length or index < 0:
raise ValueError(f'{index} out of range.')
current_node = self.head
previous_node = None
target_index = 0
while target_index != index:
previous_node = current_node
current_node = current_node.nxt
target_index += 1
        if previous_node is not None:
            previous_node.nxt = current_node.nxt
            if current_node.nxt is None:
                # removed the last node, so the previous one becomes the tail
                self.tail = previous_node
        else:
            self.head = self.head.nxt
            if self.head is None:
                self.tail = None
self.length -= 1
def last(self):
return self.tail
def first(self):
return self.head
def len(self):
return self.length
    def append(self, data):
        new_last_elem = Node(data)
        if self.tail is None:  # appending to an empty list
            self.head = new_last_elem
        else:
            self.tail.nxt = new_last_elem
        self.tail = new_last_elem
        self.length += 1
def insert(self, index, data):
"""
takes O(n) because it needs to traverse through the list and insert
the actual insertion takes O(1)
"""
if index >= self.length or index < 0:
raise ValueError(f'{index} out of range.')
if index == self.length - 1:
self.append(data)
return
new_node = Node(data)
current_node = self.head
previous_node = None
target_index = 0
while target_index != index:
previous_node = current_node
current_node = current_node.nxt
target_index += 1
if previous_node is not None:
previous_node.nxt = new_node
new_node.nxt = current_node
if new_node.nxt is None:
self.tail = new_node
else:
previous_head = self.head
self.head = new_node
new_node.nxt = previous_head
self.length += 1
def at(self, index):
"""
takes O(n) because it needs to traverse through the linked list
"""
        if index < 0 or index >= self.length:  # index 0 (the head) is valid
return
current_node = self.head
target_index = 0
while target_index != index:
current_node = current_node.nxt
target_index += 1
return current_node.data
def __str__(self):
current_node = self.head
all_data = ""
while current_node is not None:
pointer_or_empty_space = "" if all_data == "" else "->"
all_data += f'{pointer_or_empty_space}{current_node.data}'
current_node = current_node.nxt
return f'{all_data}'
print("l = LinkedList(1,2,3,4)")
l = LinkedList(1,2,3,4)
print(l.len())
print(l)
print("l.append(5)")
l.append(5)
print("print(l)")
print(l)
print("l.append(6)")
l.append(6)
print(l)
print(l.len())
print("l.insert(0, 222)")
l.insert(0, 222)
print(l)
print(l.len())
print("l.insert(3, 555)")
l.insert(3, 555)
print(l)
print(l.len())
print("l.insert(1, 333)")
l.insert(1, 333)
print(l)
print(l.len())
print("l.insert(l.len() - 1, 99999)")
l.insert(l.len() - 1, 99999)
print(l)
print(l.len())
print("print(l.at(1))")
print(l.at(1))
print("print(l.last())")
print(l.last())
print("print(l.first())")
print(l.first())
print("l.delete(0)")
l.delete(0)
print(l)
print(l.len())
print("l.delete(l.len() - 1)")
l.delete(l.len() - 1)
print(l)
print(l.len())
print("l.delete(3)")
l.delete(3)
print(l)
print(l.len())
```
#### File: rdsa/data-structures/sorts.py
```python
from math import inf
from pprint import pprint
import sys
import types
from typing import List, Type, Union
def find_max_index(arr: List[int], tracking_index: int) -> int:
if tracking_index == 0:
return 0
maybe_max_index = find_max_index(arr, tracking_index - 1)
if arr[tracking_index] < arr[maybe_max_index]:
return maybe_max_index
return tracking_index
def selection_sort_recursive(arr: List[int], i: Union[int, None] = None) -> List[int]:
"""
find the largest number in A[:i + 1] and swap it to A[i]
"""
if i is None:
i = len(arr) - 1
if i == 0:
return arr
max_index = find_max_index(arr, i)
arr[i], arr[max_index] = arr[max_index], arr[i]
return selection_sort_recursive(arr, i - 1)
def selection_sort(arr: List[int]) -> List[int]:
"""
sort i items from the end, and decrease i until 1
"""
# if [9,7,6,4,1], then i would enumerate as 4,3,2,1
for i in range(len(arr) - 1, 0, -1):
# if i = 4, then j would enumerate as 0,1,2,3
largest_j = i
for j in range(i):
if arr[largest_j] < arr[j]:
largest_j = j
arr[largest_j], arr[i] = arr[i], arr[largest_j]
return arr
def insert_last(arr, i):
if i > 0 and arr[i] < arr[i - 1]:
arr[i], arr[i - 1] = arr[i - 1], arr[i]
insert_last(arr, i - 1)
def insertion_sort_recursive(arr: List[int], i = None):
"""
"""
if i is None:
i = len(arr - 1)
if i > 0:
insertion_sort_recursive(arr, i - 1)
insert_last(arr, i)
def insertion_sort(arr: List[int]) -> List[int]:
"""
sort i items from the beginning, and increase i until len(arr)
"""
for i in range(1, len(arr)):
j = i
# swap all until arr[j] < arr[j - 1]
while j > 0 and arr[j] < arr[j - 1]:
arr[j - 1], arr[j] = arr[j], arr[j - 1]
j = j - 1
return arr
def merge(arr0: List[int], arr1: List[int]) -> List[int]:
i, j = len(arr0) - 1, len(arr1) - 1
current_insertion_index = len(arr0) + len(arr1) - 1
merged_sorted_list = [None] * (len(arr0) + len(arr1))
while (i >= 0 or j >= 0) and current_insertion_index >= 0:
# -1 means the list has run out of elements already
if (arr0[i] > arr1[j] and i != -1) or j == -1:
merged_sorted_list[current_insertion_index] = arr0[i]
i -= 1
elif j != -1 or i == -1:
merged_sorted_list[current_insertion_index] = arr1[j]
j -= 1
current_insertion_index -= 1
return merged_sorted_list
# left, right: all inclusive
def merge_sort(arr: List[int], left: int, right: int) -> List[int]:
# if 4 elements, then (0 + 3 + 1) // 2 = 2
# if 5, (0 + 4 + 1) // 2 = 2
middle = (left + right) // 2
if left == right and middle == right:
return [arr[middle]]
arr0 = merge_sort(arr, left, middle)
arr1 = merge_sort(arr, middle + 1, right)
return merge(arr0, arr1)
def merge_sort_improved(arr: List[int], left = 0, right = None):
if right is None:
right = len(arr)
if 1 < right - left:
center = (left + right + 1) // 2
merge_sort_improved(arr, left, center)
merge_sort_improved(arr, center, right)
left_array, right_array = arr[left:center], arr[center:right]
i, j = 0, 0
while left < right:
if (j > len(right_array)) or (i < len(left_array) and left_array[i] < right_array[j]):
arr[left] = left_array[i]
i += 1
else:
arr[left] = right_array[j]
j += 1
left += 1
def direct_access_array_sort(arr: List[int]) -> List[int]:
"""
Note: only small numbers
"""
daa = [None] * (max(arr) + 1)
for _, elem in enumerate(arr):
daa[elem] = elem
return [x for x in daa if x is not None]
def tuple_sort(arr: List[int]):
pass
def counting_sort(arr: List[int]) -> List[int]:
new_arr = [[] for _ in range(max(arr) + 1)]
for _, elem in enumerate(arr):
new_arr[elem].append(elem)
sorted_array = []
for chain in new_arr:
for elem in chain:
sorted_array.append(elem)
return sorted_array
class Box:
key = None
item = None
digits = None
def __str__(self) -> str:
return f"Box{{digits: {self.digits} item: {self.item}}}"
def counting_sort_box(arr: List[Type[Box]]) -> List[Type[Box]]:
new_arr = [[] for _ in range(max([x.key for x in arr]) + 1)]
for _, elem in enumerate(arr):
new_arr[elem.key].append(elem)
i = 0
for chain in new_arr:
for elem in chain:
arr[i] = elem
i += 1
return arr
def radix_sort(arr: List[int]):
# the length of the original array
n = len(arr)
max_element_value = 1 + max(arr)
# c = length of a digit tuple
# dividing bit length of max_element_value by the length of the original array and adding one
# gives the number of digits that can represent the numbers in the original array in base n
# problem: it will give a number minus one the desired number for some inputs like [112,134,1245,61,63,919,41,9], leading to incorrect calculations
c = 1 + (max_element_value.bit_length() // n.bit_length())
print(f"{c} = 1 + ({max_element_value.bit_length()} // {n.bit_length()})")
D = [Box() for a in arr]
# prepare digits to be sorted, in a particular base
for i in range(n):
D[i].digits = []
D[i].item = arr[i] # to be accessed for later
high = arr[i]
for j in range(c):
# x // y, x % y = divmod(x, y)
high, low = divmod(high, n)
D[i].digits.append(low)
for d in D:
print(d)
# sort from least significant to most significant digit
for i in range(c):
for j in range(n):
D[j].key = D[j].digits[i]
counting_sort_box(D)
# print(f"{i}th counting sort")
# for d in D:
# print(d)
# output item in the original array
for i in range(n):
arr[i] = D[i].item
return arr
```
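A quick hypothetical driver for the routines above; it sticks to small non-negative integers because `direct_access_array_sort` and `counting_sort` index by value, and the comment in `radix_sort` already flags that its digit-count formula can undercount for some inputs:
```python
if __name__ == "__main__":
    data = [5, 2, 9, 1, 5, 6]
    print(selection_sort(list(data)))   # [1, 2, 5, 5, 6, 9]
    print(insertion_sort(list(data)))   # [1, 2, 5, 5, 6, 9]
    print(counting_sort(list(data)))    # [1, 2, 5, 5, 6, 9]
    in_place = list(data)
    merge_sort_improved(in_place)       # sorts in place, returns None
    print(in_place)                     # [1, 2, 5, 5, 6, 9]
```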
#### File: rdsa/data-structures/test_doubly_linked_list.py
```python
import unittest
from doubly_linked_list import DoublyLinkedList
# Change to True to visualize output
verbose = False
tests = (
(
[('insert_last', 3), ('insert_first', 2), ('insert_last', 8), ('insert_first', 2), ('insert_last', 9), ('insert_first', 7), ('delete_last',), ('delete_last',), ('delete_first',), ('splice/remove', 1, 2), ('splice/remove', 1, 2), ('splice/remove', 1, 2), ('splice/remove', 1, 2), ('splice/remove', 1, 2)],
[9, 8, 7, (2, 2, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 3)],
),
(
[('insert_first', 11), ('insert_first', 7), ('insert_first', 11), ('insert_last', 10), ('insert_first', 18), ('insert_first', 9), ('insert_last', 5), ('insert_first', 25), ('insert_first', 11), ('insert_first', 12), ('delete_first',), ('delete_first',), ('delete_last',), ('delete_last',), ('delete_first',), ('splice/remove', 2, 2), ('splice/remove', 3, 2), ('splice/remove', 1, 2), ('splice/remove', 1, 2), ('splice/remove', 1, 2)],
[12, 11, 5, 10, 25, (9, 18, 0, 1, 0, 1, 0, 1, 11, 0, 0, 1, 1, 7, 11)],
),
(
[('insert_first', 39), ('insert_first', 59), ('insert_last', 59), ('insert_first', 52), ('insert_first', 21), ('insert_last', 53), ('insert_first', 61), ('insert_first', 58), ('insert_last', 49), ('insert_last', 30), ('insert_last', 19), ('insert_first', 25), ('insert_first', 59), ('insert_last', 33), ('insert_first', 33), ('insert_last', 42), ('delete_last',), ('delete_last',), ('delete_first',), ('delete_last',), ('delete_first',), ('delete_first',), ('delete_last',), ('delete_last',), ('splice/remove', 2, 4), ('splice/remove', 5, 4), ('splice/remove', 6, 4), ('splice/remove', 1, 4), ('splice/remove', 6, 4)],
[42, 33, 33, 19, 59, 25, 30, 49, (58, 61, 0, 1, 2, 3, 21, 0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 2, 3, 1, 2, 3, 3, 52, 59, 39, 59, 53)],
),
(
[('insert_first', 64), ('insert_last', 45), ('insert_last', 10), ('insert_first', 70), ('insert_last', 25), ('insert_first', 48), ('insert_first', 26), ('insert_last', 27), ('insert_last', 96), ('insert_last', 90), ('insert_last', 64), ('insert_last', 8), ('insert_first', 65), ('insert_first', 34), ('insert_last', 20), ('insert_last', 31), ('insert_last', 84), ('insert_last', 76), ('insert_last', 73), ('insert_last', 39), ('delete_first',), ('delete_last',), ('delete_last',), ('delete_first',), ('delete_last',), ('delete_first',), ('delete_last',), ('delete_first',), ('delete_first',), ('delete_last',), ('splice/remove', 4, 5), ('splice/remove', 2, 5), ('splice/remove', 7, 5), ('splice/remove', 7, 5), ('splice/remove', 4, 5)],
[34, 39, 73, 65, 76, 26, 84, 48, 70, 31, (64, 45, 10, 0, 1, 0, 1, 2, 3, 4, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 25, 27, 0, 1, 2, 3, 4, 96, 90, 64, 8, 20)],
),
(
[('insert_last', 105), ('insert_first', 105), ('insert_first', 142), ('insert_last', 130), ('insert_last', 83), ('insert_last', 75), ('insert_last', 78), ('insert_last', 83), ('insert_last', 82), ('insert_first', 49), ('insert_first', 117), ('insert_last', 75), ('insert_last', 122), ('insert_first', 99), ('insert_first', 14), ('insert_last', 6), ('insert_first', 17), ('insert_last', 103), ('insert_last', 101), ('insert_last', 142), ('insert_last', 62), ('insert_last', 85), ('insert_first', 47), ('insert_last', 82), ('delete_first',), ('delete_first',), ('delete_last',), ('delete_last',), ('delete_first',), ('delete_first',), ('delete_last',), ('delete_last',), ('delete_first',), ('delete_first',), ('delete_last',), ('delete_first',), ('splice/remove', 6, 6), ('splice/remove', 9, 6), ('splice/remove', 10, 6), ('splice/remove', 8, 6), ('splice/remove', 2, 6)],
[47, 17, 82, 85, 14, 99, 62, 142, 117, 49, 101, 142, (105, 105, 130, 0, 1, 2, 3, 4, 5, 83, 75, 78, 83, 0, 1, 0, 1, 2, 3, 4, 5, 2, 0, 0, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 3, 4, 5, 82, 75, 122, 6, 103)],
),
)
def run_test(ops):
DS = DoublyLinkedList()
ans = []
if verbose:
print(DS)
for op in ops:
if verbose:
print(*op)
if op[0] == "insert_first":
x = op[1]
DS.insert_first(x)
if op[0] == "insert_last":
x = op[1]
DS.insert_last(x)
if op[0] == "delete_first":
ans.append(DS.delete_first())
if op[0] == "delete_last":
ans.append(DS.delete_last())
if (op[0] == "splice/remove") and DS.head:
i, n = op[1], op[2]
L = DoublyLinkedList()
L.build(range(n))
if verbose:
print('L: ', L)
x1 = DS.head.later_node(i)
x2 = x1.next
DS.splice(x1, L)
assert x2 != None
for _ in range(n):
L = DS.remove(x1.next, x2.prev)
x2 = x1.next
DS.splice(x1, L)
if verbose:
print(DS)
ans.append(tuple([x for x in DS]))
return ans
def check(test):
ops, staff_sol = test
student_sol = run_test(ops)
n1 = len(staff_sol)
n2 = len(student_sol)
if n1 != n2: return False
for i in range(n1):
if staff_sol[i] != student_sol[i]: return False
return True
class TestCases(unittest.TestCase):
def test_01(self): self.assertTrue(check(tests[ 0]))
def test_02(self): self.assertTrue(check(tests[ 1]))
def test_03(self): self.assertTrue(check(tests[ 2]))
def test_04(self): self.assertTrue(check(tests[ 3]))
def test_05(self): self.assertTrue(check(tests[ 4]))
if __name__ == '__main__':
res = unittest.main(verbosity = 3, exit = False)
```
#### File: leetcode.com/same-tree/same-tree.py
```python
from typing import List, Optional
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def traverse(self, binary_tree: List[int], node: TreeNode) -> List[int]:
binary_tree.append(node.val)
if node.left:
self.traverse(binary_tree, node.left)
else:
binary_tree.append(None)
if node.right:
self.traverse(binary_tree, node.right)
else:
binary_tree.append(None)
def isSameTree(self, p: Optional[TreeNode], q: Optional[TreeNode]) -> bool:
binary_trees = []
for _, root_node in enumerate([p, q]):
current_node: Optional[TreeNode] = root_node
binary_tree = []
if not root_node:
binary_trees.append(binary_tree)
continue
self.traverse(binary_tree, current_node)
binary_trees.append(binary_tree)
return binary_trees[0] == binary_trees[1] # O(len(binary_tree))
class Solution:
def isSameTree(self, p, q):
if p and q:
return p.val == q.val and self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
return p is q
```
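A small hypothetical check for the recursive variant (the second `Solution` above, which shadows the first):
```python
a = TreeNode(1, TreeNode(2), TreeNode(3))
b = TreeNode(1, TreeNode(2), TreeNode(3))
c = TreeNode(1, None, TreeNode(3))
print(Solution().isSameTree(a, b))  # True
print(Solution().isSameTree(a, c))  # False
```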
#### File: leetcode.com/search-insert-position/search-insert-position.py
```python
from typing import List
class Solution:
def binary_search(self, nums, target, left, right) -> int:
if left >= right:
return left
m = (left + right) // 2
if nums[m] > target:
return self.binary_search(nums, target, left, m)
elif nums[m] < target:
return self.binary_search(nums, target, m + 1, right)
else:
return m
def searchInsert(self, nums: List[int], target: int) -> int:
return self.binary_search(nums, target, 0, len(nums))
```
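Hypothetical checks against the classic examples; the search runs over the half-open range `[0, len(nums))`, so a target larger than every element maps to `len(nums)`:
```python
s = Solution()
print(s.searchInsert([1, 3, 5, 6], 5))  # 2 (target found)
print(s.searchInsert([1, 3, 5, 6], 2))  # 1 (insertion point)
print(s.searchInsert([1, 3, 5, 6], 7))  # 4 (append position)
```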
#### File: problems/programmers.co.kr/best-set.py
```python
from math import ceil
from typing import List
def solution(n: int, s: int) -> List[int]:
if n > s:
return [-1]
first = ceil(s / n)
ans = [first for _ in range(n)]
idx, total = 0, sum(ans)
while total != s:
total -= 1
ans[idx] -= 1
if idx + 1 == n: # last
idx = 0
else:
idx += 1
return ans
solution(3,9)
solution(4,14)
solution(4,15)
solution(4,17)
solution(16,17)
```
#### File: problems/programmers.co.kr/convert-word.py
```python
def isDiffByOneLetter(s1 : str, s2 : str) -> bool:
count = 0
for idx, letter1 in enumerate(s1):
if s2[idx] != letter1:
count += 1
if count >= 2:
return False
if count == 0:
return False
return True
def solution(begin, target, words):
if target not in words:
return 0
queue = [begin]
visited = [begin]
prevQueueLen = 1
count = 0
while queue:
nxt = []
for _ in range(prevQueueLen):
nxt.append(queue.pop(0))
visited += nxt
oneLetterDiffWords = []
for nxtStr in nxt:
oneLetterDiffWords += list(filter(lambda s2 : isDiffByOneLetter(nxtStr, s2) and s2 not in visited, words))
prevQueueLen = len(oneLetterDiffWords)
count += 1
if target in oneLetterDiffWords:
break
queue += oneLetterDiffWords
return count
```
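A hypothetical check with the standard example for this word-ladder problem (each step changes one letter; an unreachable target returns 0):
```python
print(solution("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]))  # 4
print(solution("hit", "zzz", ["hot", "dot", "dog"]))                       # 0
```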
#### File: problems/programmers.co.kr/delivery(WIP).py
```python
from typing import List
def solution(N: int, road: List[List[int]], k: int) -> int:
graph = {}
distanceMap = [[-1 for i in range(N + 1)] for j in range(N + 1)]
for edge in road:
if distanceMap[edge[0]][edge[1]] == -1:
distanceMap[edge[0]][edge[1]] = edge[2]
else:
distanceMap[edge[0]][edge[1]] = min(distanceMap[edge[0]][edge[1]], edge[2])
if distanceMap[edge[1]][edge[0]] == -1:
distanceMap[edge[1]][edge[0]] = edge[2]
else:
distanceMap[edge[1]][edge[0]] = min(distanceMap[edge[1]][edge[0]], edge[2])
if edge[0] in graph:
graph[edge[0]].append(edge[1])
else:
graph[edge[0]] = [edge[1]]
if edge[1] in graph:
graph[edge[1]].append(edge[0])
else:
graph[edge[1]] = [edge[0]]
visited = {}
q = [(1, 0)]
while q:
nxt, nxtDist = q.pop(0)
for node in graph[nxt]:
if nxtDist + distanceMap[nxt][node] <= k:
sumDist = nxtDist + distanceMap[nxt][node]
if (node not in visited) or (node in visited and visited[
node] > sumDist): # 2nd condition: if the node is already visited but the distance from node 1 is shorter
visited[node] = sumDist
q.append((node, sumDist))
return len(visited)
print(solution(5, [[1, 2, 1], [2, 3, 3], [5, 2, 2], [1, 4, 2], [5, 3, 1], [5, 4, 2]], 3))
print(solution(6, [[1, 2, 1], [1, 3, 2], [2, 3, 2], [3, 4, 3], [3, 5, 2], [3, 5, 3], [5, 6, 1]], 4))
```
#### File: problems/programmers.co.kr/lineup-methods.py
```python
from typing import List
def fac(n: int) -> int:
if n == 1:
return 1
return n * fac(n - 1)
def recur(combinations: int, n: int, k: int, ans: List[int], people: List[int]):
    eachPartLen = combinations // n  # integer division: fac(n) is divisible by n
for i in range(1, n + 1):
if i * eachPartLen >= k:
if i != 0: # if i == 0: k = k
k = k - (i - 1) * eachPartLen
if k == 0: # last element
k = eachPartLen
# find ith number not yet used
count = 0
for idx in range(1, len(people)):
if not people[idx]:
count += 1
if i == count:
people[idx] = True
ans.append(idx)
break
break
if combinations == 1:
return ans
else:
        return recur(combinations // n, n - 1, k, ans, people)
def solution(n: int, k: int) -> List[int]:
combinations = fac(n)
    eachPartLen = combinations // n
people = [False for i in range(0, n+1)] # leave idx 0 empty
ans = []
for i in range(1, n + 1):
if i * eachPartLen >= k:
if i != 0: # if i == 0: k = k
k = k - (i - 1) * eachPartLen
if k == 0: # last element
k = eachPartLen
people[i] = True
ans.append(i)
break
    return recur(combinations // n, n - 1, k, ans, people)
solution(3, 5)
solution(4, 5)
solution(4, 19)
```
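The driver calls above discard their results; a hypothetical check that prints the k-th permutation (1-indexed, lexicographic order):
```python
print(solution(3, 5))  # [3, 1, 2] -- the 5th permutation of [1, 2, 3]
```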
#### File: problems/programmers.co.kr/ranking.py
```python
def solution(n, results):
answer = 0
return answer
```
#### File: problems/programmers.co.kr/target-number(not-working).py
```python
from typing import List
def dfs(total: int, numbers: List[int], target: int) -> int:
    if numbers:
        num = numbers[0]
        rest = numbers[1:]  # slice so each recursive branch keeps its own copy
        return dfs(total - num, rest, target) + dfs(total + num, rest, target)
    else:
        return 1 if total == target else 0
```
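With the slicing fix above, a hypothetical check against the well-known example for this problem (five 1s, target 3, five valid sign assignments):
```python
print(dfs(0, [1, 1, 1, 1, 1], 3))  # 5
```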
#### File: problems/programmers.co.kr/word-break.py
```python
from typing import List
def wordBreak(s : str, words: List[str]) -> bool:
ok = [True]
    max_len = max(map(len, words + ['']))  # adding [''] guards against an empty word list
print(max_len) # the longest word's length
words = set(words) # sift out duplicates
for i in range(1, len(s)+1): # for each letter in string
# add to array "ok"
        ok.append(any(ok[j] and s[j:i] in words for j in range(max(0, i - max_len), i)))
return ok[-1]
if __name__ == "__main__":
print(wordBreak("applepenapple", ["apple", "pen"]))
print(wordBreak("leetcode", ["leet", "code"]))
print(wordBreak("catsandog", ["cats", "dog", "sand", "and", "cat"]))
``` |
{
"source": "9OMShitikov/anytask",
"score": 2
} |
#### File: app/easyCI/tasksPool.py
```python
import json
import requests
import tempfile
import shutil
import subprocess
import os
import logging
import urllib.request
from multiprocessing import Pool
import app.easyCI.docker as docker
from contextlib import contextmanager
LOG = logging.getLogger(__name__)
CONFIG = "config.json"
PASSWORDS = "<PASSWORD>"
MAX_COMMENT_SIZE = 10000
PROCS = 1
REQUEST_TIMEOUT = 300
class QueueTask(object):
host = None
auth = None
config = None
id = None
course = None
task = None
issue = None
event = None
files = None
def __repr__(self):
return repr(self.__dict__)
@contextmanager
def tmp_dir():
t = tempfile.mkdtemp(dir="/var/tmp")
try:
yield t
finally:
shutil.rmtree(t)
def git_clone(repo, dst_dir):
cmd = ["git", "clone", repo, dst_dir]
LOG.info("RUN: %s", cmd)
subprocess.check_call(cmd)
def prepare_dir(qtask, dirname):
git_dir = os.path.join(dirname, "git")
task_dir = os.path.join(dirname, "task")
git_clone(qtask.course["repo"], git_dir)
os.mkdir(task_dir)
for url in qtask.files:
filename = url.split('/')[-1]
dst_path = os.path.join(task_dir, filename)
LOG.info("Download '%s' -> '%s'", url, dst_path)
print(url, dst_path)
urllib.request.urlretrieve(url, dst_path)
def process_task(qtask):
LOG.info("Proccess task %s", qtask.id)
with tmp_dir() as dirname:
prepare_dir(qtask, dirname)
run_cmd = qtask.course["run_cmd"] + [qtask.task, "/task_dir/task"]
#run_cmd = ["ls", "/task_dir/task"]
ret = docker.execute(run_cmd, cwd="/task_dir/git", timeout=qtask.course["timeout"], user='root',
network='bridge', image=qtask.course["docker_image"],
volumes=["{}:/task_dir:ro".format(os.path.abspath(dirname))])
status, retcode, is_timeout, output = ret
LOG.info("Task %d done, status:%s, retcode:%d, is_timeout:%d",
qtask.id, status, retcode, is_timeout)
LOG.info(" == Task %d output start", qtask.id)
for line in output.split("\n"):
LOG.info(line)
LOG.info(" == Task %d output end", qtask.id)
if len(output) > MAX_COMMENT_SIZE:
output = output[:MAX_COMMENT_SIZE]
output += u"\n...\nTRUNCATED"
if is_timeout:
output += u"\nTIMEOUT ({} sec)".format(qtask.course["timeout"])
comment = u"[id:{}] Check DONE!<br>\nSubmited on {}<br>\n<pre>{}</pre>\n".format(qtask.id,
qtask.event_timestamp,
output)
LOG.info("{}/api/v1/issue/{}/add_comment".format(qtask.host, qtask.issue_id))
response = requests.post("{}/api/v1/issue/{}/add_comment".format(qtask.host, qtask.issue_id),
auth=qtask.auth, data={"comment":comment.encode("utf-8")}, timeout=REQUEST_TIMEOUT)
response.raise_for_status()
LOG.info(" == Task %d DONE!, URL: %s/issue/%d", qtask.id, qtask.host, qtask.issue_id)
return qtask
def load_passwords(filename=PASSWORDS):
with open(filename) as config_fn:
return json.load(config_fn)
def load_config(filename=CONFIG):
with open(filename) as config_fn:
config_arr = json.load(config_fn)
config_dict = {}
for course in config_arr:
config_dict[course["course_id"]] = course
return config_dict
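# Assumed config.json shape, inferred from the fields read above and in
# put_to_pool(); every value below is illustrative only:
# [
#   {
#     "course_id": 1,
#     "host": "https://anytask.example",
#     "repo": "https://git.example/course.git",
#     "run_cmd": ["python", "run_tests.py"],
#     "timeout": 60,
#     "docker_image": "course-checker:latest"
#   }
# ]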
def get_auth(passwords, host):
host_auth = passwords[host]
return (host_auth["username"], host_auth["password"])
config = load_config()
passwords = load_passwords()
pool = Pool(processes=PROCS)
def put_to_pool(task):
course_id = task["course_id"]
course = config[course_id]
auth = get_auth(passwords, course["host"])
files = task["files"]
qtask = QueueTask()
qtask.host = course["host"]
qtask.auth = auth
qtask.course = course
qtask.task = task["title"]
qtask.issue_id = task["issue_id"]
qtask.files = files
qtask.id = task["event"]["id"]
qtask.event_timestamp = task["event"]["timestamp"]
print(qtask)
pool.apply_async(process_task, args=(qtask,))
``` |
{
"source": "9p4/comic-ocr",
"score": 3
} |
#### File: 9p4/comic-ocr/smbcgather.py
```python
import os
import json
BASEDIR = "output/smbc-comics.com/comic/"
def main():
comics = os.listdir(BASEDIR)
data = []
for comic in comics:
if not os.path.exists(BASEDIR+comic+"/completed"):
continue
with open(BASEDIR+comic+"/metadata.json", "r") as metadata_file:
data.append(json.loads(metadata_file.read()))
with open("data.js", "w") as file:
file.write("const list=")
file.write(json.dumps(data))
file.write(";")
if __name__ == "__main__":
main()
``` |
{
"source": "9qW/Tritan-Bot-Discord",
"score": 2
} |
#### File: Tritan-Bot-Discord/slash/slash.py
```python
import os
import discord
import random
import asyncio
from discord_slash import SlashCommand
from discord.ext import commands
from discord_slash.utils import manage_commands
from config import *
## Intents
intents = discord.Intents.default()
intents.members = True
## Define the bot class
class OwOMyBot(commands.Bot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
## Client Options
client = OwOMyBot(command_prefix="*", case_insensitive = True, intents = intents)
slash = SlashCommand(client, sync_commands=True)
client.remove_command('help')
## On Ready
@client.event
async def on_ready():
print(f"Logged in as {client.user}")
owohello = client.get_channel(CONTROL_CHANNEL)
embed=discord.Embed(title="🟢 The slash client has connected.", color=0x7289DA)
await owohello.send(embed=embed)
## Cogs
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
print(f'Loaded {filename[:-3]}')
## Cog Loading
@client.command(aliases=['lo'])
async def load(ctx, extension):
if ctx.author.id in DEVS:
client.load_extension(f'cogs.{extension}')
await ctx.message.reply(f"Loaded {extension}")
## Cog Unloading
@client.command(aliases=['unlo'])
async def unload(ctx, extension):
if ctx.author.id in DEVS:
client.unload_extension(f'cogs.{extension}')
await ctx.message.reply(f"Unloaded {extension}")
## Cog Reloading
@client.command(aliases=['relo'])
async def reload(ctx, extension):
if ctx.author.id in DEVS:
client.unload_extension(f'cogs.{extension}')
client.load_extension(f'cogs.{extension}')
await ctx.message.reply(f"Reloaded {extension}")
# - Slashy Lashy - #
@slash.slash(name="dev", description="This is a test command.")
async def _dev(ctx):
    if any(role.id == 792593736076099634 for role in ctx.author.roles):
await ctx.send("Poggers :white_check_mark:")
else:
await ctx.send("You no use this :(")
## Slash Commands: Ping
@slash.slash(name="ping", description='What else would this be for ;-;')
async def _ping(ctx):
'''Tf else do you think this is for?'''
embed=discord.Embed(title = "Ping", description = "Ponged back the ping in {0:.2f}ms!".format(client.latency * 1000), color=0x7289DA)
embed.set_author(name = client.user, icon_url = "https://cdn.tritan.gg/tritan-bot/logo.webp")
embed.set_thumbnail(url=ctx.author.avatar_url)
embed.set_footer(text = "Requested by: {}".format(ctx.author), icon_url = ctx.author.avatar_url_as(size=128))
await ctx.send(embed=embed)
## Slash Commands: Lock Channel
#@slash.slash(name="Lock", description="Locks a channel", options=[manage_commands.create_option("channel", "The channel you want to lock.", SlashCommandOptionType.CHANNEL, True), manage_commands.create_option("reason", "the reason for locking the channel", SlashCommandOptionType.STRING, True)])
#async def _lock(ctx, channel, reason):
# if ctx.author.guild_permissions.administrator == True:
# await ctx.channel.set_permissions(
# ctx.guild.default_role, send_messages=False)
# embed = discord.Embed(title=":warning: Channel has been locked!",description="Moderation action", color=0x7289DA)
# embed.add_field(
# name=(f"{ctx.author} has locked this channel!"),
# value=(f"{reason}"))
# await ctx.send(embeds=[embed])
# else:
# await ctx.send("You aren't an administrator!")
## Slash Command: Add Role
<EMAIL>(name="addrole", description="Gives a user a role,", options=[manage_commands.create_option("user", "the user you want to assign the role to.", SlashCommandOptionType.USER, True), manage_commands.create_option("role", "the role you're giving.", SlashCommandOptionType.ROLE, True)])
#async def _addrole(ctx, user, role):
# if ctx.author.guild_permissions.administrator == True:
# await user.add_roles(role)
# embed=discord.Embed(title=":gift: Role given", description=f"{role} has been given to {user}", color=0x7289DA)
# await ctx.send(embeds=[embed])
# else:
# await ctx.send("You aren't an administrator!")
## Slash Commands: Remove Role
<EMAIL>(name="removerole", description="Removes a role from a user.", options=[manage_commands.create_option("user", "the user you're removing the role from", SlashCommandOptionType.USER, True), manage_commands.create_option("role", "the role you want to remove.", SlashCommandOptionType.ROLE, True)])
#async def _removerole(ctx, user, role):
# if ctx.author.guild_permissions.administrator == True:
# await user.remove_roles(role)
# embed=discord.Embed(title=":white_check_mark: Role removed", description="{role} has been removed from {user}", color=0x7289DA)
# await ctx.send(embeds=[embed])
# else:
# await ctx.send("You aren't an administrator!")
## Slash Commands: Unlock Channel
<EMAIL>(name="unlock", description="unlocks a channel", options=[manage_commands.create_option("channel", "The channel you want to lock.", SlashCommandOptionType.CHANNEL, True), manage_commands.create_option("reason", "the reason for unlocking the channel", SlashCommandOptionType.STRING, True)])
#async def _unlock(ctx, channel, reason):
# if ctx.author.guild_permissions.administrator == True:
# await ctx.channel.set_permissions(
# ctx.guild.default_role, send_messages=True)
# embed = discord.Embed(title=":white_check_mark: Channel has been unlocked!",description="Moderation action", color=0x7289DA)
# embed.add_field(
# name=(f"{ctx.author} has unlocked this channel!"),
# value=(f"{reason}"))
# await ctx.send(embeds=[embed])
# else:
# await ctx.send("You aren't an administrator!")
## Slash Commands: Announce to Channel
#@<EMAIL>(name="announce", description="Makes an announcement.", options=[manage_commands.create_option("channel", "The channel you want to send the announcement to.", SlashCommandOptionType.CHANNEL, True), manage_commands.create_option("message", "your message", SlashCommandOptionType.STRING, True)])
#async def _announce(ctx, channel, message):
# if ctx.author.guild_permissions.administrator == True:
# await channel.send(f"{message}")
# else:
# await ctx.send("You aren't an administrator!")
## Slash Commands: Guild Permissions
#@slash.<EMAIL>(name='guildperms')
#async def _guildperms(self, ctx, *, member: discord.Member=None):
# """A simple command which checks a members Guild Permissions.
# If member is not provided, the author will be checked."""
# if not member:
# member = ctx.author
# perms = '\n'.join(perm for perm, value in member.guild_permissions if value)
# embed = discord.Embed(title='Permissions for:', description=ctx.guild.name, colour=member.colour, color=0x7289DA)
# embed.set_author(icon_url=member.avatar_url, name=str(member))
# embed.add_field(name='\uFEFF', value=perms)
# await ctx.send(content=None, embed=embed)
## Slash Commands: Owoify
@slash.slash(name = "owoify", description = "owoifies your text ><", options = [manage_commands.create_option(
name = "text",
description = "text",
option_type = 3,
required = True
)])
async def _owowo(ctx, msg: str):
'''owo'''
vowels = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
def last_replace(s, old, new):
li = s.rsplit(old, 1)
return new.join(li)
def text_to_owo(text):
""" Converts your text to OwO """
smileys = [';;w;;', '^w^', '>w<', 'UwU', '(・`ω\´・)', '(´・ω・\`)']
text = text.replace('L', 'W').replace('l', 'w')
text = text.replace('R', 'W').replace('r', 'w')
text = last_replace(text, '!', '! {}'.format(random.choice(smileys)))
text = last_replace(text, '?', '? owo')
text = last_replace(text, '.', '. {}'.format(random.choice(smileys)))
for v in vowels:
if 'n{}'.format(v) in text:
text = text.replace('n{}'.format(v), 'ny{}'.format(v))
if 'N{}'.format(v) in text:
text = text.replace('N{}'.format(v), 'N{}{}'.format('Y' if v.isupper() else 'y', v))
return text
await ctx.send(text_to_owo(msg))
## Client Login
client.run(TOKEN)
``` |
{
"source": "9ran1te/gitbot",
"score": 3
} |
#### File: 9ran1te/gitbot/main.py
```python
import os
import json
def filepath(filename):
return '{}/{}'.format(
os.path.dirname(os.path.realpath(__file__)),
filename
)
def load_config():
    try:
        with open(filepath('config.json')) as f:
            config = json.load(f)
    except Exception:
        config = []
    return config
def main():
pass
if __name__ == "__main__":
main()
``` |
{
"source": "9seconds/concierge-mako",
"score": 3
} |
#### File: concierge-mako/tests/test_plugin.py
```python
import concierge_mako.templater
TEMPLATE = """\
lalala
% for i in range(2):
i - ${i}
% endfor
"""
RESULT = """\
lalala
i - 0
i - 1
"""
def test_name():
assert concierge_mako.templater.MakoTemplater.name == "mako"
def test_render():
tpl = concierge_mako.templater.MakoTemplater()
assert tpl.render(TEMPLATE) == RESULT
``` |
{
"source": "9seconds/curly",
"score": 3
} |
#### File: curly/curly/exceptions.py
```python
class CurlyError(ValueError):
"""Main exception raised from Curly."""
def __init__(self, message, *args, **kwargs):
super().__init__(message.format(*args, **kwargs))
class CurlyEvaluateError(CurlyError):
"""Expression evaluation error."""
class CurlyLexerError(CurlyError):
"""Errors on lexing phase."""
class CurlyParserError(CurlyError):
"""Errors on parsing phase."""
class CurlyLexerStringDoesNotMatchError(CurlyLexerError):
"""Exception raised if given string does not match regular expression."""
def __init__(self, text, pattern):
super().__init__("String {0!r} is not valid pattern {1!r}",
text, pattern.pattern)
class CurlyEvaluateNoKeyError(CurlyEvaluateError):
"""Exception raised if context has no required key."""
def __init__(self, context, key):
super().__init__("Context {0!r} has no key {1!r}", context, key)
class CurlyParserUnknownTokenError(CurlyParserError):
"""Exception raised on unknown token type."""
def __init__(self, token):
super().__init__("Unknown token {0!s}".format(token))
class CurlyParserUnknownStartBlockError(CurlyParserError):
"""Exception raised if function of start block is unknown."""
def __init__(self, token):
super().__init__("Unknown block tag {0} for token {1!s}",
token.contents["function"], token)
class CurlyParserUnknownEndBlockError(CurlyParserError):
"""Exception raised if function of end block is unknown."""
def __init__(self, token):
super().__init__("Unknown block tag {0} for token {1!s}",
token.contents["function"], token)
class CurlyParserFoundNotDoneError(CurlyParserError):
"""Exception raised if some node is not done."""
def __init__(self, node):
super().__init__("Cannot find enclosement statement for {0!s}",
node.token)
class CurlyParserNoUnfinishedNodeError(CurlyParserError):
"""Exception raised if searching for not finished node is failed."""
def __init__(self):
super().__init__("Cannot find not finished node.")
class CurlyParserUnexpectedUnfinishedNodeError(CurlyParserError):
"""Exception raised if we found unfinished node which is not expected."""
def __init__(self, search_for, node):
super().__init__("Excepted to find {0} node but found {1!s} instead",
search_for, node)
``` |
{
"source": "9seconds/hrss",
"score": 2
} |
#### File: concierge/endpoints/common.py
```python
import abc
import os
import warnings
import concierge.core.processor
import concierge.endpoints.cli
import concierge.endpoints.templates
import concierge.notifications
import concierge.templater
import concierge.utils
LOG = concierge.utils.logger(__name__)
class App(metaclass=abc.ABCMeta):
@classmethod
def specify_parser(cls, parser):
return parser
def __init__(self, options):
if options.use_templater is None:
warnings.warn(
"--use-templater flag and therefore implicit templater "
"autoresolve are deprecated. Please use explicit "
"templater in both concierge-check and concierge.",
FutureWarning)
if options.no_templater:
warnings.warn(
"Flag --no-templater is deprecated. "
"Please use 'dummy' templater instead.",
DeprecationWarning)
self.source_path = options.source_path
self.destination_path = options.destination_path
self.boring_syntax = options.boring_syntax
self.add_header = options.add_header
self.no_templater = getattr(options, "no_templater", False)
self.templater_name = options.use_templater
if options.no_desktop_notifications:
self.notificator = concierge.notifications.dummy_notifier
else:
self.notificator = concierge.notifications.notifier
try:
self.templater = concierge.templater.resolve_templater(
self.templater_name)
except KeyError:
raise ValueError(
"Cannot find templater for {0}".format(options.use_templater))
if self.add_header is None:
self.add_header = options.destination_path is not None
concierge.utils.configure_logging(
options.debug,
options.verbose,
self.destination_path is None)
@abc.abstractmethod
def do(self):
pass
def output(self):
content = self.get_new_config()
if self.destination_path is None:
print(content)
return
try:
with concierge.utils.topen(self.destination_path, True) as destfp:
destfp.write(content)
except Exception as exc:
self.log_error("Cannot write to file %s: %s",
self.destination_path, exc)
raise
def get_new_config(self):
content = self.fetch_content()
if not self.no_templater:
content = self.apply_template(content)
else:
LOG.info("No templating is used.")
if not self.boring_syntax:
content = self.process_syntax(content)
else:
LOG.info("Boring syntax was choosen, not processing is applied.")
if self.add_header:
content = self.attach_header(content)
else:
LOG.info("No need to attach header.")
return content
def fetch_content(self):
LOG.info("Fetching content from %s", self.source_path)
try:
content = concierge.utils.get_content(self.source_path)
except Exception as exc:
self.log_error("Cannot fetch content from %s: %s",
self.source_path, exc)
raise
LOG.info("Original content of %s:\n%s", self.source_path, content)
return content
def apply_template(self, content):
LOG.info("Applying templater to content of %s.", self.source_path)
try:
content = self.templater.render(content)
except Exception as exc:
self.log_error("Cannot process template (%s) in source file %s.",
self.source_path, self.templater.name, exc)
raise
LOG.info("Templated content of %s:\n%s", self.source_path, content)
return content
def process_syntax(self, content):
try:
return concierge.core.processor.process(content)
except Exception as exc:
self.log_error("Cannot parse content of source file %s: %s",
self.source_path, exc)
raise
def attach_header(self, content):
header = concierge.endpoints.templates.make_header(
rc_file=self.source_path)
content = header + content
return content
def log_error(self, template, *args):
LOG.error(template, *args)
self.notificator(template % args)
def main(app_class):
def main_func():
parser = concierge.endpoints.cli.create_parser()
parser = app_class.specify_parser(parser)
options = parser.parse_args()
app = app_class(options)
LOG.debug("Options: %s", options)
try:
return app.do()
except KeyboardInterrupt:
pass
except Exception as exc:
LOG.exception("Failed with error %s", exc)
return os.EX_SOFTWARE
return main_func
```
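A minimal sketch of a concrete consumer of this base class (hypothetical; the `PrintApp` name is invented, but the pattern matches how the test suite below subclasses `App`):
```python
# Hypothetical minimal App subclass wired through main():
class PrintApp(App):
    def do(self):
        return self.output()

entrypoint = main(PrintApp)  # argparse-driven callable for a console script
```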
#### File: hrss/concierge/utils.py
```python
import copy
import logging
import logging.config
import logging.handlers
import sys
LOG_NAMESPACE = "concierge"
def get_syslog_address():
if sys.platform.startswith("linux"):
return "/dev/log"
elif sys.platform == "darwin":
return "/var/run/syslog"
else:
return "localhost", logging.handlers.SYSLOG_UDP_PORT
LOG_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"debug": {
"format": "[%(levelname)s] %(name)30s:%(lineno)d %(message)s"
},
"simple": {
"format": "%(message)s"
},
"verbose": {
"format": "[%(levelname)s] %(message)s"
},
"syslog": {
"format": "{0}[%(process)d]: %(message)s".format(LOG_NAMESPACE)
}
},
"handlers": {
"stderr": {
"level": "ERROR",
"class": "logging.StreamHandler",
"formatter": "simple"
},
"syslog": {
"level": "ERROR",
"class": "logging.handlers.SysLogHandler",
"formatter": "syslog",
"address": get_syslog_address()
}
},
"loggers": {
LOG_NAMESPACE: {
"handlers": ["syslog"],
"level": "DEBUG",
"propagate": True
}
}
}
def topen(filename, write=False):
mode = "w" if write else "r"
return open(filename, mode, encoding="utf-8", errors="surrogateescape")
def get_content(filename):
with topen(filename) as filefp:
return filefp.read()
def logger(namespace):
return logging.getLogger(LOG_NAMESPACE + "." + namespace)
def configure_logging(debug=False, verbose=True, stderr=True):
config = copy.deepcopy(LOG_CONFIG)
for handler in config["handlers"].values():
if verbose:
handler["level"] = "INFO"
if debug:
handler["level"] = "DEBUG"
if verbose:
config["handlers"]["stderr"]["formatter"] = "verbose"
if debug:
config["handlers"]["stderr"]["formatter"] = "debug"
if stderr:
config["loggers"][LOG_NAMESPACE]["handlers"].append("stderr")
logging.config.dictConfig(config)
```
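A hypothetical usage sketch for the helpers above; `configure_logging` routes the `concierge` namespace logger to syslog, plus stderr when requested:
```python
configure_logging(debug=True, verbose=True, stderr=True)
log = logger("example")  # -> logging.getLogger("concierge.example")
log.debug("debug output now reaches syslog and stderr")
```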
#### File: hrss/tests/test_core_lexer.py
```python
import pytest
import concierge.core.exceptions as exceptions
import concierge.core.lexer as lexer
def make_token(indent_lvl=0):
token_name = "a{0}".format(0)
return lexer.Token(indent_lvl, token_name, [token_name], token_name, 0)
@pytest.mark.parametrize(
"input_, output_", (
("", ""),
(" ", ""),
(" #", ""),
("# ", ""),
(" # dsfsdfsdf sdfsdfsd", ""),
(" a", " a"),
(" a# sdfsfdf", " a"),
(" a # sdfsfsd x xxxxxxx # sdfsfd", " a")))
def test_clean_line(input_, output_):
assert lexer.clean_line(input_) == output_
@pytest.mark.parametrize(
"input_, output_", (
("", ""),
(" ", " "),
(" ", " "),
(" ", " "),
("\t ", " "),
("\t\t\t", 12 * " "),
("\t \t", " "),
("\t\t\t ", " "),
(" \t\t\t ", " ")))
def test_reindent_line(input_, output_):
assert lexer.reindent_line(input_) == output_
@pytest.mark.parametrize(
"indent_", (
"",
" ",
" ",
"\t",
"\t\t",
"\t \t",
"\t\t ",
" \t\t"))
@pytest.mark.parametrize(
"content_", (
"",
"a"))
def test_get_split_indent(indent_, content_):
text = indent_ + content_
assert lexer.get_indent(text) == indent_
assert lexer.split_indent(text) == (indent_, content_)
@pytest.mark.parametrize(
"text", (
"#",
"# ",
"# sdfsdf #",
"## sdfsfdf",
"# #sdf # #"))
def test_regexp_comment_ok(text):
assert lexer.RE_COMMENT.match(text)
@pytest.mark.parametrize(
"text", (
"",
"sdfdsf",
"sdfsdf#",
"dzfsdfsdf#sdfsdf",
"sdf #",
" #"))
def test_regexp_comment_nok(text):
assert not lexer.RE_COMMENT.match(text)
@pytest.mark.parametrize(
"text", (
" ",
" ",
" ",
"\t"))
def test_regexp_indent_ok(text):
assert lexer.RE_INDENT.match(text)
@pytest.mark.parametrize(
"text", (
"",
"sdf",
"sdfs ",
"sdfsfd dsfx"))
def test_regexp_indent_nok(text):
assert not lexer.RE_INDENT.match(text)
@pytest.mark.parametrize(
"text", (
"''",
"'sdf'",
"'sdfsf\'sfdsf'",
"'sdfsd\'\'sdfsf\'sdf\'sdfxx'"
'""',
'"sdf"',
'"sdfsf\"fdsf"',
'"sdfsd\"\"sdfsf\"sdf\"sdfx"',
"'\"'",
"'sdfsdf' \"sdfsdf\"",
"'sdfx\"sdx' 'sdfdf\"' \"sdfx'sdfffffdf\" \"sdfsdf'sdxx'ds\""))
def test_regexp_quoted_ok(text):
assert lexer.RE_QUOTED.match(text)
@pytest.mark.parametrize(
"text", (
"'xx\"",
"\"sdfk'"))
def test_regexp_quoted_nok(text):
assert not lexer.RE_QUOTED.match(text)
@pytest.mark.parametrize(
"text", (
"hhh x",
"hhh x",
"hhh \tx",
"hhh=x",
"hhh =sdfsf",
"sdf= sdfx",
"sdf = sdf",
"hhh x",
"sdfsf- x"))
def test_regexp_optvalue_ok(text):
assert lexer.RE_OPT_VALUE.match(text)
@pytest.mark.parametrize(
"text", (
"",
"hhx",
"sdfsf ",
" sdfsfdf",
"sdfsf =",
"sdfsf= ",
"sdfsdf = ",
" "))
def test_regexp_optvalue_nok(text):
assert not lexer.RE_OPT_VALUE.match(text)
@pytest.mark.parametrize(
"input_, output_", (
("", ""),
("a", "a"),
(" a", " a"),
(" a", " a"),
("\ta", " a"),
(" \ta", " a"),
(" \t a", " a"),
(" \t a ", " a"),
(" \t a #sdfds", " a"),
(" \t a #sdfds #", " a"),
("a\t", "a"),
("a\t\r", "a"),
("a\r", "a"),
("a\n", "a")))
def test_process_line(input_, output_):
assert lexer.process_line(input_) == output_
@pytest.mark.parametrize(
"text, indent_len, option, values", (
("\ta 1", 1, "a", "1"),
("\ta 1 2", 1, "a", ["1", "2"]),
("\t\ta 1 2", 2, "a", ["1", "2"]),
("a 1 2 'cv'", 0, "a", ["1", "2", "'cv'"]),
("a 1 2 \"cv\"", 0, "a", ["1", "2", '"cv"']),
("a 1 2 \"cv\" 3", 0, "a", ["1", "2", '"cv"', "3"]),
("\ta=1", 1, "a", "1"),
("\ta =1 2", 1, "a", ["1", "2"]),
("\t\ta= 1 2", 2, "a", ["1", "2"]),
("a = 1 2 'cv'", 0, "a", ["1", "2", "'cv'"])))
def test_make_token_ok(text, indent_len, option, values):
processed_line = lexer.process_line(text)
token = lexer.make_token(processed_line, text, 1)
if not isinstance(values, (list, tuple)):
values = [values]
assert token.indent == indent_len
assert token.option == option
assert token.values == values
assert token.original == text
@pytest.mark.parametrize(
"text", (
"",
"a",
"a=",
"a =",
"a ",
"=",
"==",
" =asd"))
def test_make_token_incorrect_value(text):
with pytest.raises(exceptions.LexerIncorrectOptionValue):
lexer.make_token(text, text, 1)
@pytest.mark.parametrize(
"offset", (
1, 2, 3, 5, 6, 7))
def test_make_token_incorrect_indentation(offset):
text = " " * offset + "a = 1"
with pytest.raises(exceptions.LexerIncorrectIndentationLength):
lexer.make_token(text, text, 1)
def test_verify_tokens_empty():
assert lexer.verify_tokens([]) == []
def test_verify_tokens_one_token():
token = make_token(indent_lvl=0)
assert lexer.verify_tokens([token]) == [token]
@pytest.mark.parametrize(
"level", list(range(1, 4)))
def test_verify_tokens_one_token_incorrect_level(level):
token = make_token(indent_lvl=level)
with pytest.raises(exceptions.LexerIncorrectFirstIndentationError):
assert lexer.verify_tokens([token]) == [token]
def test_verify_tokens_ladder_level():
tokens = [make_token(indent_lvl=level) for level in range(5)]
assert lexer.verify_tokens(tokens) == tokens
@pytest.mark.parametrize(
"level", list(range(2, 7)))
def test_verify_tokens_big_level_gap(level):
tokens = [make_token(indent_lvl=0), make_token(indent_lvl=level)]
with pytest.raises(exceptions.LexerIncorrectIndentationError):
assert lexer.verify_tokens(tokens) == tokens
@pytest.mark.parametrize("level", list(range(5)))
def test_verify_tokens_dedent(level):
tokens = [make_token(indent_lvl=lvl) for lvl in range(5)]
tokens.append(make_token(indent_lvl=level))
assert lexer.verify_tokens(tokens) == tokens
def test_verify_tokens_lex_ok():
text = """\
aa = 1
b 1
q = 2
c = 3 # q
d = 5 'aa' "sdx" xx 3 3
e = 3
""".strip()
tokens = lexer.lex(text.split("\n"))
assert len(tokens) == 6
assert tokens[0].indent == 0
assert tokens[0].option == "aa"
assert tokens[0].values == ["1"]
assert tokens[0].original == "aa = 1"
assert tokens[0].lineno == 1
assert tokens[1].indent == 0
assert tokens[1].option == "b"
assert tokens[1].values == ["1"]
assert tokens[1].original == "b 1"
assert tokens[1].lineno == 2
assert tokens[2].indent == 1
assert tokens[2].option == "q"
assert tokens[2].values == ["2"]
assert tokens[2].original == " q = 2"
assert tokens[2].lineno == 5
assert tokens[3].indent == 1
assert tokens[3].option == "c"
assert tokens[3].values == ["3"]
assert tokens[3].original == " c = 3 # q"
assert tokens[3].lineno == 6
assert tokens[4].indent == 2
assert tokens[4].option == "d"
assert tokens[4].values == ["5", "'aa'", '"sdx"', "xx", "3", "3"]
assert tokens[4].original == " d = 5 'aa' \"sdx\" xx 3 3"
assert tokens[4].lineno == 7
assert tokens[5].indent == 0
assert tokens[5].option == "e"
assert tokens[5].values == ["3"]
assert tokens[5].original == "e = 3"
assert tokens[5].lineno == 9
def test_lex_incorrect_first_indentation():
text = """\
a = 1
b = 3
"""
with pytest.raises(exceptions.LexerIncorrectFirstIndentationError):
lexer.lex(text.split("\n"))
```
#### File: hrss/tests/test_endpoints_app.py
```python
import os
import pytest
import concierge
import concierge.endpoints.cli as cli
import concierge.endpoints.common as common
def get_app():
parser = cli.create_parser()
parser = SimpleApp.specify_parser(parser)
parsed = parser.parse_args()
app = SimpleApp(parsed)
return app
class SimpleApp(common.App):
def do(self):
return self.output()
def test_resolve_templater_unknown(cliargs_default, monkeypatch):
def boom(*args, **kwargs):
raise KeyError
monkeypatch.setattr("concierge.templater.resolve_templater", boom)
with pytest.raises(ValueError):
get_app()
def test_fetch_content_ok(cliargs_default, mock_get_content):
mock_get_content.return_value = "Content"
app = get_app()
assert app.fetch_content() == mock_get_content.return_value
def test_fetch_content_exception(cliargs_default, mock_get_content):
mock_get_content.side_effect = Exception
app = get_app()
with pytest.raises(Exception):
app.fetch_content()
def test_apply_content_ok(monkeypatch, cliargs_default, template_render):
template_render.side_effect = lambda param: param.upper()
app = get_app()
assert app.apply_template("hello") == "HELLO"
def test_apply_content_exception(monkeypatch, cliargs_default,
template_render):
template_render.side_effect = Exception
app = get_app()
with pytest.raises(Exception):
app.apply_template("hello")
def test_process_syntax_ok(cliargs_default):
content = """\
Host n
ViaJumpHost x
"""
app = get_app()
assert app.process_syntax(content) == (
"Host n\n"
" ProxyCommand ssh -W %h:%p x\n")
def test_process_syntax_exception(cliargs_default):
app = get_app()
with pytest.raises(Exception):
app.process_syntax("WTF")
def test_attach_header(cliargs_default):
app = get_app()
assert app.attach_header("Content").startswith("#")
@pytest.mark.parametrize(
"no_templater", (
True, False))
@pytest.mark.parametrize(
"boring_syntax", (
True, False))
@pytest.mark.parametrize(
"add_header", (
True, False))
def test_get_new_config(monkeypatch, cliargs_default, template_render,
mock_get_content, no_templater, boring_syntax,
add_header):
template_render.side_effect = lambda param: param.upper()
mock_get_content.return_value = """\
Compression yes
Host q
HostName e
Host b
HostName lalala
"""
app = get_app()
app.no_templater = no_templater
app.boring_syntax = boring_syntax
app.add_header = add_header
if not no_templater and not boring_syntax:
with pytest.raises(Exception):
app.get_new_config()
else:
result = app.get_new_config()
if not no_templater:
assert "COMPRESSION YES" in result
else:
assert "Compression yes" in result
if boring_syntax:
assert "Host qb" not in result
else:
assert "Host qb" in result
if add_header:
assert result.startswith("#")
else:
assert not result.startswith("#")
def test_output_stdout(capfd, monkeypatch, cliargs_default, mock_get_content):
mock_get_content.return_value = """\
Compression yes
Host q
HostName e
Host b
HostName lalala
"""
app = get_app()
app.destination_path = None
app.output()
out, err = capfd.readouterr()
assert out == """\
Host qb
HostName lalala
Host q
HostName e
Host *
Compression yes
"""
assert not err
def test_output_file(cliargs_default, ptmpdir, mock_get_content):
mock_get_content.return_value = """\
Compression yes
Host q
HostName e
Host b
HostName lalala
"""
app = get_app()
app.destination_path = ptmpdir.join("config").strpath
app.output()
with open(ptmpdir.join("config").strpath, "r") as filefp:
assert filefp.read()
def test_output_file_exception(monkeypatch, cliargs_default, ptmpdir,
mock_get_content):
def write_fail(*args, **kwargs):
raise Exception
monkeypatch.setattr("concierge.utils.topen", write_fail)
mock_get_content.return_value = """\
Compression yes
Host q
HostName e
Host b
HostName lalala
"""
app = get_app()
app.destination_path = ptmpdir.join("config").strpath
with pytest.raises(Exception):
app.output()
@pytest.mark.longrun
def test_create_app(cliargs_fullset, mock_log_configuration):
_, options = cliargs_fullset
parser = cli.create_parser()
parsed = parser.parse_args()
app = SimpleApp(parsed)
assert app.boring_syntax == bool(options["boring_syntax"])
if options["source_path"]:
assert app.source_path == "/path/to"
else:
assert app.source_path == concierge.DEFAULT_RC
if options["destination_path"]:
assert app.destination_path == "/path/to"
else:
assert app.destination_path is None
if options["add_header"] is not None:
assert app.add_header
else:
assert app.add_header == (options["destination_path"] is not None)
assert mock_log_configuration.called
def test_mainfunc_ok(cliargs_default, mock_get_content):
mock_get_content.return_value = """\
Compression yes
Host q
HostName e
Host b
HostName lalala
"""
main = concierge.endpoints.common.main(SimpleApp)
result = main()
assert result is None or result == os.EX_OK
def test_mainfunc_exception(cliargs_default, mock_get_content):
mock_get_content.side_effect = Exception
main = concierge.endpoints.common.main(SimpleApp)
assert main() != os.EX_OK
def test_mainfunc_keyboardinterrupt(cliargs_default, mock_get_content):
mock_get_content.side_effect = KeyboardInterrupt
main = concierge.endpoints.common.main(SimpleApp)
result = main()
assert result is None or result == os.EX_OK
```
#### File: hrss/tests/test_endpoints_daemon.py
```python
import errno
import itertools
import os
import os.path
import inotify_simple
import pytest
import concierge
import concierge.endpoints.cli as cli
import concierge.endpoints.daemon as daemon
import concierge.utils
def get_app(*params):
parser = cli.create_parser()
parser = daemon.Daemon.specify_parser(parser)
parsed = parser.parse_args()
for param in params:
if param:
setattr(parsed, param.strip("-"), True)
app = daemon.Daemon(parsed)
return app
def test_create_app(cliargs_default, cliparam_systemd, cliparam_curlsh):
app = get_app(cliparam_systemd, cliparam_curlsh)
assert app.systemd == bool(cliparam_systemd)
assert app.curlsh == bool(cliparam_curlsh)
def test_print_help(capfd, cliargs_default, cliparam_curlsh):
app = get_app("--systemd", cliparam_curlsh)
app.do()
out, err = capfd.readouterr()
out = out.split("\n")
if cliparam_curlsh:
for line in out:
assert not line.startswith("$")
else:
assert line.startswith(("$", "Please")) or not line
assert not err
@pytest.mark.parametrize(
"main_method", (
True, False))
def test_work(mock_mainfunc, ptmpdir, main_method):
_, _, inotifier = mock_mainfunc
app = get_app()
app.destination_path = ptmpdir.join("filename").strpath
if main_method:
app.do()
else:
app.track()
inotifier.add_watch.assert_called_once_with(
os.path.dirname(concierge.DEFAULT_RC), daemon.INOTIFY_FLAGS)
assert not inotifier.v
with concierge.utils.topen(ptmpdir.join("filename").strpath) as filefp:
assert 1 == sum(int(line.strip() == "Host *") for line in filefp)
def test_track_no_our_events(no_sleep, mock_mainfunc, ptmpdir):
_, _, inotifier = mock_mainfunc
inotifier.v.clear()
inotifier.v.extend([inotify_simple.Event(0, 0, 0, "Fake")] * 3)
app = get_app()
app.destination_path = ptmpdir.join("filename").strpath
app.track()
assert not os.path.exists(ptmpdir.join("filename").strpath)
def test_track_cannot_read(no_sleep, mock_mainfunc, ptmpdir):
_, _, inotifier = mock_mainfunc
def add_watch(*args, **kwargs):
exc = IOError("Hello?")
exc.errno = errno.EPERM
raise exc
inotifier.add_watch.side_effect = add_watch
app = get_app()
app.destination_path = ptmpdir.join("filename").strpath
with pytest.raises(IOError):
app.track()
@pytest.mark.parametrize(
"ev1, ev2",
list(itertools.permutations(inotify_simple.flags, 2)))
def test_event_names(ev1, ev2):
events = [
inotify_simple.Event(0, ev1, 0, "ev1"),
inotify_simple.Event(0, ev2, 0, "ev2"),
inotify_simple.Event(0, ev1 | ev2, 0, "ev1ev2")]
descriptions = daemon.Daemon.describe_events(events)
assert len(descriptions) == len(events)
assert "ev1" in descriptions[0]
assert str(ev1) in descriptions[0]
assert "ev2" in descriptions[1]
assert str(ev2) in descriptions[1]
assert "ev1" in descriptions[2]
assert "ev2" in descriptions[2]
assert str(ev1) in descriptions[2]
assert str(ev2) in descriptions[2]
def test_mainfunc_ok(mock_mainfunc):
result = daemon.main()
assert result is None or result == os.EX_OK
def test_mainfunc_exception(mock_mainfunc):
_, _, inotifier = mock_mainfunc
inotifier.read.side_effect = Exception
result = daemon.main()
assert result != os.EX_OK
```
#### File: hrss/tests/test_parser.py
```python
import pytest
import concierge.core.exceptions as exceptions
import concierge.core.lexer as lexer
import concierge.core.parser as parser
def test_is_trackable_host():
assert parser.is_trackable_host("Host")
assert not parser.is_trackable_host("Host-")
def test_get_host_tokens():
text = """\
Host name
Option 1
Host 2
Host 3
Hello yes
q 5
""".strip()
tokens = lexer.lex(text.split("\n"))
tokens = tokens[1:]
leveled_tokens = parser.get_host_tokens(1, tokens)
assert len(leveled_tokens) == 4
assert leveled_tokens[-1].option == "Hello"
def test_parse_options_big_config_with_star_host():
text = """\
# Okay, rather big config but let's try to cover all cases here.
# Basically, I've been trying to split it to different test cases but it
# was really hard to maintain those tests. So there.
Compression yes
CompressionLevel 5
Host m
Port 22
Host e v
User root
HostName env10
Host WWW
TCPKeepAlive 5
Host q
Protocol 2
-Host x
SendEnv 12
Host qex
Port 35
ViaJumpHost env312
Host *
CompressionLevel 6
""".strip()
tokens = lexer.lex(text.split("\n"))
tree = parser.parse(tokens)
assert tree.name == ""
assert tree.parent is None
assert len(tree.hosts) == 2
star_host = tree.hosts[0]
assert star_host.trackable
assert star_host.fullname == "*"
assert star_host.options == {"Compression": ["yes"],
"CompressionLevel": ["6"]}
m_host = tree.hosts[1]
assert m_host.trackable
assert m_host.fullname == "m"
assert m_host.options == {"Port": ["22"]}
assert len(m_host.hosts) == 4
me_host = m_host.hosts[0]
assert me_host.trackable
assert me_host.fullname == "me"
assert me_host.options == {"Port": ["22"], "HostName": ["env10"],
"User": ["root"]}
assert len(me_host.hosts) == 1
mewww_host = me_host.hosts[0]
assert mewww_host.trackable
assert mewww_host.fullname == "meWWW"
assert mewww_host.options == {"Port": ["22"], "TCPKeepAlive": ["5"],
"HostName": ["env10"], "User": ["root"]}
assert mewww_host.hosts == []
mq_host = m_host.hosts[1]
assert mq_host.trackable
assert mq_host.fullname == "mq"
assert mq_host.options == {"Protocol": ["2"], "Port": ["22"]}
assert mq_host.hosts == []
mv_host = m_host.hosts[2]
assert mv_host.trackable
assert mv_host.fullname == "mv"
assert mv_host.options == {"Port": ["22"], "HostName": ["env10"],
"User": ["root"]}
assert len(mv_host.hosts) == 1
mvwww_host = mv_host.hosts[0]
assert mvwww_host.trackable
assert mvwww_host.fullname == "mvWWW"
assert mvwww_host.options == {"Port": ["22"], "TCPKeepAlive": ["5"],
"HostName": ["env10"], "User": ["root"]}
assert mvwww_host.hosts == []
mx_host = m_host.hosts[3]
assert not mx_host.trackable
assert mx_host.fullname == "mx"
assert mx_host.options == {"SendEnv": ["12"], "Port": ["22"]}
assert len(mx_host.hosts) == 1
mxqex_host = mx_host.hosts[0]
assert mxqex_host.trackable
assert mxqex_host.fullname == "mxqex"
assert mxqex_host.options == {"SendEnv": ["12"], "Port": ["35"],
"ProxyCommand": ["ssh -W %h:%p env312"]}
assert mxqex_host.hosts == []
def test_parse_options_star_host_invariant():
no_star_host = """\
Compression yes
CompressionLevel 6
""".strip()
star_host = """\
Compression yes
Host *
CompressionLevel 6
""".strip()
star_host_only = """\
Host *
Compression yes
CompressionLevel 6
""".strip()
no_star_host = parser.parse(lexer.lex(no_star_host.split("\n")))
star_host = parser.parse(lexer.lex(star_host.split("\n")))
star_host_only = parser.parse(lexer.lex(star_host_only.split("\n")))
assert no_star_host.struct == star_host.struct
assert no_star_host.struct == star_host_only.struct
def test_parse_multiple_options():
config = """\
Host q
User root
Host name
User rooter
LocalForward 80 brumm:80
LocalForward 443 brumm:443
LocalForward 22 brumm:23
""".strip()
parsed = parser.parse(lexer.lex(config.split("\n")))
assert sorted(parsed.hosts[1].options["LocalForward"]) == [
"22 brumm:23",
"443 brumm:443",
"80 brumm:80"]
@pytest.mark.parametrize(
"empty_lines", list(range(5)))
def test_nothing_to_parse(empty_lines):
root = parser.parse(lexer.lex([""] * empty_lines))
assert len(root.hosts) == 1
assert root.hosts[0].fullname == "*"
assert root.hosts[0].options == {}
assert root.hosts[0].hosts == []
def test_unknown_option():
tokens = lexer.lex(["ASDF 1"])
with pytest.raises(exceptions.ParserUnknownOption):
parser.parse(tokens)
```
#### File: hrss/tests/test_processor.py
```python
import concierge.core.lexer as lexer
import concierge.core.parser as parser
import concierge.core.processor as process
CONTENT = """\
Compression yes
Host q
Port 22
-Host e
Protocol 2
Host h
HostName hew
LocalForward 22 b:22
LocalForward 23 b:23
Host q
HostName qqq
""".strip()
def test_generate():
tokens = lexer.lex(CONTENT.split("\n"))
tree = parser.parse(tokens)
new_config = list(process.generate(tree))
assert new_config == [
"Host qeh",
" HostName hew",
" LocalForward 22 b:22",
" LocalForward 23 b:23",
" Port 22",
" Protocol 2",
"",
"Host qq",
" HostName qqq",
" Port 22",
"",
"Host q",
" Port 22",
"",
"Host *",
" Compression yes",
""]
def test_process():
assert process.process(CONTENT) == """\
Host qeh
HostName hew
LocalForward 22 b:22
LocalForward 23 b:23
Port 22
Protocol 2
Host qq
HostName qqq
Port 22
Host q
Port 22
Host *
Compression yes
"""
```
#### File: hrss/tests/test_utils.py
```python
import pytest
import concierge.utils as utils
def test_topen_write_read(ptmpdir):
filename = ptmpdir.join("test")
filename.write_text("TEST", "utf-8")
with utils.topen(filename.strpath) as filefp:
with pytest.raises(IOError):
filefp.write("1")
assert filefp.read() == "TEST"
def test_topen_write_ok(ptmpdir):
filename = ptmpdir.join("test")
filename.write_text("TEST", "utf-8")
with utils.topen(filename.strpath, True) as filefp:
filefp.write("1")
with utils.topen(filename.strpath) as filefp:
assert filefp.read() == "1"
@pytest.mark.parametrize(
"content", (
"1", "", "TEST"))
def test_get_content(ptmpdir, content):
filename = ptmpdir.join("test")
filename.write_text(content, "utf-8")
assert utils.get_content(filename.strpath) == content
@pytest.mark.parametrize(
"name, address", (
("linux", "/dev/log"),
("linux2", "/dev/log"),
("linux3", "/dev/log"),
("darwin", "/var/run/syslog"),
("windows", ("localhost", 514))))
def test_get_syslog_address(monkeypatch, name, address):
monkeypatch.setattr("sys.platform", name)
assert utils.get_syslog_address() == address
@pytest.mark.parametrize(
"debug", (
True, False))
@pytest.mark.parametrize(
"verbose", (
True, False))
@pytest.mark.parametrize(
"stderr", (
True, False))
@pytest.mark.no_mock_log_configuration
def test_configure_logging(debug, verbose, stderr):
utils.configure_logging(debug, verbose, stderr)
``` |
{
"source": "9seconds/iblocklist2ipset",
"score": 3
} |
#### File: iblocklist2ipset/iblocklist2ipset/ipset.py
```python
import math
from six import u
def ipset_hashsize(element_count):
assert element_count > 0
power = math.floor(math.log(element_count) / math.log(2))
return int(math.pow(2, power))
def generate_ipset(ipset_name, networks):
# create blocklist0 hash:net family inet hashsize 32768 maxelem 65536
header = u("create {0} hash:net family inet hashsize {1} maxelem {2}")
yield header.format(
ipset_name, ipset_hashsize(len(networks)), len(networks)
)
for network in networks:
# add blocklist0 172.16.31.10/16
yield u("add {0} {1}").format(ipset_name, network)
```
#### File: iblocklist2ipset/iblocklist2ipset/utils.py
```python
import functools
import time
import sys
import os
import os.path
import posixpath
import re
from six import u, moves, print_
from . import TIME_TO_SLEEP
def try_if_empty(count):
assert count >= 1
def outer_decorator(func):
@functools.wraps(func)
def inner_decorator(*args, **kwargs):
for attempt in moves.range(count - 1): # pylint: disable=E1101
try:
result = func(*args, **kwargs)
except Exception as exc: # pylint: disable=W0703
print_(u("[{0}/{1}] Error during parsing: {2}").format(
attempt, count, exc
), file=sys.stderr)
time.sleep(TIME_TO_SLEEP)
else:
return result
return func(*args, **kwargs)
return inner_decorator
return outer_decorator
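
# Usage sketch (fetch is a hypothetical function): with count=3 the loop
# guards the first two attempts, printing the error and sleeping
# TIME_TO_SLEEP between them, while the third call is left unguarded so its
# exception propagates to the caller.
#
# @try_if_empty(3)
# def fetch(url):
#     ...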
def script_example_header(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
print_("#!/bin/bash\nset -e", end="\n\n")
if os.getenv("VIRTUAL_ENV"):
script_path = posixpath.join(
os.getenv("VIRTUAL_ENV"), "bin", "activate"
)
print_(u('source {0}').format(printable_path(script_path)),
end="\n\n")
return func(*args, **kwargs)
return decorator
def printable_path(path):
abspath = os.path.abspath(path)
if re.search(r"\s", abspath) is not None:
abspath = '"' + abspath.replace('"', r'\"') + '"'
return abspath
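
# e.g. printable_path("my dir/file") returns the absolute path wrapped in
# double quotes (inner quotes escaped), while paths without whitespace are
# returned unquoted.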
```
#### File: iblocklist2ipset/tests/test_networks.py
```python
if __package__ is None:
import sys
import os.path
sys.path[0:0] = [
os.path.dirname( # project_root
os.path.dirname( # tests
os.path.abspath(__file__) # this file
)
)
]
import httmock
import pytest
from six import moves
from iblocklist2ipset.networks import extract_networks, fetch_networks, \
convert_to_ipnetworks, ParseError
from tests import CommonTest
# noinspection PyUnresolvedReferences
class TestConvertToIPNetwork(object):
@pytest.mark.parametrize("input_", (
"HELLO:172.16.58.3-123.123.123.255",
"EVIL HACKER:172.16.31.10-172.16.31.1050",
":150.250.250.250-150.251.250.250"
))
def test_ok(self, input_):
network = convert_to_ipnetworks(input_)
assert network and len(network) > 0
@pytest.mark.parametrize("input_", (
"HELLO:192.168.127.12-123.123.123.255",
"EVIL HACKER:150.250.250.250-",
":150.250.250.250-15",
"::15.12"
))
def test_nok(self, input_):
with pytest.raises(ParseError):
convert_to_ipnetworks(input_)
@pytest.mark.parametrize("input_", (
"",
"#commentary"
"#commented:127.0.0.1-127.0.0.12"
))
def test_empty(self, input_):
assert convert_to_ipnetworks(input_) == []
# noinspection PyUnresolvedReferences,PyMethodMayBeStatic
class TestFetchNetworks(CommonTest):
def test_ok(self):
with httmock.HTTMock(self.fake_response(self.FAKE_CONTENT)):
networks = [str(ntw) for ntw in fetch_networks("http://fake.url")]
assert set(networks) == set(self.FAKE_NETWORKS)
@pytest.mark.parametrize("input_", (
" ",
"#commentary",
"""
# commentary
# another commentary
"""
))
def test_empty(self, input_):
with httmock.HTTMock(self.fake_response(input_)):
assert list(fetch_networks("http://fake.url")) == []
# noinspection PyMethodMayBeStatic
class TestExtractNetworks(CommonTest):
def test_no_repeats(self):
urls = ["http://fake{0}.url".format(idx) for idx in moves.range(3)]
with httmock.HTTMock(self.fake_response(self.FAKE_CONTENT)):
networks = extract_networks(urls)
assert set(networks) == set(self.FAKE_NETWORKS)
``` |
{
"source": "9seconds/multiverse",
"score": 2
} |
#### File: multiverse/multiverse/utils.py
```python
import copy
import csv
import logging
import logging.config
import pkg_resources
import unicodecsv
LOG_NAMESPACE = "multiverse"
LOG_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"debug": {
"format": "[%(levelname)s] %(name)30s:%(lineno)d :: %(message)s"
},
"simple": {
"format": "%(message)s"
},
"verbose": {
"format": "[%(levelname)s] %(message)s"
}
},
"handlers": {
"stderr": {
"level": "ERROR",
"class": "logging.StreamHandler",
"formatter": "verbose"
}
},
"loggers": {
LOG_NAMESPACE: {
"handlers": ["stderr"],
"level": "DEBUG",
"propagate": True
}
}
}
csv.register_dialect(
LOG_NAMESPACE,
delimiter=",",
doublequote=True,
lineterminator="\n",
quotechar='"',
quoting=csv.QUOTE_ALL,
skipinitialspace=False)
def logger(namespace):
return logging.getLogger(LOG_NAMESPACE + "." + namespace)
def configure_logging(debug=False):
config = copy.deepcopy(LOG_CONFIG)
for handler in config["handlers"].values():
handler["level"] = "DEBUG" if debug else "ERROR"
handler["formatter"] = "debug" if debug else "verbose"
logging.config.dictConfig(config)
def all_plugins(group):
plugins = {}
for plugin in pkg_resources.iter_entry_points(group):
plugins[plugin.name] = plugin
return plugins
def make_csv_reader(filefp):
return unicodecsv.reader(filefp, dialect=LOG_NAMESPACE)
def make_csv_writer(filefp):
return unicodecsv.writer(filefp, dialect=LOG_NAMESPACE)
``` |
{
"source": "9seconds/pep3134",
"score": 3
} |
#### File: pep3134/pep3134/__init__.py
```python
import sys
if sys.version_info[0] == 2:
from .py2 import raise_, raise_from
else:
from .py3 import raise_, raise_from
def reraise():
"""
    Does the same thing that ``raise`` without arguments does in Python 2.
    But works in both Python 2 and Python 3 (>= 3.3).
"""
raise_(*sys.exc_info())
# silence pyflakes
assert reraise
assert raise_
assert raise_from
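
# Usage sketch (do_work/log are hypothetical names): re-raise the active
# exception with its original traceback on both Python 2 and Python 3:
#
# try:
#     do_work()
# except Exception:
#     log()
#     reraise()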
```
#### File: pep3134/pep3134/py2.py
```python
import sys
from .utils import prepare_raise, construct_exc_class
# noinspection PyUnusedLocal
@prepare_raise
def raise_(type_, value=None, traceback=None): # pylint: disable=W0613
"""
Does the same as ordinary ``raise`` with arguments do in Python 2.
But works in Python 3 (>= 3.3) also!
Please checkout README on https://github.com/9seconds/pep3134
to get an idea about possible pitfals. But short story is: please
be pretty carefull with tracebacks. If it is possible, use sys.exc_info
instead. But in most cases it will work as you expect.
"""
prev_exc, prev_tb = sys.exc_info()[1:]
proxy_class = construct_exc_class(type(type_))
err = proxy_class(type_)
err.__original_exception__.__cause__ = None
err.__original_exception__.__suppress_context__ = False
if getattr(prev_exc, "__pep3134__", False):
prev_exc = prev_exc.with_traceback(prev_tb)
err.__original_exception__.__context__ = prev_exc
if traceback:
raise err.with_traceback(traceback), None, traceback
else:
raise err
def raise_from(exc, cause):
"""
Does the same as ``raise LALALA from BLABLABLA`` does in Python 3.
But works in Python 2 also!
    Please check out the README at https://github.com/9seconds/pep3134
    to get an idea about possible pitfalls. The short story is: please
    be careful with tracebacks. If possible, use sys.exc_info
    instead. But in most cases it will work as you expect.
"""
context_tb = sys.exc_info()[2]
incorrect_cause = not (
(isinstance(cause, type) and issubclass(cause, Exception)) or
isinstance(cause, BaseException) or
cause is None
)
if incorrect_cause:
raise TypeError("exception causes must derive from BaseException")
if cause is not None:
if not getattr(cause, "__pep3134__", False):
# noinspection PyBroadException
try:
raise_(cause)
except: # noqa pylint: disable=W0702
cause = sys.exc_info()[1]
cause.__fixed_traceback__ = context_tb
# noinspection PyBroadException
try:
raise_(exc)
except: # noqa pylint: disable=W0702
exc = sys.exc_info()[1]
exc.__original_exception__.__suppress_context__ = True
exc.__original_exception__.__cause__ = cause
exc.__original_exception__.__context__ = None
raise exc
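
# Usage sketch (lookup is a hypothetical callee): the Python 2 counterpart
# of "raise IOError('lookup failed') from exc" -- the cause is attached and
# the implicit __context__ is suppressed:
#
# try:
#     lookup(key)
# except KeyError as exc:
#     raise_from(IOError("lookup failed"), exc)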
```
#### File: 9seconds/pep3134/test_pep3134.py
```python
import sys
import traceback
import pytest
from pep3134 import raise_, raise_from, reraise
def get_tb(traceback_):
return [tuple(entry[:2]) for entry in traceback.extract_tb(traceback_)]
@pytest.mark.parametrize("input_", (
(IOError, None, None),
(IOError, "OKAY", None)
))
def test_simple_raise(input_):
try:
raise_(IOError)
assert False
except IOError:
ctype, cexc, ctb = sys.exc_info()
assert isinstance(cexc, IOError)
assert issubclass(ctype, IOError)
assert cexc.__context__ is None
assert cexc.__suppress_context__ == False
assert cexc.__traceback__ is ctb
def test_assert_with_proper_callback():
try:
raise TypeError
except TypeError:
traceback = sys.exc_info()[2]
try:
raise_(IOError, None, traceback)
assert False
except IOError:
ctype, cexc1, ctb1 = sys.exc_info()
assert ctb1 is not traceback
assert isinstance(cexc1, IOError)
assert issubclass(ctype, IOError)
assert isinstance(cexc1.__context__, TypeError)
assert cexc1.__suppress_context__ == False
assert get_tb(traceback)[0] == get_tb(ctb1)[-1]
try:
raise_(IOError, None, None)
assert False
except IOError:
ctype, cexc2, ctb2 = sys.exc_info()
assert ctb2 is not ctb1
assert ctb1 is not traceback
assert get_tb(traceback)[0] != get_tb(ctb2)[-1]
assert cexc2.__traceback__ is ctb2
assert cexc2.__traceback__ is not ctb1
@pytest.mark.parametrize("input_, expect_", (
((KeyError, None, None), "KeyError()"),
((KeyError, "OK", None), "KeyError('OK',)"),
((KeyError("OK"), None), "KeyError('OK',)")
))
def test_repr(input_, expect_):
try:
raise_(*input_)
assert False
except KeyError as exc:
assert repr(exc) == expect_
def test_raise_fault():
with pytest.raises(TypeError):
raise_(IOError("OK"), "NOK", None)
def test_raise_custom():
class CustomException(Exception):
def parameter(self):
return 1
try:
raise_(CustomException())
assert False
except CustomException:
type_, value_, tb_ = sys.exc_info()
assert issubclass(type_, CustomException)
assert isinstance(value_, CustomException)
assert value_.parameter() == 1
@pytest.mark.parametrize("input_, cause_", (
(IOError("Hello"), KeyError("OKAY")),
(IOError("Hello"), KeyError),
(IOError, KeyError("OKAY")),
(IOError, KeyError),
(IOError, None)
))
def test_raise_from(input_, cause_):
with pytest.raises(IOError):
raise_from(input_, cause_)
@pytest.mark.parametrize("input_, cause_", (
(IOError("Hello"), "str"),
(IOError("Hello"), set)
))
def test_raise_from_fail(input_, cause_):
with pytest.raises(TypeError):
raise_from(input_, cause_)
def test_raise_from_proxy_exc():
try:
raise_(TypeError, "OK")
assert False
except TypeError:
cause, cause_tb = sys.exc_info()[1:]
try:
raise_from(IOError, cause)
except IOError:
exc, exc_tb = sys.exc_info()[1:]
assert exc.__suppress_context__
assert exc.__context__ is None
assert exc.__cause__ is cause
assert exc.__traceback__ is exc_tb
assert exc.__cause__.__traceback__ is cause_tb
def test_raise_from_ordinary_exc():
try:
raise TypeError("OK")
except TypeError:
cause, cause_tb = sys.exc_info()[1:]
try:
raise_from(IOError, cause)
assert False
except IOError:
exc, exc_tb = sys.exc_info()[1:]
if sys.version_info[0] == 2:
assert exc.__cause__ is not cause
assert hasattr(exc.__cause__, "__pep3134__")
else:
assert exc.__cause__ is cause
assert exc.__suppress_context__
assert exc.__context__ is None
assert exc.__traceback__ is exc_tb
assert exc.__cause__.__traceback__ is cause_tb
def test_raise_from_none():
try:
raise_from(IOError, None)
assert False
except IOError:
exc, exc_tb = sys.exc_info()[1:]
assert exc.__suppress_context__
assert exc.__context__ is None
assert exc.__cause__ is None
def test_reraise():
try:
raise_from(IOError, KeyError)
assert False
except IOError:
try:
reraise()
except IOError:
reraised, reraised_tb = sys.exc_info()[1:]
assert not reraised.__suppress_context__
assert reraised.__cause__ is None
``` |
{
"source": "9seconds/rannts-crawler",
"score": 3
} |
#### File: rannts_crawler/spiders/meetups.py
```python
import dateparser
import scrapy.loader
from rannts_crawler import items
from . import base
class MeetupsSpider(base.Spider):
name = "meetups"
start_urls = [
"https://rannts.ru/meetups"
]
def parse(self, response):
for result in self.follow_urls(
response, "section.section div.content h1 a::attr(href)",
self.parse_meetups):
yield result
for result in self.follow_urls(
response, "nav.pagination a::attr(href)", self.parse):
yield result
def parse_meetups(self, response):
loader = scrapy.loader.ItemLoader(
item=items.MeetupsItem(), response=response)
loader.add_css("number", ".hero-body h1.title")
loader.add_css("date", ".hero-body h2.subtitle")
loader.add_css("place", ".hero-body h2.subtitle a::text")
loader.add_css("place_link", ".hero-body h2.subtitle a::attr(href)")
loader.add_xpath("description", "//section[2]/div/div")
loader.add_value("talks", list(self.parse_talks(loader, response)))
loader.add_value("description_links", [
response.urljoin(link)
for link in response.xpath("//section[2]/div/div") \
.css("a::attr(href)").extract()
])
yield loader.load_item()
def parse_talks(self, base_loader, response):
base_datetime = base_loader.load_item()["date"]
for selector in response.xpath("//section[4]"):
loader = scrapy.loader.ItemLoader(
item=items.TalksItem(), selector=selector)
loader.add_css("title", "h4")
loader.add_css("speaker", "h5")
loader.add_css("company", "h5")
loader.add_value(
"date",
self.make_date(
base_datetime,
selector.css("div.is-2::text").extract_first()
)
)
loader.add_xpath(
"description",
"//div/div/div[1]/div[2]/p[not(position() = 1 and @class='is-small')]"
)
slides_link = selector.xpath(
"//div/div/div[1]/div[2]/p[1 and @class='is-small']/a/@href")
loader.add_value(
"slides_link",
response.urljoin(slides_link.extract_first()))
loader.add_value(
"description_links",
[
response.urljoin(url)
for url in selector.xpath(
"//div/div/div[1]/div[2]/p[not(position() = 1 and @class='is-small')]/a/@href").extract()
]
)
loader.add_css("video_link", "iframe::attr(src)")
yield loader.load_item()
def make_date(self, base, time):
parsed_time = dateparser.parse(time)
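        # e.g. make_date(meetup_date, "19:30") keeps the meetup's calendar
        # date and replaces only the time-of-day fields parsed by dateparser
        # (illustrative values; meetup_date comes from the parsed item).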
return base.replace(
hour=parsed_time.hour,
minute=parsed_time.minute,
second=parsed_time.second,
microsecond=parsed_time.microsecond
)
``` |
{
"source": "9seconds/rymtracks",
"score": 3
} |
#### File: services/base/mixins.py
```python
from .water import Water
from bs4 import BeautifulSoup
###############################################################################
__all__ = "HTMLMixin", "JSONMixin", "XMLMixin"
###############################################################################
class HTMLMixin(object):
"""
Mixin which asserts that response contains HTML and converts it into
Beautiful Soup instance.
"""
@staticmethod
def convert_response(response):
"""
Converter of response into Beautiful Soup instance.
"""
return Water(BeautifulSoup(response.text, "html"))
class JSONMixin(object):
"""
Mixin which asserts that response contains JSON and parses it.
"""
@staticmethod
def convert_response(response):
"""
Converts response into Python objects.
"""
return response.json()
class XMLMixin(object):
"""
Mixin which asserts that response contains XML and converts it into
Beautiful Soup instance.
"""
@staticmethod
def convert_response(response):
"""
Converter of response into Beautiful Soup instance.
"""
return Water(BeautifulSoup(response.text, "xml"))
```
#### File: services/implementations/sevendigital.py
```python
from ..base import SchemaOrgService
##############################################################################
class SevenDigital(SchemaOrgService):
"""
Implementation of Service which is intended to parse 7Digital.
"""
def fetch_tracks(self, soup):
return soup.find_all(
itemtype="http://schema.org/MusicRecording",
itemprop="tracks"
)
def fetch_name(self, container):
return container.find(itemprop="name")["content"]
``` |
{
"source": "9-si/nevergrad",
"score": 2
} |
#### File: nevergrad/parametrization/core.py
```python
import uuid
import warnings
import operator
import functools
from collections import OrderedDict
import typing as tp
import numpy as np
from nevergrad.common.typetools import ArrayLike
from . import utils
# pylint: disable=no-value-for-parameter
P = tp.TypeVar("P", bound="Parameter")
D = tp.TypeVar("D", bound="Dict")
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Parameter:
"""Abstract class providing the core functionality of a parameter, aka
value, internal/model parameters, mutation, recombination
and additional features such as shared random state,
constraint check, hashes, generation and naming.
"""
def __init__(self, **parameters: tp.Any) -> None:
# Main features
self.uid = uuid.uuid4().hex
self.parents_uids: tp.List[str] = []
self.heritage: tp.Dict[str, tp.Any] = {"lineage": self.uid} # passed through to children
self.loss: tp.Optional[float] = None # associated loss
self._parameters = None if not parameters else Dict(**parameters) # internal/model parameters
self._dimension: tp.Optional[int] = None
# Additional convenient features
self._random_state: tp.Optional[np.random.RandomState] = None # lazy initialization
self._generation = 0
self._constraint_checkers: tp.List[tp.Callable[[tp.Any], bool]] = []
self._name: tp.Optional[str] = None
self._frozen = False
self._descriptors: tp.Optional[utils.Descriptors] = None
self._meta: tp.Dict[str, tp.Any] = {} # for anything algorithm related
@property
def value(self) -> tp.Any:
raise NotImplementedError
@value.setter
def value(self, value: tp.Any) -> tp.Any:
raise NotImplementedError
@property
def args(self) -> tp.Tuple[tp.Any, ...]:
"""Value of the positional arguments.
Used to input value in a function as `func(*param.args, **param.kwargs)`
Use `parameter.Instrumentation` to set `args` and `kwargs` with full freedom.
"""
return (self.value,)
@property
def kwargs(self) -> tp.Dict[str, tp.Any]:
"""Value of the keyword arguments.
Used to input value in a function as `func(*param.args, **param.kwargs)`
Use `parameter.Instrumentation` to set `args` and `kwargs` with full freedom.
"""
return {}
@property
def parameters(self) -> "Dict":
"""Internal/model parameters for this parameter
"""
        if self._parameters is None:  # delayed instantiation to avoid infinite loop
assert self.__class__ != Dict, "parameters of Parameters dict should never be called"
self._parameters = Dict()
assert self._parameters is not None
return self._parameters
@property
def dimension(self) -> int:
"""Dimension of the standardized space for this parameter
        i.e. size of the vector returned by get_standardized_data(reference=...)
"""
if self._dimension is None:
try:
self._dimension = self.get_standardized_data(reference=self).size
except utils.NotSupportedError:
self._dimension = 0
return self._dimension
def mutate(self) -> None:
"""Mutate parameters of the instance, and then its value
"""
self._check_frozen()
self.parameters.mutate()
self.set_standardized_data(self.random_state.normal(size=self.dimension), deterministic=False)
def sample(self: P) -> P:
"""Sample a new instance of the parameter.
This usually means spawning a child and mutating it.
This function should be used in optimizers when creating an initial population,
and parameter.heritage["lineage"] is reset to parameter.uid instead of its parent's
"""
child = self.spawn_child()
child.mutate()
child.heritage["lineage"] = child.uid
return child
def recombine(self: P, *others: P) -> None:
"""Update value and parameters of this instance by combining it with
other instances.
Parameters
----------
*others: Parameter
other instances of the same type than this instance.
"""
raise utils.NotSupportedError(f"Recombination is not implemented for {self.name}")
def get_standardized_data(self: P, *, reference: P) -> np.ndarray:
"""Get the standardized data representing the value of the instance as an array in the optimization space.
In this standardized space, a mutation is typically centered and reduced (sigma=1) Gaussian noise.
        The data only represents the value of this instance, not the parameters (e.g. mutable sigma), hence it does not
        fully represent the state of the instance. Also, in stochastic cases, the value can be non-deterministically
        deduced from the data (e.g. a categorical variable, for which the data includes sampling weights for each value)
Parameters
----------
reference: Parameter
the reference instance for representation in the standardized data space. This keyword parameter is
mandatory to make the code clearer.
If you use "self", this method will always return a zero vector.
Returns
-------
np.ndarray
the representation of the value in the optimization space
Note
----
- Operations between different standardized data should only be performed if each array was produced
by the same reference in the exact same state (no mutation)
- to make the code more explicit, the "reference" parameter is enforced as a keyword-only parameter.
"""
assert reference is None or isinstance(reference, self.__class__), f"Expected {type(self)} but got {type(reference)} as reference"
return self._internal_get_standardized_data(self if reference is None else reference)
def _internal_get_standardized_data(self: P, reference: P) -> np.ndarray:
raise utils.NotSupportedError(f"Export to standardized data space is not implemented for {self.name}")
def set_standardized_data(self: P, data: ArrayLike, *, reference: tp.Optional[P] = None, deterministic: bool = False) -> P:
"""Updates the value of the provided reference (or self) using the standardized data.
Parameters
----------
np.ndarray
the representation of the value in the optimization space
reference: Parameter
the reference point for representing the data ("self", if not provided)
deterministic: bool
whether the value should be deterministically drawn (max probability) in the case of stochastic parameters
Returns
-------
Parameter
self (modified)
Note
----
To make the code more explicit, the "reference" and "deterministic" parameters are enforced
as keyword-only parameters.
"""
assert isinstance(deterministic, bool)
sent_reference = self if reference is None else reference
assert isinstance(sent_reference, self.__class__), f"Expected {type(self)} but got {type(sent_reference)} as reference"
self._check_frozen()
self._internal_set_standardized_data(np.array(data, copy=False), reference=sent_reference, deterministic=deterministic)
return self
def _internal_set_standardized_data(self: P, data: np.ndarray, reference: P, deterministic: bool = False) -> None:
raise utils.NotSupportedError(f"Import from standardized data space is not implemented for {self.name}")
# PART 2 - Additional features
@property
def generation(self) -> int:
"""Generation of the parameter (children are current generation + 1)
"""
return self._generation
def get_value_hash(self) -> tp.Hashable:
"""Hashable object representing the current value of the instance
"""
val = self.value
if isinstance(val, (str, bytes, float, int)):
return val
elif isinstance(val, np.ndarray):
return val.tobytes()
else:
raise utils.NotSupportedError(f"Value hash is not supported for object {self.name}")
def _get_name(self) -> str:
"""Internal implementation of parameter name. This should be value independant, and should not account
for internal/model parameters.
"""
return self.__class__.__name__
@property
def name(self) -> str:
"""Name of the parameter
        This is used to keep track of how this Parameter is configured (including through internal/model parameters),
        mostly for reproducibility. A default version is always provided, but can be overridden directly
through the attribute, or through the set_name method (which allows chaining).
"""
if self._name is not None:
return self._name
substr = ""
if self._parameters is not None and self.parameters:
substr = f"[{self.parameters._get_parameters_str()}]"
if substr == "[]":
substr = ""
return f"{self._get_name()}" + substr
@name.setter
def name(self, name: str) -> None:
        self.set_name(name)  # set_name allows chaining
def __repr__(self) -> str:
strings = [self.name]
if not callable(self.value): # not a mutation
strings.append(str(self.value))
return ":".join(strings)
def set_name(self: P, name: str) -> P:
"""Sets a name and return the current instrumentation (for chaining)
Parameters
----------
name: str
new name to use to represent the Parameter
"""
self._name = name
return self
# %% Constraint management
def satisfies_constraints(self) -> bool:
"""Whether the instance satisfies the constraints added through
the `register_cheap_constraint` method
Returns
-------
bool
True iff the constraint is satisfied
"""
if self._parameters is not None and not self.parameters.satisfies_constraints():
return False
if not self._constraint_checkers:
return True
val = self.value
return all(func(val) for func in self._constraint_checkers)
def register_cheap_constraint(self, func: tp.Callable[[tp.Any], bool]) -> None:
"""Registers a new constraint on the parameter values.
Parameters
----------
func: Callable
function which, given the value of the instance, returns whether it satisfies the constraints.
Note
----
- this is only for checking after mutation/recombination/etc if the value still satisfy the constraints.
The constraint is not used in those processes.
- constraints should be fast to compute.
"""
if getattr(func, "__name__", "not lambda") == "<lambda>": # LambdaType does not work :(
warnings.warn("Lambda as constraint is not adviced because it may not be picklable.")
self._constraint_checkers.append(func)
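
    # Usage sketch: param.register_cheap_constraint(is_positive), where
    # is_positive is a hypothetical named function such as
    #     def is_positive(value): return value > 0
    # (a named function is preferred over a lambda, per the warning above).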
# %% random state
@property
def random_state(self) -> np.random.RandomState:
"""Random state the instrumentation and the optimizers pull from.
It can be seeded/replaced.
"""
if self._random_state is None:
# use the setter, to make sure the random state is propagated to the variables
seed = np.random.randint(2 ** 32, dtype=np.uint32)
self._set_random_state(np.random.RandomState(seed))
assert self._random_state is not None
return self._random_state
@random_state.setter
def random_state(self, random_state: np.random.RandomState) -> None:
self._set_random_state(random_state)
def _set_random_state(self, random_state: np.random.RandomState) -> None:
self._random_state = random_state
if self._parameters is not None:
self.parameters._set_random_state(random_state)
def spawn_child(self: P, new_value: tp.Optional[tp.Any] = None) -> P:
"""Creates a new instance which shares the same random generator than its parent,
is sampled from the same data, and mutates independently from the parentp.
If a new value is provided, it will be set to the new instance
Parameters
----------
new_value: anything (optional)
if provided, it will update the new instance value (cannot be used at the same time as new_data).
Returns
-------
Parameter
a new instance of the same class, with same content/internal-model parameters/...
Optionally, a new value will be set after creation
"""
rng = self.random_state # make sure to create one before spawning
child = self._internal_spawn_child()
child._set_random_state(rng)
child._constraint_checkers = list(self._constraint_checkers)
child._generation = self.generation + 1
child._descriptors = self._descriptors
child._name = self._name
child.parents_uids.append(self.uid)
child.heritage = dict(self.heritage)
if new_value is not None:
child.value = new_value
return child
def freeze(self) -> None:
"""Prevents the parameter from changing value again (through value, mutate etc...)
"""
self._frozen = True
if self._parameters is not None:
self._parameters.freeze()
def _check_frozen(self) -> None:
if self._frozen and not isinstance(self, Constant): # nevermind constants (since they dont spawn children)
raise RuntimeError(f"Cannot modify frozen Parameter {self}, please spawn a child and modify it instead"
"(optimizers freeze the parametrization and all asked and told candidates to avoid border effects)")
def _internal_spawn_child(self: P) -> P:
# default implem just forwards params
inputs = {k: v.spawn_child() if isinstance(v, Parameter) else v for k, v in self.parameters._content.items()}
child = self.__class__(**inputs)
return child
def copy(self: P) -> P: # TODO test (see former instrumentation_copy test)
"""Create a child, but remove the random state
This is used to run multiple experiments
"""
child = self.spawn_child()
child._name = self._name
child.random_state = None
return child
def _compute_descriptors(self) -> utils.Descriptors:
return utils.Descriptors()
@property
def descriptors(self) -> utils.Descriptors:
        if self._descriptors is None:
            self._descriptors = self._compute_descriptors()
return self._descriptors
class Constant(Parameter):
"""Parameter-like object for simplifying management of constant parameters:
mutation/recombination do nothing, value cannot be changed, standardize data is an empty array,
child is the same instance.
Parameter
---------
value: Any
the value that this parameter will always provide
"""
def __init__(self, value: tp.Any) -> None:
super().__init__()
if isinstance(value, Parameter):
raise TypeError("Only non-parameters can be wrapped in a Constant")
self._value = value
def _get_name(self) -> str:
return str(self._value)
def get_value_hash(self) -> tp.Hashable:
try:
return super().get_value_hash()
except utils.NotSupportedError:
return "#non-hashable-constant#"
@property
def value(self) -> tp.Any:
return self._value
@value.setter
def value(self, value: tp.Any) -> None:
if not (value == self._value or value is self._value):
raise ValueError(f'Constant value can only be updated to the same value (in this case "{self._value}")')
def get_standardized_data(self: P, *, reference: tp.Optional[P] = None) -> np.ndarray: # pylint: disable=unused-argument
return np.array([])
# pylint: disable=unused-argument
def set_standardized_data(self: P, data: ArrayLike, *, reference: tp.Optional[P] = None, deterministic: bool = False) -> P:
if np.array(data, copy=False).size:
raise ValueError(f"Constant dimension should be 0 (got data: {data})")
return self
def spawn_child(self: P, new_value: tp.Optional[tp.Any] = None) -> P:
if new_value is not None:
self.value = new_value # check that it is equal
return self # no need to create another instance for a constant
def recombine(self: P, *others: P) -> None:
pass
def mutate(self) -> None:
pass
def as_parameter(param: tp.Any) -> Parameter:
"""Returns a Parameter from anything:
either the input if it is already a parameter, or a Constant if not
This is convenient for iterating over Parameter and other objects alike
"""
if isinstance(param, Parameter):
return param
else:
return Constant(param)
class Dict(Parameter):
"""Dictionary-valued parameter. This Parameter can contain other Parameters,
its value is a dict, with keys the ones provided as input, and corresponding values are
either directly the provided values if they are not Parameter instances, or the value of those
Parameters. It also implements a getter to access the Parameters directly if need be.
Parameters
----------
**parameters: Any
the objects or Parameter which will provide values for the dict
Note
----
This is the base structure for all container Parameters, and it is
used to hold the internal/model parameters for all Parameter classes.
"""
def __init__(self, **parameters: tp.Any) -> None:
super().__init__()
self._content: tp.Dict[tp.Any, Parameter] = {k: as_parameter(p) for k, p in parameters.items()}
self._sizes: tp.Optional[tp.Dict[str, int]] = None
self._sanity_check(list(self._content.values()))
self._ignore_in_repr: tp.Dict[str, str] = {} # hacky undocumented way to bypass boring representations
def _sanity_check(self, parameters: tp.List[Parameter]) -> None:
"""Check that all parameters are different
""" # TODO: this is first order, in practice we would need to test all the different parameter levels together
if parameters:
assert all(isinstance(p, Parameter) for p in parameters)
ids = {id(p) for p in parameters}
if len(ids) != len(parameters):
raise ValueError("Don't repeat twice the same parameter")
def _compute_descriptors(self) -> utils.Descriptors:
init = utils.Descriptors()
return functools.reduce(operator.and_, [p.descriptors for p in self._content.values()], init)
def __getitem__(self, name: tp.Any) -> Parameter:
return self._content[name]
def __len__(self) -> int:
return len(self._content)
def _get_parameters_str(self) -> str:
params = sorted((k, p.name) for k, p in self._content.items()
if p.name != self._ignore_in_repr.get(k, "#ignoredrepr#"))
return ",".join(f"{k}={n}" for k, n in params)
def _get_name(self) -> str:
return f"{self.__class__.__name__}({self._get_parameters_str()})"
@property
def value(self) -> tp.Dict[str, tp.Any]:
return {k: as_parameter(p).value for k, p in self._content.items()}
@value.setter
def value(self, value: tp.Dict[str, tp.Any]) -> None:
cls = self.__class__.__name__
if not isinstance(value, dict):
raise TypeError(f"{cls} value must be a dict, got: {value}\nCurrent value: {self.value}")
if set(value) != set(self._content):
raise ValueError(f"Got input keys {set(value)} for {cls} but expected {set(self._content)}\nCurrent value: {self.value}")
for key, val in value.items():
as_parameter(self._content[key]).value = val
def get_value_hash(self) -> tp.Hashable:
return tuple(sorted((x, y.get_value_hash()) for x, y in self._content.items()))
def _internal_get_standardized_data(self: D, reference: D) -> np.ndarray:
data = {k: self[k].get_standardized_data(reference=p) for k, p in reference._content.items()}
if self._sizes is None:
self._sizes = OrderedDict(sorted((x, y.size) for x, y in data.items()))
assert self._sizes is not None
data_list = [data[k] for k in self._sizes]
if not data_list:
return np.array([])
return data_list[0] if len(data_list) == 1 else np.concatenate(data_list) # type: ignore
def _internal_set_standardized_data(self: D, data: np.ndarray, reference: D, deterministic: bool = False) -> None:
if self._sizes is None:
self.get_standardized_data(reference=self)
assert self._sizes is not None
if data.size != sum(v for v in self._sizes.values()):
raise ValueError(f"Unexpected shape {data.shape} for {self} with dimension {self.dimension}:\n{data}")
data = data.ravel()
start, end = 0, 0
for name, size in self._sizes.items():
end = start + size
self._content[name].set_standardized_data(data[start: end], reference=reference[name], deterministic=deterministic)
start = end
assert end == len(data), f"Finished at {end} but expected {len(data)}"
def mutate(self) -> None:
# pylint: disable=pointless-statement
self.random_state # make sure to create one before using
for param in self._content.values():
param.mutate()
def sample(self: D) -> D:
child = self.spawn_child()
child._content = {k: p.sample() for k, p in self._content.items()}
child.heritage["lineage"] = child.uid
return child
def recombine(self, *others: "Dict") -> None:
if not others:
return
# pylint: disable=pointless-statement
self.random_state # make sure to create one before using
assert all(isinstance(o, self.__class__) for o in others)
for k, param in self._content.items():
param.recombine(*[o[k] for o in others])
def _internal_spawn_child(self: D) -> D:
child = self.__class__()
child._content = {k: v.spawn_child() for k, v in self._content.items()}
return child
def _set_random_state(self, random_state: np.random.RandomState) -> None:
super()._set_random_state(random_state)
for param in self._content.values():
if isinstance(param, Parameter):
param._set_random_state(random_state)
def satisfies_constraints(self) -> bool:
compliant = super().satisfies_constraints()
return compliant and all(param.satisfies_constraints() for param in self._content.values() if isinstance(param, Parameter))
def freeze(self) -> None:
super().freeze()
for p in self._content.values():
p.freeze()
```
#### File: nevergrad/parametrization/test_param_doc.py
```python
import numpy as np
# pylint: disable=reimported,redefined-outer-name,unused-variable,unsubscriptable-object, unused-argument
# pylint: disable=import-outside-toplevel
def test_param_example() -> None:
# DOC_PARAM_0
import nevergrad as ng
# build a parameter providing a dict value:
param = ng.p.Dict(
# logarithmically distributed float
log=ng.p.Log(lower=0.01, upper=1.0),
# one-dimensional array of length 2
array=ng.p.Array(shape=(2,)),
# character, either "a" or "b or "c".
char=ng.p.Choice(["a", "b", "c"])
)
print(param.value)
# {'log': 0.01,
# 'array': array([0., 0.]),
# 'char': 'a'}
# DOC_PARAM_1
# DOC_PARAM_10
# create a new instance
child = param.spawn_child()
# update its value
child.value = {'log': 0.2,
'array': np.array([12., 13.]),
'char': 'c'}
# export to standardized space
data = child.get_standardized_data(reference=param)
print(data)
# np.array([12., 13., 0., 0., 0.69, 0.90])
# DOC_PARAM_11
# DOC_PARAM_100
param.mutate()
print(param.value)
# {'log': 0.155,
# 'array': np.array([-0.966, 0.045]),
# 'char': 'a'}
# increase the step/sigma for array
# (note that it's adviced to to this during the creation
# of the variable:
# array=ng.p.Array(shape=(2,)).set_mutation(sigma=10))
param["array"].set_mutation(sigma=10) # type: ignore
param.mutate()
print(param.value)
# {'log': 0.155,
# 'array': np.array([-9.47, 8.38]), # larger mutation
# 'char': 'a'}
# DOC_PARAM_101
# DOC_PARAM_1000
param.random_state.seed(12)
# DOC_PARAM_1001
``` |
{
"source": "9sneha-n/pari",
"score": 2
} |
#### File: pari/article/image_formats.py
```python
from django.utils.html import escape
from wagtail.images.formats import register_image_format, \
unregister_image_format, Format
from wagtail.images.models import SourceImageIOError
class FullWidthImgFormat(Format):
def image_to_editor_html(self, image, alt_text, extra_attributes=None):
if extra_attributes is None:
extra_attributes = {}
return self.image_to_html(image, alt_text, extra_attributes)
def image_to_html(self, image, alt_text, extra_attributes=None):
if extra_attributes is None:
extra_attributes = {}
try:
rendition = image.get_rendition(self.filter_spec)
except SourceImageIOError:
# Image file is (probably) missing from /media/original_images - generate a dummy
# rendition so that we just output a broken image, rather than crashing out completely
# during rendering
Rendition = image.renditions.model # pick up any custom Image / Rendition classes that may be in use
rendition = Rendition(image=image, width=0, height=0)
rendition.file.name = 'not-found'
try:
half_rendition = image.get_rendition('max-512x410')
except SourceImageIOError:
# Image file is (probably) missing from /media/original_images - generate a dummy
# rendition so that we just output a broken image, rather than crashing out completely
# during rendering
Rendition = image.renditions.model # pick up any custom Image / Rendition classes that may be in use
half_rendition = Rendition(image=image, width=0, height=0)
half_rendition.file.name = 'not-found'
if self.classnames:
class_attr = 'class="%s" ' % escape(self.classnames)
else:
class_attr = ''
sizes = "(max-width: 480px) 512w, 100vw"
srcset = "%s 512w, %s" % (escape(half_rendition.url),
escape(rendition.url))
return ('<img %s%s '
'width="%d" height="%d" '
'alt="%s" srcset="%s" sizes="%s">') % (
extra_attributes, class_attr,
rendition.width, rendition.height, alt_text,
srcset, sizes
)
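
# Illustrative output (hypothetical rendition URLs): a full-width image is
# rendered roughly as
#   <img class="richtext-image full-width" width="1400" height="1120"
#        alt="..." srcset="/media/img.max-512x410.jpg 512w,
#        /media/img.max-1400x1120.jpg" sizes="(max-width: 480px) 512px, 100vw">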
register_image_format(Format('halfwidth', 'Half Width (512px)', 'richtext-image half-width', 'max-512x410'))
unregister_image_format("fullwidth")
register_image_format(FullWidthImgFormat('fullwidth', 'Full width', 'richtext-image full-width', 'max-1400x1120'))
```
#### File: article/streamfields/blocks.py
```python
from functools import partial
from django import forms
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from wagtail.core import blocks
from wagtail.core.blocks import PageChooserBlock, RichTextBlock, FieldBlock, RawHTMLBlock, IntegerBlock
from wagtail.core.models import Page
from wagtail.embeds.blocks import EmbedBlock
from wagtail.images.blocks import ImageChooserBlock
from album.models import Album
from core.widgets import JqueryChosenSelectMultipleWithAddObject
from face.models import Face
from location.models import Location
from resources.models import Resource
import operator
ALIGNMENT_CHOICES = [('left', 'Left column'), ('right', 'Right column')]
RichTextMiniBlock = partial(RichTextBlock, features=['bold', 'italic'])
RichTextParagraphBlock = partial(RichTextBlock,
features=['h2', 'h3', 'h4', 'h5', 'h6', 'bold', 'italic', 'ol', 'ul', 'hr', 'link',
'document-link'])
class ModelMultipleChoiceBlock(FieldBlock):
def __init__(self, target_model, required=True, help_text=None, **kwargs):
self.target_model = target_model
self.field = forms.ModelMultipleChoiceField(
queryset=self.target_model.objects.all(),
widget=JqueryChosenSelectMultipleWithAddObject,
required=required,
help_text=help_text,
)
super(ModelMultipleChoiceBlock, self).__init__(**kwargs)
def to_python(self, value):
if not value:
return value
else:
return self.target_model.objects.filter(pk__in=value)
def get_prep_value(self, value):
if not value:
return value
else:
return [each.pk for each in value]
def value_from_form(self, value):
if not value or all(isinstance(each, self.target_model) for each in value):
return value
else:
return self.target_model.objects.filter(pk__in=value)
def value_for_form(self, value):
if not value:
return value
elif all(isinstance(each, self.target_model) for each in value):
return [each.pk for each in value]
else:
return []
# TODO implement caption in the block it is implemented in.
class ImageBlock(blocks.StructBlock):
image = ImageChooserBlock()
class Meta:
icon = 'image'
template = 'article/blocks/image.html'
# TODO: This is implemented in the latest wagtail. Remove it after upgrading.
class PageTypeChooserBlock(PageChooserBlock):
"""Custom implementation of PageChooserBlock to limit page selection to specific page types.
"""
def __init__(self, for_models=[Page], **kwargs):
self.for_models = for_models
super(PageTypeChooserBlock, self).__init__(**kwargs)
@cached_property
def target_model(self):
if len(self.for_models) == 1:
return self.for_models[0]
else:
from wagtail.core.models import Page
return Page
@cached_property
def widget(self):
from django.utils.translation import ugettext_lazy as _
from wagtail.admin.widgets import AdminPageChooser
# Support importing from dotted string in-order to prevent circular-import for certain models(Say Article)
self.for_models = [import_string(model) if isinstance(model, str) else model for model in self.for_models]
if any(not issubclass(each, Page) for each in self.for_models):
raise TypeError("All models passed should be a sub-class of wagtail.core.models.Page")
model_names = ' / '.join(each.__name__.lower() for each in self.for_models)
admin_page_chooser = AdminPageChooser(target_models=self.for_models)
admin_page_chooser.choose_one_text = _('Choose a %s' % model_names)
admin_page_chooser.choose_another_text = _('Choose another %s' % model_names)
admin_page_chooser.link_to_chosen_text = _('Edit this %s' % model_names)
return admin_page_chooser
class FullWidthImageBlock(blocks.StructBlock):
image = ImageBlock()
caption = RichTextMiniBlock(required=False)
class Meta:
icon = 'image'
template = 'article/blocks/full_width_image.html'
label = 'Full width image'
class TwoColumnImageBlock(blocks.StructBlock):
image_left = ImageBlock()
image_right = ImageBlock()
class Meta:
icon = 'image'
template = 'article/blocks/two_column_image.html'
class ParagraphBlock(blocks.StructBlock):
ALIGN_CONTENT_CHOICES = [('default', 'Default'), ('center', 'Center')]
content = RichTextParagraphBlock()
align_content = blocks.ChoiceBlock(choices=ALIGN_CONTENT_CHOICES, default=ALIGN_CONTENT_CHOICES[0][0])
class Meta:
icon = 'title'
label = 'Text'
template = 'article/blocks/paragraph.html'
class PargraphBlockWithOptionalContent(ParagraphBlock):
content = RichTextParagraphBlock(required=False)
class ParagraphWithImageBlock(blocks.StructBlock):
image = ImageBlock()
align_image = blocks.ChoiceBlock(choices=ALIGNMENT_CHOICES, default=ALIGNMENT_CHOICES[0][0])
content = ParagraphBlock()
class Meta:
icon = 'doc-full'
label = 'Paragraphs with an image'
template = 'article/blocks/paragraph_with_image.html'
class FaceBlock(blocks.StructBlock):
face = PageTypeChooserBlock(for_models=[Face])
class Meta:
icon = 'image'
template = 'article/blocks/face.html'
class ParagraphWithBlockQuoteBlock(blocks.StructBlock):
quote = RichTextMiniBlock()
align_quote = blocks.ChoiceBlock(choices=ALIGNMENT_CHOICES, default=ALIGNMENT_CHOICES[1][0])
content = ParagraphBlock()
class Meta:
icon = 'openquote'
label = 'Quote with text'
template = 'article/blocks/paragraph_with_block_quote.html'
class FullWidthBlockQuote(blocks.StructBlock):
quote = RichTextMiniBlock()
class Meta:
icon = 'openquote'
label = 'Full width quote'
template = 'article/blocks/full_width_block_quote.html'
class NColumnParagraphBlock(blocks.StructBlock):
paragraph = blocks.ListBlock(ParagraphBlock())
class Meta:
template = 'article/blocks/columnar_paragraph.html'
label = 'Columnar text'
icon = 'title'
class ParagraphWithEmbedBlock(blocks.StructBlock):
embed = EmbedBlock()
embed_caption = RichTextMiniBlock(required=False)
embed_max_width = IntegerBlock(required=False, help_text="Optional field. Maximum width of the content in pixels to"
" be requested from the content provider(e.g YouTube). "
"If the requested width is not supported, provider will be"
" supplying the content with nearest available width.")
embed_align = blocks.ChoiceBlock(choices=ALIGNMENT_CHOICES, default=ALIGNMENT_CHOICES[0][0])
content = ParagraphBlock()
class Meta:
icon = 'media'
label = 'Embed with text'
template = 'article/blocks/paragraph_with_embed.html'
class NColumnImageBlock(blocks.StructBlock):
images = blocks.ListBlock(ImageBlock())
height = IntegerBlock(min_value=0, required=True, default=380)
caption = RichTextMiniBlock(required=False)
class Meta:
template = 'article/blocks/columnar_image.html'
label = 'Columnar Images'
class ParagraphWithRawEmbedBlock(blocks.StructBlock):
embed = RawHTMLBlock(help_text="Embed HTML code(an iframe)")
embed_caption = RichTextMiniBlock(required=False)
embed_align = blocks.ChoiceBlock(choices=ALIGNMENT_CHOICES, default=ALIGNMENT_CHOICES[0][0])
content = PargraphBlockWithOptionalContent(required=False)
class Meta:
icon = 'media'
label = 'Raw embed with text'
template = 'article/blocks/paragraph_with_raw_embed.html'
class FullWidthEmbedBlock(blocks.StructBlock):
embed = EmbedBlock(required=True, help_text="Enter URL for the embed block")
embed_caption = RichTextMiniBlock(required=False)
class Meta:
icon = 'media'
label = 'Full width embed'
template = 'article/blocks/full_width_embed.html'
class VideoWithQuoteBlock(blocks.StructBlock):
video = EmbedBlock(help_text="YouTube video URL")
video_height = IntegerBlock(required=True, default=270)
video_caption = RichTextMiniBlock(required=False)
quote = RichTextMiniBlock()
align_quote = blocks.ChoiceBlock(choices=ALIGNMENT_CHOICES, default=ALIGNMENT_CHOICES[0][1])
class Meta:
icon = 'openquote'
label = 'Video with quote'
template = 'article/blocks/video_with_block_quote.html'
class ParagraphWithMapBlock(blocks.StructBlock):
locations = ModelMultipleChoiceBlock(target_model=Location)
map_align = blocks.ChoiceBlock(choices=ALIGNMENT_CHOICES, default=ALIGNMENT_CHOICES[0][0])
content = ParagraphBlock()
class Meta:
label = 'Map with text'
template = 'article/blocks/paragraph_with_map.html'
icon = 'site'
class ImageWithCaptionAndHeightBlock(ImageBlock):
height = IntegerBlock(min_value=0, required=True, default=380)
caption = RichTextMiniBlock(required=False)
class ImageWithQuoteAndParagraphBlock(blocks.StructBlock):
image = ImageWithCaptionAndHeightBlock(required=True)
align_image = blocks.ChoiceBlock(choices=ALIGNMENT_CHOICES, default=ALIGNMENT_CHOICES[0][0])
content_1 = PargraphBlockWithOptionalContent(required=False)
quote = FullWidthBlockQuote(required=True)
content_2 = PargraphBlockWithOptionalContent(required=False)
class Meta:
icon = "image"
label = 'Image with quote and text'
template = 'article/blocks/image_with_quote_and_paragraph.html'
# TODO remove this class , this module is deprecated.
class ImageWithBlockQuote(blocks.StructBlock):
image = ImageWithCaptionAndHeightBlock()
quote = RichTextMiniBlock()
align_quote = blocks.ChoiceBlock(choices=ALIGNMENT_CHOICES, default=ALIGNMENT_CHOICES[0][0])
class Meta:
icon = 'image'
template = 'article/blocks/image_with_block_quote.html'
label = 'Image with block quote'
class ParagraphWithPageBlock(blocks.StructBlock):
page = PageTypeChooserBlock(for_models=['article.models.Article', Album, Face, Resource])
align_image = blocks.ChoiceBlock(choices=ALIGNMENT_CHOICES, default=ALIGNMENT_CHOICES[0][0])
content = ParagraphBlock()
class Meta:
icon = 'link'
template = 'article/blocks/paragraph_with_page.html'
label = 'Page reference with text'
class NColumnImageWithTextBlock(NColumnImageBlock):
align_columnar_images = blocks.ChoiceBlock(choices=ALIGNMENT_CHOICES, default=ALIGNMENT_CHOICES[0][0])
content = PargraphBlockWithOptionalContent(required=False)
class Meta:
icon = 'image'
label = 'Columnar images with text'
```
#### File: pari/author/views.py
```python
from django.shortcuts import render
from django.utils.text import slugify
from django.urls import reverse
from django.http import HttpResponseRedirect
from wagtail.admin.modal_workflow import render_modal_workflow
from .forms import AuthorAdminForm
def get_result(instance):
if instance:
return {
'id': instance.id,
'name': instance.name
}
else:
return None
def add_author(request):
instance = None
if request.method == "POST":
form = AuthorAdminForm(request.POST)
if form.is_valid():
instance = form.save(commit=False)
instance.slug = slugify(instance.name)
instance.save()
else:
form = AuthorAdminForm()
return render_modal_workflow(
request,
"core/add_object.html",
None,
{
"add_object_url": reverse("author_add"),
"name": "Author",
"form": form,
"instance": instance
},
json_data={
"step": "chooser",
"result": get_result(instance)
}
)
def add_translator(request):
return HttpResponseRedirect(reverse("author_add"))
def add_photographer(request):
return HttpResponseRedirect(reverse("author_add"))
```
#### File: pari/author/wagtail_hooks.py
```python
from django.conf.urls import url
from django.utils.translation import ugettext_lazy as _
from wagtail.core import hooks
from wagtail.contrib.modeladmin.options import (
ModelAdmin, modeladmin_register)
from .views import add_author
from .models import Author, Role
@hooks.register('register_admin_urls')
def author_admin_urls():
return [
url(r'^authors/add/$', add_author, name='author_add'),
]
class AuthorAdmin(ModelAdmin):
model = Author
menu_label = _("Authors")
menu_icon = "user"
search_fields = ('name', 'email', )
class RoleAdmin(ModelAdmin):
model = Role
menu_label = _("Roles")
menu_icon = "user"
search_fields = ('name',)
modeladmin_register(AuthorAdmin)
modeladmin_register(RoleAdmin)
```
#### File: category/migrations/0003_auto_20170504_1829.py
```python
from __future__ import unicode_literals
from django.db import migrations, models
def update_categories(apps, schema_editor):
Category = apps.get_model('category.category')
category_update_dict = {
"things-we-do": { "name": "Things We Do", "description": "The world of rural labour" },
"things-we-make": { "name": "Things We Make", "description": "Artisans, artists and craftspersons" },
"farming-and-its-crisis": { "name": "Farming and its Crisis", "description": "The troubled world of agriculture" },
"Little takes": { "name": "Little Takes", "description": "Small, impactful video clips" },
"the-rural-in-the-urban": { "name": "The Rural in the Urban", "description": "Migrant workers across India" },
"women": { "name": "Women", "description": "More than half the sky" },
"adivasis": { "name": "Adivasis", "description": "The first dwellers" },
"dalits": { "name": "Dalits", "description": "Struggles of the oppressed" },
"we-are": { "name": "We Are", "description": "Communities and cultures" },
"resource-conflicts": { "name": "Resource Conflicts", "description": "Jal, jungle, zameen" },
"foot-soldiers-of-freedom": { "name": "Foot-Soldiers of Freedom", "description": "The last living freedom fighters" },
"small-world": { "name": "Small World", "description": "A focus on children" },
"musafir": { "name": "Musafir", "description": "Travellers’ tales, everyday lives" },
"getting-there": { "name": "Getting There", "description": "Zany rural transportation" },
"the-wild": { "name": "The Wild", "description": "The world of nature" },
"sports-games": { "name": "Rural Sports", "description": "Games people play" },
"health": { "name": "Healthcare", "description": "The state of rural health" },
"folklore": { "name": "Mosaic", "description": "Culture and folklore" },
"environment": { "name": "Environment", "description": "People, livelihoods, habitats" },
"tongues": { "name": "Tongues", "description": "The universe of our languages" },
"visible-work-invisible-women": { "name": "Visible Work, Invisible Women", "description": "Women and work: a photo exhibition" },
"one-offs": { "name": "One-Offs", "description": "Videos, photos, articles" },
"headgear": { "name": "Things We Wear", "description": "Clothing, headgear, jewellery..." },
"pari-for-schools": { "name": "PARI for Schools", "description": "Work done for PARI by students" },
"videozone": { "name": "VideoZone", "description": "Stories told in moving pictures" },
"audiozone": { "name": "AudioZone", "description": "You could listen all day" },
"photozone": { "name": "PhotoZone", "description": "Collections of photographs" }
}
for slug, value in category_update_dict.items():
name = value["name"]
description = value["description"]
try:
category = Category.objects.get(slug=slug)
except Category.DoesNotExist:
category = None
if category:
category.name = name
category.description = description
category.save()
class Migration(migrations.Migration):
dependencies = [
('category', '0002_category_image'),
]
operations = [
migrations.RunPython(update_categories),
]
```
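Note that `RunPython` above is given no reverse callable, so this data migration cannot be rolled back (e.g. with `migrate category 0002`). If reversibility is wanted, the standard pattern is to pass `migrations.RunPython.noop` as the second argument, which leaves the renamed categories in place on rollback:
```python
operations = [
    migrations.RunPython(update_categories, migrations.RunPython.noop),
]
```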
#### File: pari/category/models.py
```python
from __future__ import unicode_literals
from django.db import models
from django.urls import reverse
from six import python_2_unicode_compatible
from modelcluster.fields import ParentalKey
@python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=100, unique=True)
slug = models.CharField(max_length=100)
description = models.TextField()
image = models.ForeignKey('core.AffixImage',
null=True, blank=True,
on_delete=models.SET_NULL,
related_name="category_for_image")
order = models.PositiveIntegerField(default=1)
class Meta:
verbose_name_plural = "Categories"
ordering = ["order"]
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("story-detail", kwargs={"slug": self.slug})
```
#### File: pari/core/context_processors.py
```python
from django.conf import settings as django_settings
from django.utils.translation import ugettext_lazy as _
from wagtail.core.models import Site
from .models import HomePage
def settings(request):
if not getattr(django_settings, "SOCIAL", None):
return {}
try:
site = Site.objects.get(hostname=request.get_host())
except Site.DoesNotExist:
site = None
return {
"SOCIAL_FACEBOOK": django_settings.SOCIAL.get("FACEBOOK", ""),
"SOCIAL_TWITTER": django_settings.SOCIAL.get("TWITTER", ""),
"SOCIAL_INSTAGRAM": django_settings.SOCIAL.get("INSTAGRAM", ""),
"SOCIAL_YOUTUBE": django_settings.SOCIAL.get("YOUTUBE", ""),
"SOCIAL_SOUND_CLOUD": django_settings.SOCIAL.get("SOUND_CLOUD", ""),
"SOCIAL_GITHUB_REPO": django_settings.SOCIAL.get("GITHUB_REPO", ""),
"GOOGLE_ANALYTICS_ID": django_settings.SOCIAL.get("GOOGLE_ANALYTICS_ID", ""),
"SITE_TITLE": django_settings.SITE_TITLE,
"site": site,
"SUPPORTED_LANGUAGES": django_settings.SUPPORTED_LANGUAGES,
}
def path(request):
return {
"absolute_path_minus_query_string": request.build_absolute_uri(request.path),
"absolute_full_path": request.build_absolute_uri(request.get_full_path())
}
```
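These functions only run if they are registered with the template engine. A sketch of the corresponding `settings.py` fragment, assuming the dotted path `core.context_processors` implied by the file header (the surrounding entries are the usual Django defaults):
```python
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                # project processors defined above (module path assumed):
                "core.context_processors.settings",
                "core.context_processors.path",
            ],
        },
    },
]
```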
#### File: pari/donation/forms.py
```python
from django import forms
from django.utils.translation import ugettext_lazy as _
from .fields import AmountField
from .helpers import DonationOptions
class DonateForm(forms.Form):
name = forms.CharField(
label=_("NAME"),
max_length=100,
widget=forms.TextInput(attrs={"class": "form-control"})
)
email = forms.EmailField(
label=_("EMAIL"),
widget=forms.EmailInput(attrs={"class": "form-control"})
)
phone = forms.CharField(
label=_("PHONE NUMBER"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
pan = forms.CharField(
label=_("PAN NUMBER"),
max_length=10,
widget=forms.TextInput(attrs={"class": "form-control"}),
help_text=_("PAN is required as per government regulations.")
)
amount = AmountField(
choices=DonationOptions.Amount.CHOICES,
label=_('AMOUNT')
)
frequency = forms.ChoiceField(
choices=DonationOptions.Frequency.FORM_CHOICES,
widget=forms.RadioSelect,
label=_('TYPE')
)
term = forms.ChoiceField(
choices=DonationOptions.Term.CHOICES,
initial=DonationOptions.Term.Y5,
widget=forms.Select(attrs={"class": "form-control term-select"}),
label=_('DURATION')
)
is_indian = forms.BooleanField(
initial=False,
label=_("I declare that I am an Indian citizen"),
widget=forms.CheckboxInput()
)
def clean_is_indian(self):
data = self.cleaned_data["is_indian"]
if data != True:
raise forms.ValidationError(_("Sorry, we can accept donations "
"from Indians only."))
return data
def clean_term(self):
if self.cleaned_data.get('frequency', '') == DonationOptions.Frequency.Y and \
self.cleaned_data['term'] in (DonationOptions.Term.M6, DonationOptions.Term.Y1):
raise forms.ValidationError(_('Term should be at least 2 years for Yearly donation'))
return self.cleaned_data['term']
```
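The `clean_is_indian` and `clean_term` hooks run automatically inside `is_valid()`. A minimal sketch of a view driving this form; the view name and template path are illustrative, not taken from the project:
```python
from django.shortcuts import render
from donation.forms import DonateForm  # import path assumed from the file header

def donate(request):
    form = DonateForm(request.POST or None)
    if request.method == "POST" and form.is_valid():
        # clean_is_indian / clean_term have already been enforced here
        amount = form.cleaned_data["amount"]
        # ... hand the validated data to the payment gateway ...
    return render(request, "donation/donate.html", {"form": form})
```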
#### File: pari/functional_tests/data_setup.py
```python
from functional_tests.factory import ArticleFactory
from functional_tests.factory import CategoryFactory
from functional_tests.factory import PhotoAlbumSlideFactory
from functional_tests.factory import TalkingAlbumSlideFactory
class DataSetup():
def create_article(self, title, author, category, location, image, show_modular_content=False,
modular_content=None):
if modular_content is not None:
return ArticleFactory.create(title=title, authors=(author,), categories=(category,),
locations=(location,), featured_image=image,
show_modular_content=show_modular_content, modular_content=modular_content
)
return ArticleFactory.create(title=title, authors=(author,), categories=(category,),
locations=(location,), featured_image=image,
show_modular_content=show_modular_content
)
def create_video_article(self, title, author, location, image):
category = CategoryFactory.create(name="VideoZone", slug="videozone", order=16)
return self.create_article(title, author, category, location, image)
def create_talking_album(self, image):
talking_slide = TalkingAlbumSlideFactory.create(image=image)
return talking_slide.page
def create_photo_album(self, image):
photo_slide = PhotoAlbumSlideFactory.create(image=image)
return photo_slide.page
```
#### File: functional_tests/factory/album_factory.py
```python
import factory
from album.models import Album, AlbumSlide
from django.utils.text import slugify
from functional_tests.factory import ContentTypeFactory
class AlbumFactory(factory.django.DjangoModelFactory):
class Meta:
model = Album
django_get_or_create = ('title',)
path = factory.Sequence(lambda n: u'000100{}'.format(n)) # Album sequence starts from 00010050
depth = 2
numchild = 0
title = "Album Page"
slug = factory.LazyAttribute(lambda obj: slugify(obj.title))
live = True
has_unpublished_changes = False
seo_title = " "
show_in_menus = False
search_description = " "
go_live_at = '2011-10-24 12:43'
expire_at = '2050-12-31 12:43'
expired = False
content_type = factory.SubFactory(ContentTypeFactory, app_label="album", model="album")
locked = False
latest_revision_created_at = '2011-10-24 12:43'
first_published_at = '2011-10-24 12:43'
description = "<p> Album Content </p>"
language = "en"
@classmethod
def _setup_next_sequence(cls):
return getattr(cls, 'starting_seq_num', 50)
class TalkingAlbumSlideFactory(factory.django.DjangoModelFactory):
class Meta:
model = AlbumSlide
page = factory.SubFactory(AlbumFactory, title='talking album')
audio = "109687682"
description = "<p><i>Varavattoor village, Desamangalam panchayat, Wadakkanchery, Kerala</i></p>"
created_on = "2015-07-31 10:29:49"
modified_on = "2015-08-31 10:29:49"
@factory.post_generation
def image(self, create, extracted, **kwargs):
if not create:
return
if extracted:
self.image = extracted
class PhotoAlbumSlideFactory(factory.django.DjangoModelFactory):
class Meta:
model = AlbumSlide
page = factory.SubFactory(AlbumFactory, title="photo album")
description = "<p><i>Kerala's 'Green Army' is addressing low paddy productivity and a shortage of farm labour</i></p>"
created_on = "2015-07-31 10:29:49"
modified_on = "2015-08-31 10:29:49"
@factory.post_generation
def image(self, create, extracted, **kwargs):
if not create:
return
if extracted:
self.image = extracted
```
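The `image` declarations above use factory_boy's `post_generation` hook: an `image=` keyword passed at call time is consumed by the hook and assigned only after the slide instance exists. A short usage sketch, assuming the `ImageFactory` from the sibling module shown further below:
```python
from functional_tests.factory import ImageFactory

image = ImageFactory(title="paddy field")
slide = TalkingAlbumSlideFactory(image=image)  # post_generation sets slide.image
album = slide.page                             # the Album built by the SubFactory
```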
#### File: functional_tests/factory/face_factory.py
```python
import factory
from django.contrib.contenttypes.models import ContentType
from django.utils.text import slugify
from face.models import Face
from functional_tests.factory import ImageFactory, LocationFactory
class ContentTypeFactory(factory.django.DjangoModelFactory):
class Meta:
model = ContentType
django_get_or_create = ('app_label', 'model')
app_label = "core"
model = "homepage"
class FaceFactory(factory.django.DjangoModelFactory):
class Meta:
model = Face
path = factory.Sequence(lambda n: u'0001000100{}'.format(n)) # from wagtailcore_pagerevision
depth = 3
numchild = 0
title = 'Face Page'
slug = factory.LazyAttribute(lambda obj: slugify(obj.title))
live = True
has_unpublished_changes = False
show_in_menus = False
search_description = ''
go_live_at = '1995-02-07 12:00'
expire_at = '2050-12-31 12:43'
expired = False
content_type = factory.SubFactory(ContentTypeFactory, app_label="face", model="face")
locked = False
latest_revision_created_at = '1995-02-07 12:00'
first_published_at = '1995-02-07 12:00'
language = 'en'
occupation = 'farmer'
occupation_of_parent = ''
adivasi = ''
quote = ''
child = 'f'
age = '22'
gender = 'M'
image = factory.SubFactory(ImageFactory, title='face image')
location = factory.SubFactory(LocationFactory)
@classmethod
def _setup_next_sequence(cls):
return getattr(cls, 'starting_sequence_num', 20)
```
#### File: functional_tests/factory/image_factory.py
```python
import factory
from core.models import AffixImage
class ImageFactory(factory.django.DjangoModelFactory):
class Meta:
model = AffixImage
title = "loom"
file = "uploads/stories-1.jpg"
width = 3216
height = 2136
created_at = "2015-07-31 10:25"
collection_id = 1
event = "PARI Stories from all over in all languages"
@factory.post_generation
def locations(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for location in extracted:
self.locations.add(location)
@factory.post_generation
def photographers(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for photographer in extracted:
self.photographers.add(photographer)
@factory.post_generation
def categories(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for category in extracted:
self.categories.add(category)
```
#### File: functional_tests/pages/donate_page.py
```python
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from functional_tests.base import Page
class DonatePage(Page):
donate_page_container = (By.CSS_SELECTOR, ".donate")
def __init__(self, driver, relative_url='/'):
super(DonatePage, self).__init__(driver, self.donate_page_container, '/pages/donate/')
```
#### File: pari/location/models.py
```python
from __future__ import unicode_literals
from django.contrib.gis.db import models
from six import python_2_unicode_compatible
from django.utils.text import slugify
import django.db.models.deletion
@python_2_unicode_compatible
class Location(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
point = models.PointField(srid=4326)
# Below points are to prevent reverse caching
district = models.CharField(max_length=100)
state = models.CharField(max_length=50)
region = models.CharField(max_length=100, null=True, blank=True)
panchayat = models.CharField(max_length=100, null=True, blank=True)
sub_district_type = models.ForeignKey("SubDistrictType",
related_name="location", null=True, blank=True, on_delete=django.db.models.deletion.PROTECT)
sub_district_name = models.CharField(max_length=100, null=True, blank=True)
objects = models.Manager()
def __str__(self):
location = filter(lambda x: x,
[self.name, self.panchayat, self.region, self.sub_district_name, self.district, self.state])
return ", ".join(location)
def save(self, *args, **kwargs):
super(Location, self).save(*args, **kwargs)
computed_slug = slugify("%s-%s" % (self.id, self.name))[:50]
if computed_slug != self.slug:
self.slug = computed_slug
Location.objects.filter(pk=self.id).update(slug=computed_slug)
@property
def address(self):
addr = self.name
addr += ", " + self.sub_district_name if self.sub_district_name else ""
addr += ", " + self.district if self.district else ""
addr += ", " + self.state if self.state else ""
return addr
@property
def minimal_address(self):
addr = self.district
addr += ", " + self.state if self.state else ""
return addr
class Meta:
unique_together = ["name", "district", "state", "panchayat", "sub_district_name"]
ordering = ["name"]
@python_2_unicode_compatible
class SubDistrictType(models.Model):
name = models.CharField(max_length=100, null=True, blank=True)
def __str__(self):
return self.name
```
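Because `point` is a GeoDjango `PointField` with SRID 4326, creating a `Location` takes a GEOS `Point` in longitude/latitude order. A minimal creation sketch with illustrative coordinates; note how `save()` then derives the id-prefixed slug:
```python
from django.contrib.gis.geos import Point

loc = Location.objects.create(
    name="Chennai",
    district="Chennai",
    state="Tamil Nadu",
    point=Point(80.27, 13.08, srid=4326),  # (lon, lat)
)
# save() recomputes the slug from the new primary key, e.g. "1-chennai"
```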
#### File: pari/location/tests.py
```python
from django.test import TestCase
from django.test import RequestFactory
from .views import LocationDetail
from functional_tests.factory import LocationFactory
from functional_tests.factory import ArticleFactory
from functional_tests.factory import PhotoAlbumSlideFactory
from functional_tests.factory import ImageFactory
import shutil
class LocationDetailTests(TestCase):
def setUp(self):
shutil.copy2('core/static/img/stories-1.jpg', 'media/uploads/stories-1.jpg')
self.location = LocationFactory(name="chennai", id="100")
self.english_article = ArticleFactory(title="english_article", locations=(self.location,), language='en')
self.hindi_article = ArticleFactory(title="hindi_article", locations=(self.location,), language='hi')
image = ImageFactory.create(locations=(self.location,))
self.english_album = PhotoAlbumSlideFactory(image=image).page
def test_lang_is_used_from_query_params(self):
request = RequestFactory().get('/locations/100-chennai/?lang=hi')
response = LocationDetail.as_view()(request, object=self.location, slug="100-chennai")
for article in response.context_data['articles']:
assert article.title == self.hindi_article.title
def test_lang_is_set_to_english_by_default(self):
request = RequestFactory().get('/locations/100-chennai/')
response = LocationDetail.as_view()(request, object=self.location, slug="100-chennai")
for article in response.context_data['articles']:
self.assertIn(article.title, [self.english_article.title, self.english_album.title])
def test_all_articles_are_returned_if_lang_is_all(self):
request = RequestFactory().get('/locations/100-chennai/?lang=all')
response = LocationDetail.as_view()(request, object=self.location, slug="100-chennai")
assert len(list((response.context_data['articles']))) == 3
``` |
{
"source": "9Strike/ap_praktikum",
"score": 2
} |
#### File: 9Strike/ap_praktikum/PAP22-252j.py
```python
import measure as ms
from measure import sqrt, exp, ln
import numpy as np
import scipy.constants as cs
from scipy.optimize import curve_fit
from scipy.stats import chi2
ms.plt.rc('text', usetex=True)
ms.plt.rc('font', family='serif')
# Measured data
U0 = 520
d_U0 = 10
ΔT0 = 10.0
ΔT_Ag = 10.0
ΔT_In = 120.0
T0 = 8 * cs.minute
T_Ag = 400
T_In = 50 * cs.minute
n0 = np.loadtxt('data/252/252_1j.dat', usecols=[1], unpack=True)
n_Ag_1 = np.loadtxt('data/252/252_2j.dat', usecols=[1], unpack=True)
n_Ag_2 = np.loadtxt('data/252/252_3j.dat', usecols=[1], unpack=True)
n_Ag_3 = np.loadtxt('data/252/252_4j.dat', usecols=[1], unpack=True)
n_Ag_4 = np.loadtxt('data/252/252_5j.dat', usecols=[1], unpack=True)
n_In = np.loadtxt('data/252/252_6j.dat', usecols=[1], unpack=True)
# Background radiation
n0_m = ms.mv(n0 / ΔT0)
d_n0_m = ms.dsto_mv(n0 / ΔT0)
# Fit function
def f_Ag(x, A1, λ1, A2, λ2):
return A1 * exp(-λ1 * x) + A2 * exp(-λ2 * x)
def f_In(x, A, λ):
return A * exp(-λ * x)
# Ag decay
t_Ag = np.arange(ΔT_Ag / 2, T_Ag + ΔT_Ag / 2, ΔT_Ag)
N_Ag = (n_Ag_1 + n_Ag_2 + n_Ag_3 + n_Ag_4)
d_N_Ag = sqrt(N_Ag) / (4 * ΔT_Ag)
N_Ag = N_Ag / (4 * ΔT_Ag)
N_Ag = N_Ag - n0_m
d_N_Ag = sqrt(d_N_Ag**2 + d_n0_m**2)
cut = 0
for i in range(len(N_Ag)):
if N_Ag[i] / d_N_Ag[i] < 3.0:
cut = i
break
[A1_Ag, λ1_Ag, A2_Ag, λ2_Ag], pcov = curve_fit(f_Ag, t_Ag[:cut], N_Ag[:cut], sigma=d_N_Ag[:cut], p0=[30, ln(2) / 24.6, 5, ln(2) / (2.41 * cs.minute)])
[d_A1_Ag, d_λ1_Ag, d_A2_Ag, d_λ2_Ag] = sqrt(np.diag(pcov))
t_Ag_fit = np.linspace(t_Ag[0], t_Ag[-1], 1000)
N_Ag_fit = f_Ag(t_Ag_fit, A1_Ag, λ1_Ag, A2_Ag, λ2_Ag)
ms.pltext.initplot(num=1, title=r'Mittelwert der Zerfallsrate $A$ aus 4 Messungen von $^{108}$Ag und $^{110}$Ag zu der Zeit $t$', xlabel=r'$t$ / s', ylabel=r'$A$ / Bq', fignum=True)
ms.pltext.plotdata(t_Ag, N_Ag, d_N_Ag, color='gray')
ms.plt.plot(t_Ag_fit, N_Ag_fit)
dof_Ag_fit = len(N_Ag[:cut]) - 4
χ2_Ag = ms.chi2(N_Ag[:cut], d_N_Ag[:cut], f_Ag(t_Ag[:cut], A1_Ag, λ1_Ag, A2_Ag, λ2_Ag))
χ2_Ag_red = χ2_Ag / dof_Ag_fit
p_Ag_fit = 1 - chi2.cdf(χ2_Ag, dof_Ag_fit)
τ1_Ag = ln(2) / λ1_Ag
d_τ1_Ag = ln(2) * d_λ1_Ag / λ1_Ag**2
τ2_Ag = ln(2) / λ2_Ag
d_τ2_Ag = ln(2) * d_λ2_Ag / λ2_Ag**2
print("Ag108, Ag110 decay:")
print(ms.val("A1", A1_Ag, d_A1_Ag, unit='Bq'))
print(ms.val("λ1", λ1_Ag, d_λ1_Ag, unit='1/s', prefix=False))
print(ms.val("A2", A2_Ag, d_A2_Ag, unit='Bq'))
print(ms.val("λ2", λ2_Ag, d_λ2_Ag, unit='1/s', prefix=False))
print(ms.val("χ²", χ2_Ag))
print(ms.val("χr²", χ2_Ag_red))
print(ms.val("pfit", 100 * p_Ag_fit, unit='%'))
print()
print("Half life of Ag110:")
print(ms.val("τ", τ1_Ag, d_τ1_Ag, unit='s'))
print(ms.sig("τ", τ1_Ag, d_τ1_Ag, 24.6))
print("Half life of Ag108:")
print(ms.val("τ", τ2_Ag / cs.minute, d_τ2_Ag / cs.minute, unit='min'))
print(ms.sig("τ", τ2_Ag, d_τ2_Ag, 2.37 * cs.minute))
print()
# In decay
t_In = np.arange(ΔT_In / 2, T_In + ΔT_In / 2, ΔT_In)
d_n_In = sqrt(n_In) / ΔT_In
n_In = n_In / ΔT_In
n_In = n_In - n0_m
d_n_In = sqrt(d_n_In**2 + d_n0_m**2)
[A_In, λ_In], pcov = curve_fit(f_In, t_In[1:], n_In[1:], sigma=d_n_In[1:], p0=[4, 0.5e-3])
[d_A_In, d_λ_In] = sqrt(np.diag(pcov))
t_In_fit = np.linspace(t_In[0], t_In[-1], 1000)
n_In_fit = f_In(t_In_fit, A_In, λ_In)
ms.pltext.initplot(num=2, title=r'Zerfallsrate $A$ von $^{116\rm{m}}$In (und $^{116}$In) in Abhängigkeit zu der Zeit $t$.', xlabel=r'$t$ / s', ylabel=r'$A$ / Bq', fignum=True)
ms.pltext.plotdata(t_In, n_In, d_n_In, color='gray')
ms.plt.plot(t_In_fit, n_In_fit)
dof_In_fit = len(n_In[1:]) - 2
χ2_In = ms.chi2(n_In[1:], d_n_In[1:], f_In(t_In[1:], A_In, λ_In))
χ2_In_red = χ2_In / (len(n_In[1:]) - 2)
p_In_fit = 1 - chi2.cdf(χ2_In, dof_In_fit)
τ_In = ln(2) / λ_In
d_τ_In = ln(2) * d_λ_In / λ_In**2
print("In decay:")
print(ms.val("A", A_In, d_A_In, unit='Bq'))
print(ms.val("λ", λ_In, d_λ_In, unit='1/s', prefix=False))
print(ms.val("χ²", χ2_In))
print(ms.val("χr²", χ2_In_red))
print(ms.val("pfit", 100 * p_In_fit, unit='%'))
print()
print("Half life of In116:")
print(ms.val("τ", τ_In / cs.minute, d_τ_In / cs.minute, unit='min'))
print(ms.sig("τ", τ_In, d_τ_In, 54.29 * cs.minute))
ms.pltext.savefigs('figures/252')
ms.plt.show()
```
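The half-life conversion used twice above follows from τ = ln 2 / λ; Gaussian error propagation on that expression gives Δτ = ln 2 · Δλ / λ², which is exactly what the `d_τ` lines compute. A quick numeric sanity check with assumed fit values:
```python
import numpy as np

lam, d_lam = 0.028, 0.002            # assumed decay constant and error, in 1/s
tau = np.log(2) / lam                # ~24.8 s, the order of Ag110's 24.6 s
d_tau = np.log(2) * d_lam / lam**2   # ~1.8 s
```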
#### File: 9Strike/ap_praktikum/PAP22-255j.py
```python
import measure as ms
from measure import pi, sqrt, sin, tan, cos, arcsin, arccos, exp, ln
from measure import npfarray as npf
import numpy as np
import scipy.constants as cs
ms.plt.rc('text', usetex=True)
ms.plt.rc('font', family='serif')
titles = [
r'Bestimmung des Grenzwinkels $\beta_G$ durch Extrapolation' '\n' r'des linearen Endes des Grenzspektrums (Zählrate $n$ in Abhängigkeit des Winkels $\beta$).',
r'Bestimmung der Lagen der $K_\alpha$, $K_\beta$-Peaks durch Gaussfits an die Zählraten-Winkel-Abhängigkeit des LiF-Kristalls.',
r'Zählrate $n$ in Abhängigkeit der Spannung der Röntgenröhre $U$ bei einem konstanten Winkel von 7.5$^\circ$',
r'Bestimmung der Lagen der $K_\alpha$, $K_\beta$-Peaks durch Gaussfits an die Zählraten-Winkel-Abhängigkeit des NaCl-Kristalls.'
]
# Constants
d_LiF = 201.4 * cs.pico
rho_NaCl = 2.164 * cs.gram / cs.centi**3
M_NaCl = 58.44 * cs.gram
# (1) Analysis of the spectrum of the LiF-crystal
# Determination of planck's constant
U1 = 35.0 * cs.kilo
t1 = 5.0
beta1, n1 = np.loadtxt('data/255/data1.txt', unpack=True)
d_n1 = sqrt(n1 * t1) / t1
n1_0 = ms.mv(n1[0:7])
d_n1_0 = ms.dsto_mv(n1[0:7])
ms.pltext.initplot(num=1, title=titles[0], xlabel=r'$\beta$ / $^\circ$', ylabel=r'$n$ / (1/s)', fignum=True)
s1, d_s1, b1, d_b1 = ms.linreg(beta1[:20], n1[:20], d_n1[:20], fit_range=range(10, 13), plot=True)
beta1_G = (n1_0 - b1) / s1
d_beta1_G = beta1_G * sqrt((d_n1_0**2 + d_b1**2) / (n1_0 - b1)**2 + (d_s1 / s1)**2)
beta1_G *= cs.degree
d_beta1_G *= cs.degree
ld1_G = 2 * d_LiF * sin(beta1_G)
d_ld1_G = 2 * d_LiF * cos(beta1_G) * d_beta1_G
h1 = (cs.e * U1 / cs.c) * ld1_G
d_h1 = (cs.e * U1 / cs.c) * d_ld1_G
beta1_G2 = arcsin(ld1_G / d_LiF)
d_beta1_G2 = d_ld1_G / sqrt(d_LiF**2 - ld1_G**2)
print()
print(ms.val("n0", n1_0, d_n1_0, unit='1/s', prefix=False))
print(ms.val("s1", s1, d_s1))
print(ms.val("b1", b1, d_b1))
print(ms.val("β_G", beta1_G / cs.degree, d_beta1_G / cs.degree, unit='°', prefix=False))
print(ms.val("λ_G", ld1_G, d_ld1_G, unit='m'))
print(ms.val("h", h1, d_h1, unit='Js', prefix=False))
print(ms.sig("h,l", h1, d_h1, cs.h))
print(ms.val("β_G2", beta1_G2 / cs.degree, d_beta1_G2 / cs.degree, unit='°', prefix=False))
print()
# (p1) Analysis of the K_α, K_β peaks in first and second order
def gauss(x, mu, sigma, A):
return A / sqrt(2 * pi * sigma**2) * exp(-(x - mu)**2 / (2 * sigma**2))
t_p1 = 20.0
beta1_p1, n1_p1 = np.loadtxt('data/255/data2.txt', unpack=True)
beta2_p1, n2_p1 = np.loadtxt('data/255/data3.txt', unpack=True)
beta3_p1, n3_p1 = np.loadtxt('data/255/data4.txt', unpack=True)
beta4_p1, n4_p1 = np.loadtxt('data/255/data5.txt', unpack=True)
d_n1_p1 = sqrt(n1_p1 * t_p1) / t_p1
d_n2_p1 = sqrt(n2_p1 * t_p1) / t_p1
d_n3_p1 = sqrt(n3_p1 * t_p1) / t_p1
d_n4_p1 = sqrt(n4_p1 * t_p1) / t_p1
ms.pltext.initplot(num=2, nrows=2, ncols=2, title=titles[1], xlabel=r'$\beta$ / $^\circ$', ylabel=r'$n$ / (1/s)', fignum=True)
ms.pltext.set_axis(0)
[mu1_p1, sigma1_p1, A1_p1], [d_mu1_p1, d_sigma1_p1, d_A1_p1] = ms.fit(beta1_p1, n1_p1, d_n1_p1, gauss, p0=[9.0, 0.2, 450], plot=True, fit_range=range(3, 7))
ms.pltext.set_axis(1)
[mu2_p1, sigma2_p1, A2_p1], [d_mu2_p1, d_sigma2_p1, d_A2_p1] = ms.fit(beta2_p1, n2_p1, d_n2_p1, gauss, p0=[10.15, 0.15, 500], plot=True, fit_range=range(3, 7))
ms.pltext.set_axis(2)
[mu3_p1, sigma3_p1, A3_p1], [d_mu3_p1, d_sigma3_p1, d_A3_p1] = ms.fit(beta3_p1, n3_p1, d_n3_p1, gauss, p0=[18.3, 0.2, 50], plot=True, fit_range=range(5, 9))
ms.pltext.set_axis(3)
[mu4_p1, sigma4_p1, A4_p1], [d_mu4_p1, d_sigma4_p1, d_A4_p1] = ms.fit(beta4_p1, n4_p1, d_n4_p1, gauss, p0=[20.7, 0.15, 100], plot=True, fit_range=range(4, 9))
ld1_p1 = 2 * d_LiF * sin(mu1_p1 * cs.degree)
d_ld1_p1 = 2 * d_LiF * cos(mu1_p1 * cs.degree) * sigma1_p1 * cs.degree
ld2_p1 = 2 * d_LiF * sin(mu2_p1 * cs.degree)
d_ld2_p1 = 2 * d_LiF * cos(mu2_p1 * cs.degree) * sigma2_p1 * cs.degree
ld3_p1 = d_LiF * sin(mu3_p1 * cs.degree)
d_ld3_p1 = d_LiF * cos(mu3_p1 * cs.degree) * sigma3_p1 * cs.degree
ld4_p1 = d_LiF * sin(mu4_p1 * cs.degree)
d_ld4_p1 = d_LiF * cos(mu4_p1 * cs.degree) * sigma4_p1 * cs.degree
print(ms.val("β1", mu1_p1, sigma1_p1, unit='°', prefix=False))
print(ms.val("β2", mu2_p1, sigma2_p1, unit='°', prefix=False))
print(ms.val("β3", mu3_p1, sigma3_p1, unit='°', prefix=False))
print(ms.val("β4", mu4_p1, sigma4_p1, unit='°', prefix=False))
print(ms.val("Δβ_2", 2 * sqrt(2 * ln(2)) * sigma2_p1, 2 * sqrt(2 * ln(2)) * d_sigma2_p1, unit='°', prefix=False))
print()
print(ms.val("λ1", ld1_p1, d_ld1_p1, unit='m'))
print(ms.val("λ2", ld2_p1, d_ld2_p1, unit='m'))
print(ms.val("λ3", ld3_p1, d_ld3_p1, unit='m'))
print(ms.val("λ4", ld4_p1, d_ld4_p1, unit='m'))
print()
print(ms.sig("λ_α1,l", ld2_p1, d_ld2_p1, 71.1e-12, perc=True))
print(ms.sig("λ_α2,l", ld4_p1, d_ld4_p1, 71.1e-12, perc=True))
print(ms.sig("λ_β1,l", ld1_p1, d_ld1_p1, 63.1e-12, perc=True))
print(ms.sig("λ_β2,l", ld3_p1, d_ld3_p1, 63.1e-12, perc=True))
print(ms.sig("λ_α", ld2_p1, d_ld2_p1, ld4_p1, d_ld4_p1, perc=True))
print(ms.sig("λ_β", ld1_p1, d_ld1_p1, ld3_p1, d_ld3_p1, perc=True))
print()
# Counting rate - Voltage dependency Measurement
t = 20
beta = 7.5 * cs.degree
d_beta = 0.1 * cs.degree
U = np.arange(20.0, 36.0, 1.0) * cs.kilo
n = npf([1.35, 1.35, 2.75, 5.55, 32.95, 78.35, 122.8, 163.3, 200.6, 237.0, 270.2, 307.6, 337.1, 374.7, 403.7, 433.3])
d_n = sqrt(n * t) / t
n_0 = ms.mv(n[:3])
d_n_0 = ms.dsto_mv(n[:3])
ms.pltext.initplot(num=3, title=titles[2], xlabel=r'$U$ / V', ylabel=r'$n$ / (1/s)', fignum=True)
s, d_s, b, d_b = ms.linreg(U, n, d_n, fit_range=range(3, len(U)), plot=True)
U_G = (n_0 - b) / s
d_U_G = U_G * sqrt((d_n_0**2 + d_b**2) / (n_0 - b)**2 + (d_s / s)**2)
h = (2 * cs.e * d_LiF / cs.c) * sin(beta) * U_G
d_h = h * sqrt((d_U_G / U_G)**2 + (d_beta / tan(beta))**2)
print(ms.val("n0", n_0, d_n_0, unit='1/s', prefix=False))
print(ms.val("s", s, d_s))
print(ms.val("b", b, d_b))
print(ms.val("U_G", U_G, d_U_G, unit='V'))
print(ms.val("h", h, d_h, unit='Js', prefix=False))
print(ms.sig("h,l", h, d_h, cs.h))
print(ms.sig("h", h1, d_h1, h, d_h))
print()
# (2) Analysis of the spectrum of the NaCl-crystal
# Analysis of the K_α, K_β peaks in first and second order
# Determination of the lattice constant of NaCl and Avogadro's constant
U2 = 35 * cs.kilo
t2 = 5.0
beta2, n2 = np.loadtxt('data/255/data6.txt', unpack=True)
d_n2 = sqrt(n2 * t2) / t2
ms.pltext.initplot(num=4, ncols=2, nrows=2, title=titles[3], xlabel=r'$\beta$ / $^\circ$', ylabel=r'$n$ / (1/s)', fignum=True)
ms.pltext.set_axis(0)
[mu1_2, sigma1_2, A1_2], [d_mu1_2, d_sigma1_2, d_A1_2] = ms.fit(beta2, n2, d_n2, gauss, p0=[6.5, 0.2, 650], fit_range=range(15, 19), plot=True)
ms.pltext.set_axis(1)
[mu2_2, sigma2_2, A2_2], [d_mu2_2, d_sigma2_2, d_A2_2] = ms.fit(beta2, n2, d_n2, gauss, p0=[7.25, 0.2, 1000], fit_range=range(20, 24), plot=True)
ms.pltext.set_axis(2)
[mu3_2, sigma3_2, A3_2], [d_mu3_2, d_sigma3_2, d_A3_2] = ms.fit(beta2, n2, d_n2, gauss, p0=[13, 0.15, 100], fit_range=range(49, 53), plot=True)
ms.pltext.set_axis(3)
[mu4_2, sigma4_2, A4_2], [d_mu4_2, d_sigma4_2, d_A4_2] = ms.fit(beta2, n2, d_n2, gauss, p0=[14.5, 0.15, 200], fit_range=range(57, 61), plot=True)
print(ms.val("μ1", mu1_2, sigma1_2, unit='°', prefix=False))
print(ms.val("μ2", mu2_2, sigma2_2, unit='°', prefix=False))
print(ms.val("μ3", mu3_2, sigma3_2, unit='°', prefix=False))
print(ms.val("μ4", mu4_2, sigma4_2, unit='°', prefix=False))
print()
beta1_2 = mu1_2 * cs.degree
d_beta1_2 = sigma1_2 * cs.degree
beta2_2 = mu2_2 * cs.degree
d_beta2_2 = sigma2_2 * cs.degree
l_alpha_2 = ld2_p1 / sin(beta2_2)
d_l_alpha_2 = l_alpha_2 * sqrt((d_ld2_p1 / ld2_p1)**2 + (d_beta2_2 / tan(beta2_2))**2)
l_beta_2 = ld1_p1 / sin(beta1_2)
d_l_beta_2 = l_beta_2 * sqrt((d_ld1_p1 / ld1_p1)**2 + (d_beta1_2 / tan(beta1_2))**2)
N_A_alpha_2 = 4 * M_NaCl / (rho_NaCl * l_alpha_2**3)
d_N_A_alpha_2 = N_A_alpha_2 * (3 * d_l_alpha_2 / l_alpha_2)
N_A_beta_2 = 4 * M_NaCl / (rho_NaCl * l_beta_2**3)
d_N_A_beta_2 = N_A_beta_2 * (3 * d_l_beta_2 / l_beta_2)
print(ms.val("a", l_alpha_2, d_l_alpha_2, unit='m'))
print(ms.val("a", l_beta_2, d_l_beta_2, unit='m'))
print(ms.sig("a", l_alpha_2, d_l_alpha_2, l_beta_2, d_l_beta_2))
print()
print(ms.val("N_A", N_A_alpha_2, d_N_A_alpha_2, unit='1/mol', prefix=False))
print(ms.sig("N_A,l", N_A_alpha_2, d_N_A_alpha_2, cs.N_A))
print(ms.val("N_A", N_A_beta_2, d_N_A_beta_2, unit='1/mol', prefix=False))
print(ms.sig("N_A,l", N_A_beta_2, d_N_A_beta_2, cs.N_A))
print(ms.sig("N_A", N_A_alpha_2, d_N_A_alpha_2, N_A_beta_2, d_N_A_beta_2))
print()
# Show plots
ms.pltext.savefigs("figures/255")
ms.plt.show()
``` |
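Both parts of this analysis rest on the Bragg condition n·λ = 2d·sin β and the Duane–Hunt limit λ_G = hc/(eU); combined, they give h = (eU/c)·2d·sin β_G, the expression behind `h1` and `h` above. A compact sanity check; β_G ≈ 5.05° is an assumed value chosen to be consistent with U = 35 kV:
```python
import scipy.constants as cs
from math import sin, radians

d_LiF = 201.4e-12                  # LiF lattice spacing, m
U, beta_G = 35e3, 5.05             # tube voltage (V), assumed limit angle (deg)
lam_G = 2 * d_LiF * sin(radians(beta_G))
h = cs.e * U * lam_G / cs.c
print(h / cs.h)                    # ~1.00 when beta_G matches U
```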
{
"source": "9sunsforcjr/DRL-GNN",
"score": 3
} |
#### File: DRL-GNN/DQN/mpnn.py
```python
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import regularizers
class myModel(tf.keras.Model):
def __init__(self, hparams):
super(myModel, self).__init__()
self.hparams = hparams
# Define layers here
self.Message = tf.keras.models.Sequential()
self.Message.add(keras.layers.Dense(self.hparams['link_state_dim'],
activation=tf.nn.selu, name="FirstLayer"))
self.Update = tf.keras.layers.GRUCell(self.hparams['link_state_dim'], dtype=tf.float32)
self.Readout = tf.keras.models.Sequential()
self.Readout.add(keras.layers.Dense(self.hparams['readout_units'],
activation=tf.nn.selu,
kernel_regularizer=regularizers.l2(hparams['l2']),
name="Readout1"))
self.Readout.add(keras.layers.Dropout(rate=hparams['dropout_rate']))
self.Readout.add(keras.layers.Dense(self.hparams['readout_units'],
activation=tf.nn.selu,
kernel_regularizer=regularizers.l2(hparams['l2']),
name="Readout2"))
self.Readout.add(keras.layers.Dropout(rate=hparams['dropout_rate']))
self.Readout.add(keras.layers.Dense(1, kernel_regularizer=regularizers.l2(hparams['l2']),
name="Readout3"))
def build(self, input_shape=None):
self.Message.build(input_shape=tf.TensorShape([None, self.hparams['link_state_dim']*2]))
self.Update.build(input_shape=tf.TensorShape([None,self.hparams['link_state_dim']]))
self.Readout.build(input_shape=[None, self.hparams['link_state_dim']])
self.built = True
@tf.function
def call(self, states_action, states_graph_ids, states_first, states_second, states_num_edges, training=False):
# Define the forward pass
link_state = states_action
# Execute T times
for _ in range(self.hparams['T']):
# Combine the hidden states of the main edges with those of their neighbours
mainEdges = tf.gather(link_state, states_first)
neighEdges = tf.gather(link_state, states_second)
edgesConcat = tf.concat([mainEdges, neighEdges], axis=1)
### 1.a Message passing for each link with all its neighbours
outputs = self.Message(edgesConcat)
### 1.b Sum of output values according to link id index
edges_inputs = tf.math.unsorted_segment_sum(data=outputs, segment_ids=states_second,
num_segments=states_num_edges)
### 2. Update for each link
# GRUcell needs a 3D tensor as state because there is a matmul: Wrap the link state
outputs, links_state_list = self.Update(edges_inputs, [link_state])
link_state = links_state_list[0]
# Perform sum of all hidden states
edges_combi_outputs = tf.math.segment_sum(link_state, states_graph_ids, name=None)
r = self.Readout(edges_combi_outputs,training=training)
return r
```
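A smoke-test sketch for the model above, using the same hyper-parameter keys the training script below defines; the tiny 4-edge ring graph and all tensor values are illustrative:
```python
import tensorflow as tf

hparams = {'l2': 0.1, 'dropout_rate': 0.01, 'link_state_dim': 20,
           'readout_units': 35, 'T': 4}
model = myModel(hparams)
model.build()

num_edges = 4
link_state = tf.random.normal([num_edges, hparams['link_state_dim']])
graph_ids = tf.zeros([num_edges], dtype=tf.int32)   # all edges belong to graph 0
first = tf.constant([0, 1, 2, 3], dtype=tf.int32)   # adjacent edge index pairs
second = tf.constant([1, 2, 3, 0], dtype=tf.int32)
q = model(link_state, graph_ids, first, second,
          tf.constant(num_edges), training=False)   # -> shape (1, 1) q-value
```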
#### File: DRL-GNN/DQN/train_DQN.py
```python
import numpy as np
import gym
import gc
import os
import sys
import gym_environments
import random
import mpnn as gnn
import tensorflow as tf
from collections import deque
import multiprocessing
import time as tt
import glob
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
ENV_NAME = 'GraphEnv-v1'
graph_topology = 0 # 0==NSFNET, 1==GEANT2, 2==Small Topology, 3==GBN
SEED = 37
ITERATIONS = 10000
TRAINING_EPISODES = 20
EVALUATION_EPISODES = 40
FIRST_WORK_TRAIN_EPISODE = 60
MULTI_FACTOR_BATCH = 6 # Number of batches used in training
TAU = 0.08 # Only used in soft weights copy
differentiation_str = "sample_DQN_agent"
checkpoint_dir = "./models"+differentiation_str
store_loss = 3 # Store the loss every store_loss batches
os.environ['PYTHONHASHSEED']=str(SEED)
np.random.seed(SEED)
random.seed(SEED)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/
# tf.config.threading.set_inter_op_parallelism_threads(1)
# tf.config.threading.set_intra_op_parallelism_threads(1)
tf.random.set_seed(1)
train_dir = "./TensorBoard/"+differentiation_str
# summary_writer = tf.summary.create_file_writer(train_dir)
listofDemands = [8, 32, 64]
copy_weights_interval = 50
evaluation_interval = 20
epsilon_start_decay = 70
hparams = {
'l2': 0.1,
'dropout_rate': 0.01,
'link_state_dim': 20,
'readout_units': 35,
'learning_rate': 0.0001,
'batch_size': 32,
'T': 4,
'num_demands': len(listofDemands)
}
MAX_QUEUE_SIZE = 4000
def cummax(alist, extractor):
with tf.name_scope('cummax'):
maxes = [tf.reduce_max(extractor(v)) + 1 for v in alist]
cummaxes = [tf.zeros_like(maxes[0])]
for i in range(len(maxes) - 1):
cummaxes.append(tf.math.add_n(maxes[0:i + 1]))
return cummaxes
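# Worked example of cummax (shapes assumed): if three candidate graphs have
# 'first' tensors whose maxima are 3, 5 and 2, then maxes = [4, 6, 3] and the
# returned offsets are [0, 4, 10]; adding offset i to graph i's edge indices
# shifts all graphs into one disjoint index space before tf.concat.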
class DQNAgent:
def __init__(self, batch_size):
self.memory = deque(maxlen=MAX_QUEUE_SIZE)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.writer = None
self.K = 4 # K-paths
self.listQValues = None
self.numbersamples = batch_size
self.action = None
self.capacity_feature = None
self.bw_allocated_feature = np.zeros((env_training.numEdges,len(env_training.listofDemands)))
self.global_step = 0
self.primary_network = gnn.myModel(hparams)
self.primary_network.build()
self.target_network = gnn.myModel(hparams)
self.target_network.build()
self.optimizer = tf.keras.optimizers.SGD(learning_rate=hparams['learning_rate'],momentum=0.9,nesterov=True)
def act(self, env, state, demand, source, destination, flagEvaluation):
"""
Given a demand stored in the environment, allocates it on each of the K=4 shortest paths of the current 'state'
and predicts the q-values of the K=4 resulting graph states using the GNN model.
Picks the action following an epsilon-greedy approach. flagEvaluation=True indicates that we are evaluating
the model; the agent then always acts greedily and the dropout layers stay inactive.
"""
# Set to True if we need to compute K=4 q-values and take the maxium
takeMax_epsilon = False
# List of graphs
listGraphs = []
# List of graph features that are used in the cummax() call
list_k_features = list()
# Initialize action
action = 0
# We get the K-paths between source-destination
pathList = env.allPaths[str(source) +':'+ str(destination)]
path = 0
# 1. Implement epsilon-greedy to pick allocation
# If flagEvaluation==True we are EVALUATING => always take the action with the highest predicted q-value
# Otherwise, we are training with normal epsilon-greedy strategy
if flagEvaluation:
# If evaluation, compute the K=4 q-values and take the maximum value
takeMax_epsilon = True
else:
# If training, compute epsilon-greedy
z = np.random.random()
if z > self.epsilon:
# Compute K=4 q-values and pick the one with highest value
# In case of multiple same max values, return the first one
takeMax_epsilon = True
else:
# Pick a random path and compute only one q-value
path = np.random.randint(0, len(pathList))
action = path
# 2. Allocate (S,D, linkDemand) demand using the K shortest paths
while path < len(pathList):
state_copy = np.copy(state)
currentPath = pathList[path]
i = 0
j = 1
# 3. Iterate over paths' pairs of nodes and allocate demand to bw_allocated
while (j < len(currentPath)):
state_copy[env.edgesDict[str(currentPath[i]) + ':' + str(currentPath[j])]][1] = demand
i = i + 1
j = j + 1
# 4. Add allocated graphs' features to the list. Later we will compute their q-values using cummax
listGraphs.append(state_copy)
features = self.get_graph_features(env, state_copy)
list_k_features.append(features)
if not takeMax_epsilon:
# If we don't need to compute the K=4 q-values we exit
break
path = path + 1
vs = [v for v in list_k_features]
# We compute the graphs_ids to later perform the unsorted_segment_sum for each graph and obtain the
# link hidden states for each graph.
graph_ids = [tf.fill([tf.shape(vs[it]['link_state'])[0]], it) for it in range(len(list_k_features))]
first_offset = cummax(vs, lambda v: v['first'])
second_offset = cummax(vs, lambda v: v['second'])
tensors = ({
'graph_id': tf.concat([v for v in graph_ids], axis=0),
'link_state': tf.concat([v['link_state'] for v in vs], axis=0),
'first': tf.concat([v['first'] + m for v, m in zip(vs, first_offset)], axis=0),
'second': tf.concat([v['second'] + m for v, m in zip(vs, second_offset)], axis=0),
'num_edges': tf.math.add_n([v['num_edges'] for v in vs]),
}
)
# Predict qvalues for all graphs within tensors
self.listQValues = self.primary_network(tensors['link_state'], tensors['graph_id'], tensors['first'],
tensors['second'], tensors['num_edges'], training=False).numpy()
if takeMax_epsilon:
# If we computed the K=4 q-values, we take the max
action = np.argmax(self.listQValues)
else:
action = 0
return action, list_k_features[action]
def get_graph_features(self, env, copyGraph):
"""
We iterate over the converted graph nodes and take the features. The capacity and bw allocated features
are normalized on the fly.
"""
self.bw_allocated_feature.fill(0.0)
# Normalize capacity feature
self.capacity_feature = (copyGraph[:,0] - 100.00000001) / 200.0
iter = 0
for i in copyGraph[:, 1]:
if i == 8:
self.bw_allocated_feature[iter][0] = 1
elif i == 32:
self.bw_allocated_feature[iter][1] = 1
elif i == 64:
self.bw_allocated_feature[iter][2] = 1
iter = iter + 1
sample = {
'num_edges': env.numEdges,
'length': env.firstTrueSize,
'betweenness': tf.convert_to_tensor(value=env.between_feature, dtype=tf.float32),
'bw_allocated': tf.convert_to_tensor(value=self.bw_allocated_feature, dtype=tf.float32),
'capacities': tf.convert_to_tensor(value=self.capacity_feature, dtype=tf.float32),
'first': tf.convert_to_tensor(env.first, dtype=tf.int32),
'second': tf.convert_to_tensor(env.second, dtype=tf.int32)
}
sample['capacities'] = tf.reshape(sample['capacities'][0:sample['num_edges']], [sample['num_edges'], 1])
sample['betweenness'] = tf.reshape(sample['betweenness'][0:sample['num_edges']], [sample['num_edges'], 1])
hiddenStates = tf.concat([sample['capacities'], sample['betweenness'], sample['bw_allocated']], axis=1)
paddings = tf.constant([[0, 0], [0, hparams['link_state_dim'] - 2 - hparams['num_demands']]])
link_state = tf.pad(tensor=hiddenStates, paddings=paddings, mode="CONSTANT")
inputs = {'link_state': link_state, 'first': sample['first'][0:sample['length']],
'second': sample['second'][0:sample['length']], 'num_edges': sample['num_edges']}
return inputs
def _write_tf_summary(self, gradients, loss):
with summary_writer.as_default():
tf.summary.scalar(name="loss", data=loss[0], step=self.global_step)
tf.summary.histogram(name='gradients_5', data=gradients[5], step=self.global_step)
tf.summary.histogram(name='gradients_7', data=gradients[7], step=self.global_step)
tf.summary.histogram(name='gradients_9', data=gradients[9], step=self.global_step)
tf.summary.histogram(name='FirstLayer/kernel:0', data=self.primary_network.variables[0], step=self.global_step)
tf.summary.histogram(name='FirstLayer/bias:0', data=self.primary_network.variables[1], step=self.global_step)
tf.summary.histogram(name='kernel:0', data=self.primary_network.variables[2], step=self.global_step)
tf.summary.histogram(name='recurrent_kernel:0', data=self.primary_network.variables[3], step=self.global_step)
tf.summary.histogram(name='bias:0', data=self.primary_network.variables[4], step=self.global_step)
tf.summary.histogram(name='Readout1/kernel:0', data=self.primary_network.variables[5], step=self.global_step)
tf.summary.histogram(name='Readout1/bias:0', data=self.primary_network.variables[6], step=self.global_step)
tf.summary.histogram(name='Readout2/kernel:0', data=self.primary_network.variables[7], step=self.global_step)
tf.summary.histogram(name='Readout2/bias:0', data=self.primary_network.variables[8], step=self.global_step)
tf.summary.histogram(name='Readout3/kernel:0', data=self.primary_network.variables[9], step=self.global_step)
tf.summary.histogram(name='Readout3/bias:0', data=self.primary_network.variables[10], step=self.global_step)
summary_writer.flush()
self.global_step = self.global_step + 1
@tf.function
def _forward_pass(self, x):
prediction_state = self.primary_network(x[0], x[1], x[2], x[3], x[4], training=True)
preds_next_target = tf.stop_gradient(self.target_network(x[6], x[7], x[9], x[10], x[11], training=True))
return prediction_state, preds_next_target
def _train_step(self, batch):
# Record operations for automatic differentiation
with tf.GradientTape() as tape:
preds_state = []
target = []
for x in batch:
prediction_state, preds_next_target = self._forward_pass(x)
# Take q-value of the action performed
preds_state.append(prediction_state[0])
# We multiply by 0 if done==True to cancel the second term
target.append(tf.stop_gradient([x[5] + self.gamma*tf.math.reduce_max(preds_next_target)*(1-x[8])]))
loss = tf.keras.losses.MSE(tf.stack(target, axis=1), tf.stack(preds_state, axis=1))
# Loss function using L2 Regularization
regularization_loss = sum(self.primary_network.losses)
loss = loss + regularization_loss
# Computes the gradient using operations recorded in context of this tape
grad = tape.gradient(loss, self.primary_network.variables)
#gradients, _ = tf.clip_by_global_norm(grad, 5.0)
gradients = [tf.clip_by_value(gradient, -1., 1.) for gradient in grad]
self.optimizer.apply_gradients(zip(gradients, self.primary_network.variables))
del tape
return grad, loss
def replay(self, episode):
for i in range(MULTI_FACTOR_BATCH):
batch = random.sample(self.memory, self.numbersamples)
grad, loss = self._train_step(batch)
if i%store_loss==0:
fileLogs.write(".," + '%.9f' % loss.numpy() + ",\n")
# Soft weights update
# for t, e in zip(self.target_network.trainable_variables, self.primary_network.trainable_variables):
# t.assign(t * (1 - TAU) + e * TAU)
# Hard weights update
if episode % copy_weights_interval == 0:
self.target_network.set_weights(self.primary_network.get_weights())
# if episode % evaluation_interval == 0:
# self._write_tf_summary(grad, loss)
gc.collect()
def add_sample(self, env_training, state_action, action, reward, done, new_state, new_demand, new_source, new_destination):
self.bw_allocated_feature.fill(0.0)
new_state_copy = np.copy(new_state)
state_action['graph_id'] = tf.fill([tf.shape(state_action['link_state'])[0]], 0)
# We get the K-paths between new_source-new_destination
pathList = env_training.allPaths[str(new_source) +':'+ str(new_destination)]
path = 0
list_k_features = list()
# 2. Allocate (S,D, linkDemand) demand using the K shortest paths
while path < len(pathList):
currentPath = pathList[path]
i = 0
j = 1
# 3. Iterate over paths' pairs of nodes and allocate new_demand to bw_allocated
while (j < len(currentPath)):
new_state_copy[env_training.edgesDict[str(currentPath[i]) + ':' + str(currentPath[j])]][1] = new_demand
i = i + 1
j = j + 1
# 4. Add allocated graphs' features to the list. Later we will compute their q-values using cummax
features = agent.get_graph_features(env_training, new_state_copy)
list_k_features.append(features)
path = path + 1
new_state_copy[:,1] = 0
vs = [v for v in list_k_features]
# We compute the graphs_ids to later perform the unsorted_segment_sum for each graph and obtain the
# link hidden states for each graph.
graph_ids = [tf.fill([tf.shape(vs[it]['link_state'])[0]], it) for it in range(len(list_k_features))]
first_offset = cummax(vs, lambda v: v['first'])
second_offset = cummax(vs, lambda v: v['second'])
tensors = ({
'graph_id': tf.concat([v for v in graph_ids], axis=0),
'link_state': tf.concat([v['link_state'] for v in vs], axis=0),
'first': tf.concat([v['first'] + m for v, m in zip(vs, first_offset)], axis=0),
'second': tf.concat([v['second'] + m for v, m in zip(vs, second_offset)], axis=0),
'num_edges': tf.math.add_n([v['num_edges'] for v in vs]),
}
)
# We store the state with the action marked, the graph ids, first, second, num_edges, the reward,
# new_state(-1 because we don't need it in this case), the graph ids, done, first, second, number of edges
self.memory.append((state_action['link_state'], state_action['graph_id'], state_action['first'], # 2
state_action['second'], tf.convert_to_tensor(state_action['num_edges']), # 4
tf.convert_to_tensor(reward, dtype=tf.float32), tensors['link_state'], tensors['graph_id'], # 7
tf.convert_to_tensor(int(done==True), dtype=tf.float32), tensors['first'], tensors['second'], # 10
tf.convert_to_tensor(tensors['num_edges']))) # 12
if __name__ == "__main__":
# python train_DQN.py
# Get the environment and extract the number of actions.
env_training = gym.make(ENV_NAME)
np.random.seed(SEED)
env_training.seed(SEED)
env_training.generate_environment(graph_topology, listofDemands)
env_eval = gym.make(ENV_NAME)
np.random.seed(SEED)
env_eval.seed(SEED)
env_eval.generate_environment(graph_topology, listofDemands)
batch_size = hparams['batch_size']
agent = DQNAgent(batch_size)
eval_ep = 0
train_ep = 0
max_reward = 0
reward_id = 0
if not os.path.exists("./Logs"):
os.makedirs("./Logs")
# We store all the information in a Log file and later we parse this file
# to extract all the relevant information
fileLogs = open("./Logs/exp" + differentiation_str + "Logs.txt", "a")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(model=agent.primary_network, optimizer=agent.optimizer)
rewards_test = np.zeros(EVALUATION_EPISODES)
for eps in range(EVALUATION_EPISODES):
state, demand, source, destination = env_eval.reset()
rewardAddTest = 0
while 1:
# We execute evaluation over current state
# demand, src, dst
action, _ = agent.act(env_eval, state, demand, source, destination, True)
new_state, reward, done, demand, source, destination = env_eval.make_step(state, action, demand, source, destination)
rewardAddTest = rewardAddTest + reward
state = new_state
if done:
break
rewards_test[eps] = rewardAddTest
evalMeanReward = np.mean(rewards_test)
fileLogs.write(">," + str(evalMeanReward) + ",\n")
fileLogs.write("-," + str(agent.epsilon) + ",\n")
fileLogs.flush()
counter_store_model = 1
for ep_it in range(ITERATIONS):
if ep_it%5==0:
print("Training iteration: ", ep_it)
if ep_it==0:
# At the beginning we don't have any experiences in the buffer. Thus, we force to
# perform more training episodes than usually
train_episodes = FIRST_WORK_TRAIN_EPISODE
else:
train_episodes = TRAINING_EPISODES
for _ in range(train_episodes):
# Used to clean the TF cache
tf.random.set_seed(1)
state, demand, source, destination = env_training.reset()
while 1:
# We execute evaluation over current state
action, state_action = agent.act(env_training, state, demand, source, destination, False)
new_state, reward, done, new_demand, new_source, new_destination = env_training.make_step(state, action, demand, source, destination)
agent.add_sample(env_training, state_action, action, reward, done, new_state, new_demand, new_source, new_destination)
state = new_state
demand = new_demand
source = new_source
destination = new_destination
if done:
break
agent.replay(ep_it)
# Decrease epsilon (epsilon-greedy exploration strategy)
if ep_it > epsilon_start_decay and agent.epsilon > agent.epsilon_min:
agent.epsilon *= agent.epsilon_decay
agent.epsilon *= agent.epsilon_decay
# We only evaluate the model every evaluation_interval steps
if ep_it % evaluation_interval == 0:
for eps in range(EVALUATION_EPISODES):
state, demand, source, destination = env_eval.reset()
rewardAddTest = 0
while 1:
# We execute evaluation over current state
action, _ = agent.act(env_eval, state, demand, source, destination, True)
new_state, reward, done, demand, source, destination = env_eval.make_step(state, action, demand, source, destination)
rewardAddTest = rewardAddTest + reward
state = new_state
if done:
break
rewards_test[eps] = rewardAddTest
evalMeanReward = np.mean(rewards_test)
if evalMeanReward>max_reward:
max_reward = evalMeanReward
reward_id = counter_store_model
fileLogs.write(">," + str(evalMeanReward) + ",\n")
fileLogs.write("-," + str(agent.epsilon) + ",\n")
# Store trained model
checkpoint.save(checkpoint_prefix)
fileLogs.write("MAX REWD: " + str(max_reward) + " MODEL_ID: " + str(reward_id) +",\n")
counter_store_model = counter_store_model + 1
fileLogs.flush()
# Invoke garbage collection
# tf.keras.backend.clear_session()
gc.collect()
for eps in range(EVALUATION_EPISODES):
state, demand, source, destination = env_eval.reset()
rewardAddTest = 0
while 1:
# We execute evaluation over current state
# demand, src, dst
action, _ = agent.act(env_eval, state, demand, source, destination, True)
new_state, reward, done, demand, source, destination = env_eval.make_step(state, action, demand, source, destination)
rewardAddTest = rewardAddTest + reward
state = new_state
if done:
break
rewards_test[eps] = rewardAddTest
evalMeanReward = np.mean(rewards_test)
if evalMeanReward>max_reward:
max_reward = evalMeanReward
reward_id = counter_store_model
fileLogs.write(">," + str(evalMeanReward) + ",\n")
fileLogs.write("-," + str(agent.epsilon) + ",\n")
# Store trained model
checkpoint.save(checkpoint_prefix)
fileLogs.write("MAX REWD: " + str(max_reward) + " MODEL_ID: " + str(reward_id) +",\n")
fileLogs.flush()
fileLogs.close()
``` |
{
"source": "9tarz/AnnexML",
"score": 3
} |
#### File: AnnexML/scripts/learning-evaluate_predictions_propensity_scored.py
```python
from __future__ import print_function
import sys
import math
import argparse
from collections import defaultdict
def calc_propensity_score(train_file, A, B):
num_inst = 0
freqs = defaultdict(int)
for line in open(train_file):
if line.find(":") == -1:
continue # header line
num_inst += 1
idx = line.find(" ")
labels = line[:idx]
for l in labels.split(","):
l = int(l)
freqs[l] += 1
C = (math.log(num_inst) - 1) * pow(B+1, A)
pw_dict = dict()
for k in freqs.keys():
pw_dict[k] = 1 + C * pow(freqs[k]+B, -A)
default_pw = 1 + C * pow(B, -A)
return pw_dict, default_pw
def main():
parser = argparse.ArgumentParser(description='Calc propensity scored precision and nDCG (PSP@k and PSnDCG@k)')
parser.add_argument('train_file', help='Input train file for calculating propensity score')
parser.add_argument('-o', '--ordered', action='store_true', help='Input is already ordered (or sorted)')
parser.add_argument('-A', '--A', type=float, default=0.55, help='A')
parser.add_argument('-B', '--B', type=float, default=1.5, help='B')
args = parser.parse_args()
max_k = 5
pw_dict, default_pw = calc_propensity_score(args.train_file, args.A, args.B)
dcg_list = [0 for i in range(max_k)]
idcg_list = [0 for i in range(max_k)]
dcg_list[0] = 1.0
idcg_list[0] = 1.0
for i in range(1, max_k):
dcg_list[i] = 1.0 / math.log(i + 2, 2)
idcg_list[i] = idcg_list[i-1] + dcg_list[i]
num_lines = 0
n_accs = [0 for i in range(max_k)]
d_accs = [0 for i in range(max_k)]
n_ndcgs = [0.0 for x in range(max_k)]
d_ndcgs = [0.0 for x in range(max_k)]
for line in sys.stdin:
num_lines += 1
tokens = line.rstrip().split()
if len(tokens) != 2:
continue
ls = tokens[0]
ps = tokens[1]
l_set = set([int(x) for x in ls.split(",")])
k_list = list()
pred_dict = dict()
for t in ps.split(","):
p, v = t.split(":")
p = int(p)
v = float(v)
pred_dict[p] = v
k_list.append(p)
if not args.ordered:
# compatibility for (old) Matlab scripts
k_list = sorted([k for k in pred_dict.keys()], key=lambda x: (-pred_dict[x], x))
if len(k_list) > max_k:
k_list = k_list[:max_k]
n_dcgs = [0.0 for x in range(max_k)]
d_dcgs = [0.0 for x in range(max_k)]
for i, p in enumerate(k_list):
pw = pw_dict[p]
if p in l_set:
n_accs[i] += pw
n_dcgs[i] = pw * dcg_list[i]
sum_n_dcg = 0.0
for i, n_dcg in enumerate(n_dcgs):
sum_n_dcg += n_dcg
n_ndcgs[i] += sum_n_dcg / idcg_list[min(i, len(l_set)-1)]
l_pw_list = list()
for l in l_set:
pw = pw_dict[l] if l in pw_dict else default_pw
l_pw_list.append(pw)
l_pw_list = sorted(l_pw_list, reverse=True)
if len(l_pw_list) > max_k:
l_pw_list = l_pw_list[:max_k]
for i, pw in enumerate(l_pw_list):
d_accs[i] += pw
d_dcgs[i] = pw * dcg_list[i]
sum_d_dcg = 0.0
for i, d_dcg in enumerate(d_dcgs):
sum_d_dcg += d_dcg
d_ndcgs[i] += sum_d_dcg / idcg_list[min(i, len(l_set)-1)]
print("#samples={0}".format(num_lines))
n_a_sum, d_a_sum = 0.0, 0.0
for n in range(max_k):
n_a_sum += float(n_accs[n])
d_a_sum += float(d_accs[n])
print("PSP@{0}={1:.6f}".format(n+1, n_a_sum / d_a_sum))
for n in range(max_k):
print("PSnDCG@{0}={1:.6f}".format(n+1, n_ndcgs[n] / d_ndcgs[n]))
if __name__ == '__main__':
main()
``` |
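A worked example of the weighting above with the defaults A = 0.55, B = 1.5 and an assumed training-set size: for N = 10,000 instances, C = (ln 10000 − 1)·(1 + 1.5)^0.55 ≈ 13.59, so a label seen 10 times gets inverse-propensity weight 1 + 13.59·(10 + 1.5)^−0.55 ≈ 4.55, while an unseen label falls back to the default 1 + 13.59·1.5^−0.55 ≈ 11.87:
```python
import math

A, B, N = 0.55, 1.5, 10000                 # N is an assumed training-set size
C = (math.log(N) - 1) * (B + 1) ** A
weight = lambda freq: 1 + C * (freq + B) ** -A
print(round(weight(10), 2))                # 4.55  (label seen 10 times)
print(round(weight(0), 2))                 # 11.87 (label absent -> default_pw)
```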
{
"source": "9to6/simple_python_api",
"score": 3
} |
#### File: simple_python_api/apis/serializers.py
```python
from rest_framework import serializers
from apis.models import *
class UserSerializer(serializers.Serializer):
class Meta:
fields = ('email','nick',)
id = serializers.IntegerField(read_only=True)
password = serializers.CharField(required=False, max_length=254)
email = serializers.EmailField(required=False, allow_blank=True, max_length=254)
age = serializers.IntegerField(required=False)
nick = serializers.CharField(required=False, allow_blank=True, max_length=10)
first_name = serializers.CharField(required=False, allow_blank=True, max_length=30)
last_name = serializers.CharField(required=False, allow_blank=True, max_length=30)
def create(self, validated_data):
"""
Create and return a new `User` instance, given the validated data.
"""
password = validated_data['password']
user = CustomUser.objects.create(**validated_data)
user.set_password(password)
user.save()
return user
def update(self, instance, validated_data):
"""
Update and return an existing `User` instance, given the validated data.
"""
instance.email = validated_data.get('email', instance.email)
instance.set_password(validated_data.get('password', instance.password))
instance.age = validated_data.get('age', instance.age)
instance.nick = validated_data.get('nick', instance.nick)
instance.first_name = validated_data.get('first_name', instance.first_name)
instance.last_name = validated_data.get('last_name', instance.last_name)
instance.save()
return instance
``` |
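A sketch of how this serializer is typically driven; the literal input values are illustrative and the surrounding view code is not taken from the project:
```python
# Create: serializer.save() with no instance dispatches to create() above
serializer = UserSerializer(data={'email': 'a@b.com', 'password': 's3cret',
                                  'nick': 'alice', 'age': 30})
if serializer.is_valid():
    user = serializer.save()

# Update: passing an instance makes save() dispatch to update() above
serializer = UserSerializer(user, data={'nick': 'alicia'}, partial=True)
if serializer.is_valid():
    serializer.save()
```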
{
"source": "9trees/moke_test",
"score": 3
} |
#### File: moke_test/backend/downloadData.py
```python
import urllib.request
from urllib.error import HTTPError, URLError
import bs4 as bs
import json, time, random, os
from socket import timeout
def update_json(data):
global j_data, tag_name
j_data[tag_name].append(data)
def call_back(myurl, num):
toremove = dict.fromkeys((ord(c) for c in u'\xa0\n\t'))
# Fetch the html file; return early on failure so that `response` is
# never used uninitialized below
try:
req = urllib.request.Request(myurl)
response = urllib.request.urlopen(req, timeout=1000)
except (HTTPError, URLError) as error:
print('Data not retrieved because {0}\nURL: {1}'.format(error, myurl))
return
except timeout:
print('socket timed out - URL {0}'.format(myurl))
return
html_doc = response.read()
# Parse the html file
soup = bs.BeautifulSoup(html_doc, 'lxml')
# Get question ans. data
details_ques_ans = soup.find(class_="details_ques_ans")
i = 1
for line in details_ques_ans.find_all("span"):
#print(line.get_text(), len(line.get_text()))
data = ""
if(len(line.get_text()) >= 1):
for text in line.contents:
try:
text = text.translate(toremove)
if len(text):
data += text + " "
# print(i, data)
except:
data += str(text)
# print(i, data)
#print(i, data)
if i == 2:
question = data
if i == 4:
option_A = data
if i == 6:
option_B = data
if i == 8:
option_C = data
if i == 10:
option_D = data
if i == 11:
solution = data
i += 1
answer = soup.find(class_="ans_panel")
#print(i, answer.get_text().split()[-1])
answer = answer.get_text().split()[-1]
one = {
"No": num,
"Que": question,
"O_A": option_A,
"O_B": option_B,
"O_C": option_C,
"O_D": option_D,
"Exp": solution,
"Ans": answer,
"Ref": myurl
}
update_json(one)
if __name__ == "__main__":
link = [["NEET_Sample_Test_Paper_89", 'https://www.studyadda.com/sample-papers/neet-sample-test-paper-89_q1/1387/',
417617],["NEET_Sample_Test_Paper_90", 'https://www.studyadda.com/sample-papers/neet-sample-test-paper-90_q1/1388/',
417797], ["NEET_Sample_Test_Paper_91", 'https://www.studyadda.com/sample-papers/neet-sample-test-paper-91_q1/1389/',
417977]]
for one in link:
tag_name = one[0]
ll_1 = one[1]
ll_2 = one[2]
j_data = {tag_name: []}
for i in range(180):
myurl = ll_1 + str(ll_2 + i)
call_back(myurl, i+1)
# x = random.randint(1, 9)
# print(i, x)
# time.sleep(x)
#print(j_data)
fname = os.path.join("C:\\Users\\USER\\PycharmProjects\\moke_test", "source_data", tag_name+'_'+str(time.time()).split('.')[0]+".json")
with open(fname, "w") as outfile:
json.dump(j_data, outfile, indent=2)
``` |
{
"source": "9u3/cryptide-db",
"score": 2
} |
#### File: Cryptide/__automod__/Antispam.py
```python
from discord.ext import commands
from AntiSpam import AntiSpamHandler
from AntiSpam.ext import AntiSpamTracker
import json
class Antispam(commands.Cog, name="Antispam"):
def __init__(self, bot):
self.bot = bot
self.bot.handler = AntiSpamHandler(self.bot)
self.bot.handler.add_ignored_item(370120271367110656, "member") #NullPointer
self.bot.handler.add_ignored_item(786334287968337981, "member") #Me
self.bot.handler.add_ignored_item(382687991958601729, "member") #Prime Iridium
self.bot.handler.add_ignored_item(705641763062415431, "member") #Micah
self.bot.handler.add_ignored_item(490690957642301442, "member") #<NAME>
self.bot.handler.add_ignored_item(264445053596991498, "guild") #Top.gg
@commands.Cog.listener()
async def on_message(self, message):
await self.bot.handler.propagate(message)
def setup(bot):
bot.add_cog(Antispam(bot))
``` |
{
"source": "9uest/shadowsocks-admin",
"score": 2
} |
#### File: ss_admin/models/user.py
```python
from __future__ import absolute_import, division, print_function, \
with_statement
from ss_admin import db
class User(db.Model):
__tablename__ = 'user'
id = db.Column('id', db.Integer, primary_key=True, nullable=False, autoincrement=True)
email = db.Column('email', db.VARCHAR(128), nullable=False, index=True)
login_pass = db.Column('user_pass', db.VARCHAR(32), nullable=True)
server_port = db.Column('port', db.Integer, nullable=False, index=True)
server_pass = db.Column('passwd', db.VARCHAR(32), nullable=False)
total_transfer = db.Column('t', db.Integer, nullable=False)
download_transfer = db.Column('d', db.Integer, nullable=False)
upload_transfer = db.Column('u', db.Integer, nullable=False)
service_enable = db.Column('enable', db.Integer, nullable=False)
effective_date = db.Column('effective_date', db.Integer, nullable=False)
expire_date = db.Column('expire_date', db.Integer, nullable=False)
last_active_time = db.Column('last_active_time', db.Integer, nullable=True)
def __init__(self, user):
self.email = user['email']
self.login_pass = user.get('user_pass', '')
self.server_port = user['port']
        self.server_pass = user['passwd']  # matches the 'passwd' column above
self.total_transfer = user['t']
self.download_transfer = user['d']
self.upload_transfer = user['u']
self.service_enable = user['enable']
self.effective_date = user['effective_date']
self.expire_date = user['expire_date']
self.last_active_time = user.get('last_active_date', '')
if __name__ == '__main__':
pass
```
#### File: ss_admin/views/login.py
```python
from ss_admin import app, login_manager
from ss_admin.models import Admin
from ss_admin.forms import admin
from ss_admin.lib import verification_code
from flask import render_template, request, session, redirect, url_for, g, flash
from flask_wtf import FlaskForm as Form
from flask_login import login_required, login_user, logout_user, current_user
import StringIO, hashlib
@app.route('/', methods=['POST', 'GET'])
@app.route('/login', methods=['POST', 'GET'])
def login():
if request.method == 'GET':
form = Form()
return render_template('login.html',
form=form
)
elif request.method == 'POST':
form = admin.login()
if form.validate_on_submit():
if form.vcode.data != session['verification_code']:
flash({u'invalid_v_code': ['verification code not correct']}, 'error')
return redirect('login')
else:
flash(form.errors, 'error')
return redirect('login')
    # The stored password is md5(sha1(email + password))
    secure_password = hashlib.sha1(form.email.data + form.password.data).hexdigest()
    secure_password_md5 = hashlib.md5(secure_password).hexdigest()
    user = Admin.query.filter(Admin.email == form.email.data).\
        filter(Admin.password == secure_password_md5).first()
if user is None:
flash({u'user_not_exists': [u'user not exists']}, 'error')
return redirect('login')
login_user(user, remember=False)
return redirect(request.args.get('next') or url_for('index'))
@app.route('/v_code')
def code():
vc = verification_code.VerificationCode()
session['verification_code'], image = vc.generate()
buf = StringIO.StringIO()
image.save(buf, 'JPEG', quality=70)
buf_str = buf.getvalue()
response = app.make_response(buf_str)
response.headers['Content-Type'] = 'image/jpeg'
return response
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('login'))
@app.route('/index')
@login_required
def index():
return redirect(url_for('dashboard'))
@login_manager.user_loader
def load_user(id):
return Admin.query.get(id)
@app.before_request
def before_request():
g.user = current_user
``` |
{
"source": "9uru/cv_projects",
"score": 3
} |
#### File: cv_projects/src/util.py
```python
from typing import Tuple
import os
import numpy as np
import cv2
def get_cv2_data_loc() -> str:
    '''
    return full path to where cv2 data is stored
    '''
    return os.path.join(os.path.dirname(cv2.__file__), 'data')
def start_capture(filename: str) -> cv2.VideoCapture:
    '''
    Return a camera capture if filename is None,
    else a video file capture
    '''
    if filename is None:
        filename = 0
    return cv2.VideoCapture(filename)
def resize_dataset(
image_data: np.ndarray,
target_im_size: Tuple[int]) -> np.ndarray:
'''
Resize every image in dataset
assumes 1st dimension is the image index
'''
data_resized = []
for i in range(image_data.shape[0]):
img = image_data[i, :, :]
data_resized.append(
cv2.resize(img, dsize=target_im_size, interpolation=cv2.INTER_CUBIC))
data_resized = np.array(data_resized)
return data_resized
```
#### File: unit/architectures/test_alexnet.py
```python
from src.architectures.alexnet import AlexNet
def test_alexnet():
'''
unit test for alenxet creation
'''
model = AlexNet((227, 227, 3), 10)
assert model.count_params() > 6e6
model = AlexNet((227, 227, 3), 2)
assert model.layers[-1].activation.__name__ == 'sigmoid'
```
#### File: unit/architectures/test_lenet.py
```python
from src.architectures.lenet import LeNet
def test_lenet():
'''
unit test for lenet creation
'''
model = LeNet((32, 32, 1), 10)
assert model.count_params() == 61706
model = LeNet((32, 32, 1), 2)
assert model.count_params() == 60941
``` |
{
"source": "9watcher/mini_blog",
"score": 2
} |
#### File: mini_blog/myblog/models.py
```python
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.core.urlresolvers import reverse
# Create your models here.
@python_2_unicode_compatible
class Article(models.Model):
# id = models.IntegerField('博文编号', primary_key=True, auto_created=True)
id = models.AutoField(primary_key=True)
title = models.CharField('标题', max_length=256)
content = models.TextField('内容', max_length=5000, db_index=True)
category = models.CharField('分类', max_length=256,)
published_date = models.DateTimeField('发布时间')
update_date = models.DateTimeField('更新时间')
def get_absolute_url(self):
path = reverse('blog_detail')
return "http://127.0.0.1:8000{0}?={1}".format(path, id)
def __str__(self):
return self.title
class Meta:
ordering = ['-published_date']
```
#### File: myblog/templatetags/myfilter.py
```python
from django import template
register = template.Library()
# @register.filter(name='key')
def key(d, key_name):
try:
value = d[str(key_name)]
except KeyError:
value = 0
return value
register.filter('key', key)
```
#### File: mini_blog/myblog/views.py
```python
import urllib2
import json
from django.shortcuts import render
from myblog.models import Article
from django.core.paginator import Paginator
from django.core.paginator import PageNotAnInteger
from django.core.paginator import EmptyPage
# Create your views here.
def index(request):
results = Article.objects.all()
paginator = Paginator(results, 2)
page_num = request.GET.get('page')
try:
results = paginator.page(page_num)
except PageNotAnInteger:
results = paginator.page(1)
except EmptyPage:
results = paginator.page(paginator.num_pages)
results_id = [str(i.id) for i in results]
ds_url = "http://api.duoshuo.com/threads/counts.json?short_name=gary-qiu&threads={0}".format(','.join(results_id))
headers = {
'Content-Type': 'application/json; charset=UTF-8',
}
    r = urllib2.Request(ds_url, headers=headers)
    ds_result = json.load(urllib2.urlopen(r)).get('response')
return render(request, 'blog/index.html', locals())
def blog_detail(request):
blog_id = request.GET.get('id', '')
print ">>>>>>>>>>>>>>>>>>>>", blog_id
result = Article.objects.filter(id=blog_id).first()
return render(request, 'blog/blog.html', locals())
def about(request):
return render(request, 'blog/about.html', locals())
def contact(request):
return render(request, 'blog/contact.html', locals())
``` |
{
"source": "9XD/9XD",
"score": 2
} |
#### File: 9XD/notice/tests.py
```python
from django.urls import reverse
from test_plus import TestCase
from notice.factories import NoticeFactory
class NoticeTest(TestCase):
def test_notice_in_navigation(self):
self.get_check_200('home')
self.assertResponseContains('Notice', html=False)
self.assertResponseContains(reverse('notice:list'), html=False)
def test_get_notice_list_page(self):
# given
for _ in range(20):
NoticeFactory()
notice = NoticeFactory()
# when, then
self.get_check_200(reverse('notice:list'))
        self.assertResponseContains(notice.title, html=False)
detail_url = reverse('notice:detail', kwargs={'pk': notice.pk})
self.assertResponseContains(detail_url, html=False)
def test_get_detail_page(self):
# given
notice = NoticeFactory()
# when, then
detail_url = reverse('notice:detail', kwargs={'pk': notice.pk})
self.get_check_200(detail_url)
self.assertResponseContains(notice.title, html=False)
self.assertResponseContains(notice.content, html=False)
``` |
{
"source": "9xEzreaL/HEDUnet",
"score": 2
} |
#### File: 9xEzreaL/HEDUnet/DiceCofficient.py
```python
import torch
import torch.nn as nn
import numpy as np
def accuracy(target, y_hat):
seg_pred = torch.argmax(y_hat[:, 1:], dim=1)
seg_acc = (seg_pred == target[:, 1]).float().mean()
edge_pred = (y_hat[:, 0] > 0).float()
edge_acc = (edge_pred == target[:, 0]).float().mean()
    return seg_acc, edge_acc
# Dice coefficient for BCE
def BCEDiceCofficient(target, y_hat):
smooth = 0.00001
edge_target = target[:, 1]
N = edge_target.size(0)
edge_pred = (y_hat[:, 1:] > 0).float()
edge_pred_flat = edge_pred.view(N, -1)
edge_target_flat = edge_target.view(N, -1)
seg_target = target[:, 0]
n = seg_target.size(0)
seg_pred = (y_hat[:, 0] > 0).float()
seg_pred_flat = seg_pred.view(n, -1)
seg_target_flat = seg_target.view(n, -1)
edge_intersection = (edge_pred_flat * edge_target_flat).sum(1)
edge_unionset = edge_pred_flat.sum(1) + edge_target_flat.sum(1)
edge_acc = (2 * (edge_intersection + smooth) / (edge_unionset + smooth)).mean()
seg_intersection = (seg_pred_flat * seg_target_flat).sum(1)
seg_unionset = seg_pred_flat.sum(1) + seg_target_flat.sum(1)
seg_acc = (2 * (seg_intersection + smooth) / (seg_unionset + smooth)).mean()
    return seg_acc, edge_acc
def oldCEDiceCofficient(target, y_hat):
n = int(y_hat.shape[0]) # 2
seg_target = target[:n, 0, ::] # [256,224]
edge_target = target[:n, 1, ::]
seg_target = seg_target.view(n,-1)
edge_target = edge_target.view(n,-1)
seg_pred = y_hat[:n, :2, ::] # [8,2,256,224]
edge_pred = y_hat[:n, 2:, ::]
softmax_func = nn.Softmax(dim=1)
seg_pred = softmax_func(seg_pred)
edge_pred = softmax_func(edge_pred)
seg_pred = torch.max(seg_pred, 1)[1]
edge_pred = torch.max(edge_pred, 1)[1]
seg_pred = seg_pred.view(n,-1) # (2, 256*224)
edge_pred = edge_pred.view(n,-1)
# seg_target = seg_target.reshape(seg_target.shape[0] * seg_target.shape[1])
# edge_target = edge_target.reshape(edge_target.shape[0] * edge_target.shape[1])
dice_tp_seg = (seg_pred * seg_target).sum()
dice_div_seg = seg_pred.sum() + seg_target.sum()
seg_acc = ((2 * dice_tp_seg) / dice_div_seg).mean()
dice_tp_edge = (edge_pred * edge_target).sum()
dice_div_edge = edge_pred.sum() + edge_target.sum()
edge_acc = ((2 * dice_tp_edge) / dice_div_edge).mean()
return seg_acc, edge_acc
# Dice coefficient for CE
def CEDiceCofficient(target, y_hat):
smooth = 0.00001
    edge_target = target[:, 1]  # [8,256,224]
    n = edge_target.size(0)
    # for edge
edge_pred = (y_hat[:, 3] > 0).float()
edge_pred = edge_pred.view(n ,-1)
edge_target = edge_target.view(n, -1)
dice_tp_edge = (edge_pred * edge_target).sum(1)
dice_div_edge = edge_pred.sum(1) + edge_target.sum(1)
edge_acc = (2 * (dice_tp_edge +smooth) / (dice_div_edge + smooth)).mean()
# for seg
seg_target = target[:, 0, ::] # [8,256,224]
seg_pred = (y_hat[:, 1] > 0).float()
seg_pred = seg_pred.view(n, -1)
seg_target = seg_target.view(n, -1)
dice_tp_seg = (seg_pred * seg_target).sum(1)
dice_div_seg = seg_pred.sum(1) + seg_target.sum(1)
seg_acc = (2 * (dice_tp_seg + smooth) / (dice_div_seg +smooth)).mean()
return seg_acc, edge_acc
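# Illustrative sanity check (hypothetical tensors shaped [N, C, H, W]; positive
# logits stand in for a predicted mask of ones):
#   t = torch.ones(2, 2, 8, 8)   # channel 0: seg target, channel 1: edge target
#   y = torch.ones(2, 4, 8, 8)   # the CE variant expects 4 channels (2 seg + 2 edge)
#   CEDiceCofficient(t, y)       # -> (~1.0, ~1.0)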
``` |
{
"source": "9yur1/CareLabelRecognition",
"score": 2
} |
#### File: CareLabelRecognition/server/app.py
```python
from os.path import join
from flask import Flask, request, jsonify
import model
import pathlib
model.initialize()
app = Flask(__name__)
@app.route('/', methods=['GET'])
def getIndex():
return jsonify({}), 200
@app.route('/', methods=['POST'])
def index():
file = request.files['image']
path = join(pathlib.Path(__file__).parent.resolve(), 'img', file.filename)
file.save(path)
return jsonify({ 'result': model.predict(path) }), 200
app.run(host='0.0.0.0', port=8080)
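# Example request once the server is running (hypothetical file name):
#   curl -F "image=@label.jpg" http://localhost:8080/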
``` |
{
"source": "a000b/zderzacz-BTC",
"score": 2
} |
#### File: a000b/zderzacz-BTC/Zderzacz_btc.py
```python
import random, ecdsa, hashlib, base58, binascii, requests, time
##--------------BECH32-------------------------------------------------
CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
def bech32_polymod(values):
"""Internal function that computes the Bech32 checksum."""
generator = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3]
chk = 1
for value in values:
top = chk >> 25
chk = (chk & 0x1ffffff) << 5 ^ value
for i in range(5):
chk ^= generator[i] if ((top >> i) & 1) else 0
return chk
def bech32_hrp_expand(hrp):
"""Expand the HRP into values for checksum computation."""
return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]
def bech32_verify_checksum(hrp, data):
"""Verify a checksum given HRP and converted data characters."""
return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1
def bech32_create_checksum(hrp, data):
"""Compute the checksum values given HRP and data."""
values = bech32_hrp_expand(hrp) + data
polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ 1
return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
def bech32_encode(hrp, data):
"""Compute a Bech32 string given HRP and data values."""
combined = data + bech32_create_checksum(hrp, data)
return hrp + '1' + ''.join([CHARSET[d] for d in combined])
def bech32_decode(bech):
"""Validate a Bech32 string, and determine HRP and data."""
if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or
(bech.lower() != bech and bech.upper() != bech)):
return (None, None)
bech = bech.lower()
pos = bech.rfind('1')
if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
return (None, None)
if not all(x in CHARSET for x in bech[pos+1:]):
return (None, None)
hrp = bech[:pos]
data = [CHARSET.find(x) for x in bech[pos+1:]]
if not bech32_verify_checksum(hrp, data):
return (None, None)
return (hrp, data[:-6])
def convertbits(data, frombits, tobits, pad=True):
"""General power-of-2 base conversion."""
acc = 0
bits = 0
ret = []
maxv = (1 << tobits) - 1
max_acc = (1 << (frombits + tobits - 1)) - 1
for value in data:
if value < 0 or (value >> frombits):
return None
acc = ((acc << frombits) | value) & max_acc
bits += frombits
while bits >= tobits:
bits -= tobits
ret.append((acc >> bits) & maxv)
if pad:
if bits:
ret.append((acc << (tobits - bits)) & maxv)
elif bits >= frombits or ((acc << (tobits - bits)) & maxv):
return None
return ret
def decode(hrp, addr):
"""Decode a segwit address."""
hrpgot, data = bech32_decode(addr)
if hrpgot != hrp:
return (None, None)
decoded = convertbits(data[1:], 5, 8, False)
if decoded is None or len(decoded) < 2 or len(decoded) > 40:
return (None, None)
if data[0] > 16:
return (None, None)
if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32:
return (None, None)
return (data[0], decoded)
def encode(hrp, witver, witprog):
"""Encode a segwit address."""
ret = bech32_encode(hrp, [witver] + convertbits(witprog, 8, 5))
if decode(hrp, ret) == (None, None):
return None
return ret
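# Quick round-trip check (illustrative; twenty zero bytes stand in for a real
# public-key hash):
#   addr = encode('bc', 0, bytes(20))
#   assert decode('bc', addr) == (0, list(bytes(20)))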
##---------------------GENERATOR-SEGWIT----------------------------------------
def generator_segwit(a):
start = time.time()
number = a
for n in range(number):
d = privkey_generator()
private_key = d["pkey"]
WIF = d["Wk"]
signing_key = d["sk"]
verifying_key = d["vk"]
x_cor = bytes.fromhex(verifying_key.to_string().hex())[:32]
y_cor = bytes.fromhex(verifying_key.to_string().hex())[32:]
if int.from_bytes(y_cor, byteorder="big", signed=True) % 2 == 0:
public_key = bytes.fromhex(f'02{x_cor.hex()}')
else:
public_key = bytes.fromhex(f'03{x_cor.hex()}')
sha256_key = hashlib.sha256(public_key)
ripemd160_key = ripemd160(sha256_key.digest())
keyhash = ripemd160_key.digest()
P2WPKH_V0 = bytes.fromhex(f'0014{keyhash.hex()}')
sha256_P2WPKH_V0 = hashlib.sha256(P2WPKH_V0)
ripemd160_P2WPKH_V0 = ripemd160(sha256_P2WPKH_V0.digest())
scripthash = ripemd160_P2WPKH_V0.digest()
P2SH_P2WPKH_V0 = bytes.fromhex(f'a9{scripthash.hex()}87')
flagged_scripthash = bytes.fromhex(f'05{scripthash.hex()}')
checksum = hashlib.sha256(hashlib.sha256(flagged_scripthash).digest()).digest()[:4]
bin_addr = flagged_scripthash + checksum
nested_address = base58.b58encode(bin_addr)
bech32 = encode('bc', 0, keyhash)
i = n + 1
stradress = str(nested_address.decode())
balance = sprawdz_balance_blockstream(stradress)
        print("{:25} | {:35} | {:46} | {:20}".format("Bitcoin Address " + str(i), str(nested_address.decode()), str(bech32), str(balance) + " BTC"))
        if balance != 0:
            print("Private Key", str(i) + ": " + private_key.hex())
            print("Private Key WIF", str(i) + ": " + WIF.decode())
            break
calculate_speed(start, time.time(), number)
##---------------------GENERATOR-LEGACY----------------------------------------
def generator_legacy(a):
start = time.time()
number = a
for n in range(number):
d = privkey_generator()
private_key = d["pkey"]
WIF = d["Wk"]
signing_key = d["sk"]
verifying_key = d["vk"]
publ_key = '04' + binascii.hexlify(verifying_key.to_string()).decode()
hash160 = ripemd160(hashlib.sha256(binascii.unhexlify(publ_key)).digest()).digest()
publ_addr_a = b"\x00" + hash160
checksum = hashlib.sha256(hashlib.sha256(publ_addr_a).digest()).digest()[:4]
publ_addr_b = base58.b58encode(publ_addr_a + checksum)
i = n + 1
stradress = str(publ_addr_b.decode())
balance = sprawdz_balance_blockstream(stradress)
        print("{:25} | {:35} | {:20}".format("Bitcoin Address " + str(i), publ_addr_b.decode(), str(balance) + " BTC"))
        if balance != 0:
            print('Private Key ', str(i) + ": " + WIF.decode())
            break
calculate_speed(start, time.time(), number)
##---------------------FETCHING-ONLINE-DATA----------------------------------------
def sprawdz_balance_blockstream(a):
addr = a
response = requests.get('https://blockstream.info/api/address/' + addr)
if response.status_code == 200:
content = response.json()
b = (int(content['chain_stats']['funded_txo_sum']) - int(content['chain_stats']['spent_txo_sum'])) / 10**8
    else:
        print("Err: ", response.status_code)
        b = 0  # treat a failed lookup as an empty balance
    return b
def check_price():
response = requests.get('https://blockchain.info/ticker')
if response.status_code == 200:
content = response.json()
a = (str(content['USD']['last']) + " USD")
    else:
        print(response.status_code)
        a = "unavailable"
    return a
##---------------------HELPER-FUNCTIONS----------------------------------------
def calculate_speed(tstart, tend, ilosc):
tdiff = (tend - tstart)
sp = ilosc / tdiff
print("\nProcess time: ", str(tdiff), " sec")
print("Calculated average speed: ", str(sp), " key/sec")
def privkey_generator():
d ={}
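    # NOTE: the `random` module is not cryptographically secure; real key
    # material should come from `secrets.randbits(256)` (kept as-is here to
    # preserve the original toy generator's behaviour).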
private_key = (random.getrandbits(256)).to_bytes(32, byteorder="little", signed=False)
fullkey = '80' + binascii.hexlify(private_key).decode()
sha256a = hashlib.sha256(binascii.unhexlify(fullkey)).hexdigest()
sha256b = hashlib.sha256(binascii.unhexlify(sha256a)).hexdigest()
WIF = base58.b58encode(binascii.unhexlify(fullkey+sha256b[:8]))
signing_key = ecdsa.SigningKey.from_string(private_key, curve=ecdsa.SECP256k1)
verifying_key = signing_key.get_verifying_key()
d["pkey"] = private_key
d["Wk"] = WIF
d["sk"] = signing_key
d["vk"] = verifying_key
return d
def ripemd160(x):
d = hashlib.new('ripemd160')
d.update(x)
return d
##---------------------PROGRAM----------------------------------------
wybor = int(input("Jeżeli chcesz generować adresy Legacy wciśnij 1, jeżeli SegWit wciśnij 2 :"))
if wybor == 1:
ilosc = int(input("Podaj ilość kluczy:"))
generator_legacy(ilosc)
elif wybor == 2:
ilosc = int(input("Podaj ilość kluczy:"))
generator_segwit(ilosc)
else:
print("Nie ma takiej opcji")
print("BTC last price /Blockchain.info/ : ", check_price())
print("Koniec")
``` |
{
"source": "a00920/grpclib",
"score": 3
} |
#### File: examples/multiproc/client.py
```python
import asyncio
from typing import Tuple
from grpclib.client import Channel
# generated by protoc
from .primes_pb2 import Request
from .primes_grpc import PrimesStub
PRIMES = [
112272535095293,
112582705942171,
112272535095293,
115280095190773,
115797848077099,
0,
1,
2,
3,
4,
5,
6,
7,
]
async def main() -> None:
channel = Channel('127.0.0.1', 50051)
primes = PrimesStub(channel)
async def check(n: int) -> Tuple[int, bool]:
reply = await primes.Check(Request(number=n))
return n, reply.is_prime.value
for f in asyncio.as_completed([check(n) for n in PRIMES]):
number, is_prime = await f
print(f'Number {number} {"is" if is_prime else "is not"} prime')
channel.close()
if __name__ == '__main__':
asyncio.run(main())
```
#### File: examples/_reference/client.py
```python
import grpc
from helloworld import helloworld_pb2
from helloworld import helloworld_pb2_grpc
def main():
channel = grpc.insecure_channel('127.0.0.1:50051')
stub = helloworld_pb2_grpc.GreeterStub(channel)
print(stub.SayHello(helloworld_pb2.HelloRequest(name='World')))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
```
#### File: examples/_reference/server.py
```python
import time
import concurrent.futures
import grpc
from helloworld import helloworld_pb2
from helloworld import helloworld_pb2_grpc
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def SayHello(self, request, context):
return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
def serve(host='127.0.0.1', port=50051):
server = grpc.server(concurrent.futures.ThreadPoolExecutor(max_workers=10))
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
server.add_insecure_port(f'{host}:{port}')
server.start()
print(f'Serving on {host}:{port}')
try:
while True:
time.sleep(3600)
finally:
server.stop(0)
if __name__ == '__main__':
try:
serve()
except KeyboardInterrupt:
pass
```
#### File: grpclib/grpclib/protocol.py
```python
import asyncio
import struct
import time
import socket
import logging
from io import BytesIO
from abc import ABC, abstractmethod
from typing import Optional, List, Tuple, Dict, NamedTuple, Callable
from typing import cast, TYPE_CHECKING
from asyncio import Transport, Protocol, Event, BaseTransport, TimerHandle
from asyncio import Queue
from functools import partial
from collections import deque
from h2.errors import ErrorCodes
from h2.config import H2Configuration
from h2.events import Event as H2Event
from h2.events import RequestReceived, DataReceived, StreamEnded, WindowUpdated
from h2.events import ConnectionTerminated, RemoteSettingsChanged
from h2.events import SettingsAcknowledged, ResponseReceived, TrailersReceived
from h2.events import StreamReset, PriorityUpdated, PingAcknowledged
from h2.settings import SettingCodes
from h2.connection import H2Connection, ConnectionState
from h2.exceptions import ProtocolError, TooManyStreamsError, StreamClosedError
from .utils import Wrapper
from .config import Configuration
from .exceptions import StreamTerminatedError
if TYPE_CHECKING:
from typing import Deque
try:
from h2.events import PingReceived, PingAckReceived
except ImportError:
PingReceived = object()
PingAckReceived = object()
log = logging.getLogger(__name__)
if hasattr(socket, 'TCP_NODELAY'):
_sock_type_mask = 0xf if hasattr(socket, 'SOCK_NONBLOCK') else 0xffffffff
def _set_nodelay(sock: socket.socket) -> None:
if (
sock.family in {socket.AF_INET, socket.AF_INET6}
and sock.type & _sock_type_mask == socket.SOCK_STREAM
and sock.proto == socket.IPPROTO_TCP
):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
def _set_nodelay(sock: socket.socket) -> None:
pass
class UnackedData(NamedTuple):
data: bytes
data_size: int
ack_size: int
class AckedData(NamedTuple):
data: memoryview
data_size: int
class Buffer:
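    """Per-stream read buffer: received bytes are queued as "unacked" and the
    HTTP/2 flow-control window is acknowledged only as the application
    actually consumes them in read()."""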
def __init__(self, ack_callback: Callable[[int], None]) -> None:
self._ack_callback = ack_callback
self._eof = False
self._unacked: 'Queue[UnackedData]' = Queue()
self._acked: 'Deque[AckedData]' = deque()
self._acked_size = 0
def add(self, data: bytes, ack_size: int) -> None:
self._unacked.put_nowait(UnackedData(data, len(data), ack_size))
def eof(self) -> None:
self._unacked.put_nowait(UnackedData(b'', 0, 0))
self._eof = True
async def read(self, size: int) -> bytes:
assert size >= 0, 'Size can not be negative'
if size == 0:
return b''
if not self._eof or not self._unacked.empty():
while self._acked_size < size:
data, data_size, ack_size = await self._unacked.get()
if not ack_size:
break
self._acked.append(AckedData(memoryview(data), data_size))
self._acked_size += data_size
self._ack_callback(ack_size)
if self._eof and self._acked_size == 0:
return b''
if self._acked_size < size:
raise AssertionError('Received less data than expected')
chunks = []
chunks_size = 0
while chunks_size < size:
next_chunk, next_chunk_size = self._acked[0]
if chunks_size + next_chunk_size <= size:
chunks.append(next_chunk)
chunks_size += next_chunk_size
self._acked.popleft()
else:
offset = size - chunks_size
chunks.append(next_chunk[:offset])
chunks_size += offset
self._acked[0] = AckedData(
data=next_chunk[offset:],
data_size=next_chunk_size - offset,
)
self._acked_size -= size
assert chunks_size == size
return b''.join(chunks)
def unacked_size(self) -> int:
return sum(self._unacked.get_nowait().ack_size
for _ in range(self._unacked.qsize()))
class StreamsLimit:
def __init__(self, limit: Optional[int] = None) -> None:
self._limit = limit
self._current = 0
self._release = Event()
def reached(self) -> bool:
if self._limit is not None:
return self._current >= self._limit
else:
return False
async def wait(self) -> None:
# TODO: use FIFO queue for waiters
if self.reached():
self._release.clear()
await self._release.wait()
def acquire(self) -> None:
self._current += 1
def release(self) -> None:
self._current -= 1
if not self.reached():
self._release.set()
def set(self, value: Optional[int]) -> None:
assert value is None or value >= 0, value
self._limit = value
class Connection:
"""
Holds connection state (write_ready), and manages
H2Connection <-> Transport communication
"""
# stats
streams_started = 0
streams_succeeded = 0
streams_failed = 0
data_sent = 0
data_received = 0
messages_sent = 0
messages_received = 0
last_stream_created: Optional[float] = None
last_data_sent: Optional[float] = None
last_data_received: Optional[float] = None
last_message_sent: Optional[float] = None
last_message_received: Optional[float] = None
last_ping_sent: Optional[float] = None
ping_count_in_sequence: int = 0
_ping_handle: Optional[TimerHandle] = None
_close_by_ping_handler: Optional[TimerHandle] = None
def __init__(
self,
connection: H2Connection,
transport: Transport,
*,
config: Configuration,
) -> None:
self._connection = connection
self._transport = transport
self._config = config
self.write_ready = Event()
self.write_ready.set()
self.stream_close_waiter = Event()
def feed(self, data: bytes) -> List[H2Event]:
return self._connection.receive_data(data) # type: ignore
def ack(self, stream_id: int, size: int) -> None:
if size:
self._connection.acknowledge_received_data(size, stream_id)
self.flush()
def pause_writing(self) -> None:
self.write_ready.clear()
def resume_writing(self) -> None:
self.write_ready.set()
def create_stream(
self,
*,
stream_id: Optional[int] = None,
wrapper: Optional[Wrapper] = None,
) -> 'Stream':
return Stream(self, self._connection, self._transport,
stream_id=stream_id, wrapper=wrapper)
def flush(self) -> None:
data = self._connection.data_to_send()
if data:
self._transport.write(data)
def initialize(self) -> None:
if self._config._keepalive_time is not None:
self._ping_handle = asyncio.get_event_loop().call_later(
self._config._keepalive_time,
self._ping
)
def close(self) -> None:
if hasattr(self, '_transport'):
self._transport.close()
# remove cyclic references to improve memory usage
del self._transport
if hasattr(self._connection, '_frame_dispatch_table'):
del self._connection._frame_dispatch_table
if self._ping_handle is not None:
self._ping_handle.cancel()
if self._close_by_ping_handler is not None:
self._close_by_ping_handler.cancel()
def _is_need_send_ping(self) -> bool:
assert self._config._keepalive_time is not None
if not self._config._keepalive_permit_without_calls:
if not any(s.open for s in self._connection.streams.values()):
return False
if self._config._http2_max_pings_without_data != 0 and \
self.ping_count_in_sequence >= \
self._config._http2_max_pings_without_data:
return False
if self.last_ping_sent is not None and \
time.monotonic() - self.last_ping_sent < \
self._config._http2_min_sent_ping_interval_without_data:
return False
return True
def _ping(self) -> None:
assert self._config._keepalive_time is not None
if self._is_need_send_ping():
log.debug('send ping')
data = struct.pack('!Q', int(time.monotonic() * 10 ** 6))
self._connection.ping(data)
self.flush()
self.last_ping_sent = time.monotonic()
self.ping_count_in_sequence += 1
if self._close_by_ping_handler is None:
self._close_by_ping_handler = asyncio.get_event_loop().\
call_later(
self._config._keepalive_timeout,
self.close
)
self._ping_handle = asyncio.get_event_loop().call_later(
self._config._keepalive_time,
self._ping
)
def headers_send_process(self) -> None:
self.ping_count_in_sequence = 0
def data_send_process(self) -> None:
self.ping_count_in_sequence = 0
self.last_data_sent = time.monotonic()
def ping_ack_process(self) -> None:
if self._close_by_ping_handler is not None:
self._close_by_ping_handler.cancel()
self._close_by_ping_handler = None
_Headers = List[Tuple[str, str]]
class Stream:
"""
API for working with streams, used by clients and request handlers
"""
id: Optional[int] = None
# stats
created: Optional[float] = None
data_sent = 0
data_received = 0
def __init__(
self,
connection: Connection,
h2_connection: H2Connection,
transport: Transport,
*,
stream_id: Optional[int] = None,
wrapper: Optional[Wrapper] = None
) -> None:
self.connection = connection
self._h2_connection = h2_connection
self._transport = transport
self.wrapper = wrapper
if stream_id is not None:
self.init_stream(stream_id, self.connection)
self.window_updated = Event()
self.headers: Optional['_Headers'] = None
self.headers_received = Event()
self.trailers: Optional['_Headers'] = None
self.trailers_received = Event()
def init_stream(self, stream_id: int, connection: Connection) -> None:
self.id = stream_id
self.buffer = Buffer(partial(connection.ack, self.id))
self.connection.streams_started += 1
self.created = self.connection.last_stream_created = time.monotonic()
async def recv_headers(self) -> _Headers:
if self.headers is None:
await self.headers_received.wait()
assert self.headers is not None
return self.headers
async def recv_data(self, size: int) -> bytes:
return await self.buffer.read(size)
async def recv_trailers(self) -> _Headers:
if self.trailers is None:
await self.trailers_received.wait()
assert self.trailers is not None
return self.trailers
async def send_request(
self,
headers: _Headers,
end_stream: bool = False,
*,
_processor: 'EventsProcessor',
) -> Callable[[], None]:
assert self.id is None, self.id
while True:
# this is the first thing we should check before even trying to
# create new stream, because this wait() can be cancelled by timeout
# and we wouldn't need to create new stream at all
if not self.connection.write_ready.is_set():
await self.connection.write_ready.wait()
# `get_next_available_stream_id()` should be as close to
# `connection.send_headers()` as possible, without any async
# interruptions in between, see the docs on the
# `get_next_available_stream_id()` method
stream_id = self._h2_connection.get_next_available_stream_id()
try:
self._h2_connection.send_headers(stream_id, headers,
end_stream=end_stream)
except TooManyStreamsError:
# we're going to wait until any of currently opened streams will
# be closed, and we will be able to open a new one
# TODO: maybe implement FIFO for waiters, but this limit
# shouldn't be reached in a normal case, so why bother
# TODO: maybe we should raise an exception here instead of
# waiting, if timeout wasn't set for the current request
self.connection.stream_close_waiter.clear()
await self.connection.stream_close_waiter.wait()
# while we were trying to create a new stream, write buffer
# can became full, so we need to repeat checks from checking
# if we can write() data
continue
else:
self.init_stream(stream_id, self.connection)
release_stream = _processor.register(self)
self._transport.write(self._h2_connection.data_to_send())
self.connection.headers_send_process()
return release_stream
async def send_headers(
self,
headers: _Headers,
end_stream: bool = False,
) -> None:
assert self.id is not None
if not self.connection.write_ready.is_set():
await self.connection.write_ready.wait()
# Workaround for the H2Connection.send_headers method, which will try
# to create a new stream if it was removed earlier from the
# H2Connection.streams, and therefore will raise StreamIDTooLowError
if self.id not in self._h2_connection.streams:
raise StreamClosedError(self.id)
self._h2_connection.send_headers(self.id, headers,
end_stream=end_stream)
self._transport.write(self._h2_connection.data_to_send())
self.connection.headers_send_process()
async def send_data(self, data: bytes, end_stream: bool = False) -> None:
f = BytesIO(data)
f_pos, f_last = 0, len(data)
while True:
if not self.connection.write_ready.is_set():
await self.connection.write_ready.wait()
window = self._h2_connection.local_flow_control_window(self.id)
# window can become negative
if not window > 0:
self.window_updated.clear()
await self.window_updated.wait()
# during "await" above other streams were able to send data and
# decrease current window size, so try from the beginning
continue
max_frame_size = self._h2_connection.max_outbound_frame_size
f_chunk = f.read(min(window, max_frame_size, f_last - f_pos))
f_chunk_len = len(f_chunk)
f_pos = f.tell()
if f_pos == f_last:
self._h2_connection.send_data(self.id, f_chunk,
end_stream=end_stream)
self._transport.write(self._h2_connection.data_to_send())
self.data_sent += f_chunk_len
self.connection.data_sent += f_chunk_len
self.connection.data_send_process()
break
else:
self._h2_connection.send_data(self.id, f_chunk)
self._transport.write(self._h2_connection.data_to_send())
self.data_sent += f_chunk_len
self.connection.data_sent += f_chunk_len
self.connection.data_send_process()
async def end(self) -> None:
if not self.connection.write_ready.is_set():
await self.connection.write_ready.wait()
self._h2_connection.end_stream(self.id)
self._transport.write(self._h2_connection.data_to_send())
async def reset(self, error_code: ErrorCodes = ErrorCodes.NO_ERROR) -> None:
if not self.connection.write_ready.is_set():
await self.connection.write_ready.wait()
self._h2_connection.reset_stream(self.id, error_code=error_code)
self._transport.write(self._h2_connection.data_to_send())
def reset_nowait(
self,
error_code: ErrorCodes = ErrorCodes.NO_ERROR,
) -> None:
self._h2_connection.reset_stream(self.id, error_code=error_code)
if self.connection.write_ready.is_set():
self._transport.write(self._h2_connection.data_to_send())
def __ended__(self) -> None:
self.buffer.eof()
def __terminated__(self, reason: str) -> None:
if self.wrapper is not None:
self.wrapper.cancel(StreamTerminatedError(reason))
@property
def closable(self) -> bool:
if self._h2_connection.state_machine.state is ConnectionState.CLOSED:
return False
stream = self._h2_connection.streams.get(self.id)
if stream is None:
return False
return not stream.closed
class AbstractHandler(ABC):
@abstractmethod
def accept(
self,
stream: Stream,
headers: _Headers,
release_stream: Callable[[], None],
) -> None:
pass
@abstractmethod
def cancel(self, stream: Stream) -> None:
pass
@abstractmethod
def close(self) -> None:
pass
_Streams = Dict[int, Stream]
class EventsProcessor:
"""
H2 events processor, synchronous, not doing any IO, as hyper-h2 itself
"""
def __init__(
self,
handler: AbstractHandler,
connection: Connection,
) -> None:
self.handler = handler
self.connection = connection
self.processors = {
RequestReceived: self.process_request_received,
ResponseReceived: self.process_response_received,
RemoteSettingsChanged: self.process_remote_settings_changed,
SettingsAcknowledged: self.process_settings_acknowledged,
DataReceived: self.process_data_received,
WindowUpdated: self.process_window_updated,
TrailersReceived: self.process_trailers_received,
StreamEnded: self.process_stream_ended,
StreamReset: self.process_stream_reset,
PriorityUpdated: self.process_priority_updated,
ConnectionTerminated: self.process_connection_terminated,
PingReceived: self.process_ping_received,
PingAckReceived: self.process_ping_ack_received,
PingAcknowledged: self.process_ping_ack_received, # deprecated
}
self.streams: _Streams = {}
def register(self, stream: Stream) -> Callable[[], None]:
assert stream.id is not None
self.streams[stream.id] = stream
def release_stream(*, _streams: _Streams = self.streams) -> None:
assert stream.id is not None
_stream = _streams.pop(stream.id)
self.connection.stream_close_waiter.set()
self.connection.ack(stream.id, _stream.buffer.unacked_size())
return release_stream
def close(self, reason: str = 'Connection closed') -> None:
self.connection.close()
self.handler.close()
for stream in self.streams.values():
stream.__terminated__(reason)
# remove cyclic references to improve memory usage
if hasattr(self, 'processors'):
del self.processors
def process(self, event: H2Event) -> None:
try:
proc = self.processors[event.__class__]
except KeyError:
raise NotImplementedError(event)
else:
proc(event)
def process_request_received(self, event: RequestReceived) -> None:
stream = self.connection.create_stream(stream_id=event.stream_id)
release_stream = self.register(stream)
self.handler.accept(stream, event.headers, release_stream)
# TODO: check EOF
def process_response_received(self, event: ResponseReceived) -> None:
stream = self.streams.get(event.stream_id)
if stream is not None:
stream.headers = event.headers
stream.headers_received.set()
def process_remote_settings_changed(
self,
event: RemoteSettingsChanged,
) -> None:
if SettingCodes.INITIAL_WINDOW_SIZE in event.changed_settings:
for stream in self.streams.values():
stream.window_updated.set()
def process_settings_acknowledged(
self,
event: SettingsAcknowledged,
) -> None:
pass
def process_data_received(self, event: DataReceived) -> None:
size = len(event.data)
stream = self.streams.get(event.stream_id)
if stream is not None:
stream.buffer.add(
event.data,
event.flow_controlled_length,
)
stream.data_received += size
else:
self.connection.ack(
event.stream_id,
event.flow_controlled_length,
)
self.connection.data_received += size
self.connection.last_data_received = time.monotonic()
def process_window_updated(self, event: WindowUpdated) -> None:
if event.stream_id == 0:
for value in self.streams.values():
value.window_updated.set()
else:
stream = self.streams.get(event.stream_id)
if stream is not None:
stream.window_updated.set()
def process_trailers_received(self, event: TrailersReceived) -> None:
stream = self.streams.get(event.stream_id)
if stream is not None:
stream.trailers = event.headers
stream.trailers_received.set()
def process_stream_ended(self, event: StreamEnded) -> None:
stream = self.streams.get(event.stream_id)
if stream is not None:
stream.__ended__()
self.connection.streams_succeeded += 1
def process_stream_reset(self, event: StreamReset) -> None:
stream = self.streams.get(event.stream_id)
if stream is not None:
if event.remote_reset:
msg = ('Stream reset by remote party, error_code: {}'
.format(event.error_code))
else:
msg = 'Protocol error'
stream.__terminated__(msg)
self.handler.cancel(stream)
self.connection.streams_failed += 1
def process_priority_updated(self, event: PriorityUpdated) -> None:
pass
def process_connection_terminated(
self,
event: ConnectionTerminated,
) -> None:
self.close(reason=(
'Received GOAWAY frame, closing connection; error_code: {}'
.format(event.error_code)
))
def process_ping_received(self, event: PingReceived) -> None:
pass
def process_ping_ack_received(self, event: PingAckReceived) -> None:
self.connection.ping_ack_process()
class H2Protocol(Protocol):
connection: Connection
processor: EventsProcessor
def __init__(
self,
handler: AbstractHandler,
config: Configuration,
h2_config: H2Configuration,
) -> None:
self.handler = handler
self.config = config
self.h2_config = h2_config
def connection_made(self, transport: BaseTransport) -> None:
sock = transport.get_extra_info('socket')
if sock is not None:
_set_nodelay(sock)
h2_conn = H2Connection(config=self.h2_config)
h2_conn.initiate_connection()
self.connection = Connection(
h2_conn,
cast(Transport, transport),
config=self.config,
)
self.connection.flush()
self.connection.initialize()
self.processor = EventsProcessor(self.handler, self.connection)
def data_received(self, data: bytes) -> None:
try:
events = self.connection.feed(data)
except ProtocolError:
log.debug('Protocol error', exc_info=True)
self.processor.close('Protocol error')
else:
self.connection.flush()
for event in events:
self.processor.process(event)
self.connection.flush()
def pause_writing(self) -> None:
self.connection.pause_writing()
def resume_writing(self) -> None:
self.connection.resume_writing()
def connection_lost(self, exc: Optional[BaseException]) -> None:
self.processor.close(reason='Connection lost')
```
#### File: grpclib/tests/test_codec.py
```python
import json
import pytest
import grpclib.const
import grpclib.server
from grpclib.client import UnaryUnaryMethod
from grpclib.events import _DispatchServerEvents
from grpclib.exceptions import GRPCError
from grpclib.encoding.base import CodecBase
from grpclib.encoding.proto import ProtoCodec
from conn import ClientStream, ClientServer, ServerStream
from conn import grpc_encode, grpc_decode
from dummy_pb2 import DummyRequest
class JSONCodec(CodecBase):
__content_subtype__ = 'json'
def encode(self, message, message_type):
return json.dumps(message, ensure_ascii=False).encode('utf-8')
def decode(self, data: bytes, message_type):
return json.loads(data.decode('utf-8'))
class PingServiceHandler:
async def UnaryUnary(self, stream):
request = await stream.recv_message()
assert request == {'value': 'ping'}
await stream.send_message({'value': 'pong'})
def __mapping__(self):
return {
'/ping.PingService/UnaryUnary': grpclib.const.Handler(
self.UnaryUnary,
grpclib.const.Cardinality.UNARY_UNARY,
None,
None,
),
}
class PingServiceStub:
def __init__(self, channel):
self.UnaryUnary = UnaryUnaryMethod(
channel,
'/ping.PingService/UnaryUnary',
None,
None,
)
@pytest.mark.asyncio
async def test_rpc_call():
ctx = ClientServer(PingServiceHandler, PingServiceStub, codec=JSONCodec())
async with ctx as (handler, stub):
reply = await stub.UnaryUnary({'value': 'ping'})
assert reply == {'value': 'pong'}
@pytest.mark.asyncio
async def test_client_receive_json():
cs = ClientStream(codec=JSONCodec())
async with cs.client_stream as stream:
await stream.send_request()
_, request_received = cs.client_conn.to_server_transport.events()
await stream.send_message({'value': 'ping'}, end=True)
content_type = dict(request_received.headers)['content-type']
assert content_type == 'application/grpc+json'
cs.client_conn.server_h2c.send_headers(
request_received.stream_id,
[
(':status', '200'),
('content-type', 'application/grpc+json'),
],
)
cs.client_conn.server_h2c.send_data(
request_received.stream_id,
grpc_encode({'value': 'pong'}, None, JSONCodec()),
)
cs.client_conn.server_h2c.send_headers(
request_received.stream_id,
[
('grpc-status', str(grpclib.const.Status.OK.value)),
],
end_stream=True,
)
cs.client_conn.server_flush()
reply = await stream.recv_message()
assert reply == {'value': 'pong'}
@pytest.mark.asyncio
async def test_client_receive_invalid():
cs = ClientStream(codec=JSONCodec())
with pytest.raises(GRPCError) as exc:
async with cs.client_stream as stream:
await stream.send_request()
_, request_received = cs.client_conn.to_server_transport.events()
content_type = dict(request_received.headers)['content-type']
assert content_type == 'application/grpc+json'
cs.client_conn.server_h2c.send_headers(
request_received.stream_id,
[
(':status', '200'),
('content-type', 'application/grpc+proto'),
],
)
cs.client_conn.server_h2c.send_data(
request_received.stream_id,
grpc_encode({'value': 'pong'}, None, JSONCodec()),
)
cs.client_conn.server_h2c.send_headers(
request_received.stream_id,
[
('grpc-status', str(grpclib.const.Status.OK.value)),
],
end_stream=True,
)
cs.client_conn.server_flush()
await stream.recv_message()
exc.match(r"Invalid content-type: 'application/grpc\+proto'")
@pytest.mark.asyncio
async def test_server_receive_json():
handler = PingServiceHandler()
mapping = handler.__mapping__()
path = next(iter(mapping.keys()))
ss = ServerStream(codec=JSONCodec(), path=path,
content_type='application/grpc+json')
ss.server_conn.client_h2c.send_data(
ss.stream_id,
grpc_encode({'value': 'ping'}, None, JSONCodec()),
end_stream=True,
)
ss.server_conn.client_flush()
await grpclib.server.request_handler(
handler.__mapping__(),
ss.server_h2s,
ss.server_conn.server_proto.processor.handler.headers,
JSONCodec(),
None,
_DispatchServerEvents(),
lambda: None,
)
response_received, data_received, trailers_received, _ = \
ss.server_conn.to_client_transport.events()
assert dict(response_received.headers)[':status'] == '200'
assert dict(response_received.headers)['content-type'] == \
'application/grpc+json'
reply = grpc_decode(data_received.data, None, JSONCodec())
assert reply == {'value': 'pong'}
assert dict(trailers_received.headers)['grpc-status'] == '0'
@pytest.mark.asyncio
async def test_server_receive_invalid():
handler = PingServiceHandler()
mapping = handler.__mapping__()
path = next(iter(mapping.keys()))
ss = ServerStream(codec=JSONCodec(), path=path,
content_type='application/grpc+invalid')
ss.server_conn.client_h2c.send_data(
ss.stream_id,
grpc_encode({'value': 'ping'}, None, JSONCodec()),
end_stream=True,
)
ss.server_conn.client_flush()
await grpclib.server.request_handler(
handler.__mapping__(),
ss.server_h2s,
ss.server_conn.server_proto.processor.handler.headers,
JSONCodec(),
None,
_DispatchServerEvents(),
lambda: None,
)
response_received, _ = ss.server_conn.to_client_transport.events()
assert dict(response_received.headers)[':status'] == '415'
assert dict(response_received.headers)['grpc-status'] == '2'
assert dict(response_received.headers)['grpc-message'] == \
'Unacceptable content-type header'
@pytest.mark.asyncio
async def test_server_return_json():
ss = ServerStream(codec=JSONCodec())
ss.server_conn.client_h2c.send_data(
ss.stream_id,
grpc_encode({'value': 'ping'}, None, JSONCodec()),
end_stream=True,
)
ss.server_conn.client_flush()
message = await ss.server_stream.recv_message()
assert message == {'value': 'ping'}
await ss.server_stream.send_initial_metadata()
response_received, = ss.server_conn.to_client_transport.events()
content_type = dict(response_received.headers)['content-type']
assert content_type == 'application/grpc+json'
await ss.server_stream.send_message({'value': 'pong'})
data_received, = ss.server_conn.to_client_transport.events()
reply = grpc_decode(data_received.data, None, JSONCodec())
assert reply == {'value': 'pong'}
def test_proto_invalid_type():
codec = ProtoCodec()
assert codec.encode(DummyRequest(value='42'), DummyRequest) == \
DummyRequest(value='42').SerializeToString()
with pytest.raises(TypeError, match='Message must be of type'):
codec.encode(1, DummyRequest)
``` |
{
"source": "a01334390/LWC",
"score": 3
} |
#### File: a01334390/LWC/testBSearchSS.py
```python
from estructuras import *
def ucs(g, s, m):
frontera = ColaPriorizada()
frontera.put(0, s)
anteriores = {}
anteriores[s] = None
acumulado = {}
acumulado[s] = 0
while not frontera.esVacia():
actual = frontera.get()
if actual == m:
break
for vecino in g.vecinos(actual):
costo = acumulado[actual] + g.costo(actual, vecino)
if vecino not in acumulado or costo < acumulado[vecino]:
acumulado[vecino] = costo
frontera.put(costo, vecino)
anteriores[vecino] = actual
return acumulado, acumulado[actual]
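# To recover the route itself (not just its cost), one could walk the
# `anteriores` chain backwards before returning (sketch):
#   nodo, ruta = m, []
#   while nodo is not None:
#       ruta.append(nodo)
#       nodo = anteriores[nodo]
#   ruta.reverse()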
def main():
g = Grafo()
with open('espana.txt') as f:
for l in f:
(c1, c2, c) = l.split(',')
            g.aAristaPeso(c1, c2, float(c))  # parse the cost as a number, not text
print(ucs(g, 'Coruna', 'Vigo'))
if __name__ == '__main__':
main()
``` |
{
"source": "A01352283/TC1001S.100-202211",
"score": 4
} |
#### File: TC1001S.100-202211/Python/memory.py
```python
from random import *
from turtle import *
from freegames import path
mountain = path('car.gif')
tiles = list(range(32)) * 2
state = {'mark': None}
hide = [True] * 64
def square(x, y):
"Draw white square with black outline at (x, y)."
up()
goto(x, y)
down()
color('red', 'white')
begin_fill()
for count in range(4):
forward(50)
left(90)
end_fill()
def index(x, y):
"Convert (x, y) coordinates to tiles index."
return int((x + 200) // 50 + ((y + 200) // 50) * 8)
def xy(count):
"Convert tiles count to (x, y) coordinates."
return (count % 8) * 50 - 200, (count // 8) * 50 - 200
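# For example, a tap at (-200, -200) (the bottom-left tile) gives index 0,
# and xy(0) maps back to (-200, -200).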
def tap(x, y):
"Update mark and hidden tiles based on tap."
spot = index(x, y)
mark = state['mark']
if mark is None or mark == spot or tiles[mark] != tiles[spot]:
state['mark'] = spot
else:
hide[spot] = False
hide[mark] = False
state['mark'] = None
def draw():
"Draw image and tiles."
clear()
goto(0, 0)
shape(mountain)
stamp()
for count in range(64):
if hide[count]:
x, y = xy(count)
square(x, y)
mark = state['mark']
if mark is not None and hide[mark]:
x, y = xy(mark)
up()
goto(x + 2, y)
color('blue')
write(tiles[mark], font=('Verdana', 24, 'normal', 'bold', 'italic'))
update()
ontimer(draw, 100)
shuffle(tiles)
setup(420, 420, 370, 0)
addshape(mountain)
hideturtle()
tracer(False)
onscreenclick(tap)
draw()
done()
```
#### File: TC1001S.100-202211/Python/snake.py
```python
from turtle import *
from random import randrange
from freegames import square, vector
colors = ['cyan', 'dark goldenrod', 'chartreuse', 'purple', 'chocolate']
food = vector(0, 0)
snake = [vector(10, 0)]
aim = vector(0, -10)
boundary = randrange(300, 500) #Randomizes board size
def change(x, y):
"Change snake direction."
aim.x = x
aim.y = y
def inside(head):
"Return True if head inside boundaries."
return -boundary < head.x < boundary and -boundary < head.y < boundary
def move():
"Move snake forward one segment."
head = snake[-1].copy()
head.move(aim)
if not inside(head) or head in snake:
square(head.x, head.y, 9, 'red')
update()
return
snake.append(head)
if head == food:
print('Snake:', len(snake))
food.x = randrange(-15, 15) * 10
food.y = randrange(-15, 15) * 10
else:
snake.pop(0)
clear()
for body in snake:
square(body.x, body.y, 10, colors[snakecolor])
square(food.x, food.y, 6, colors[foodcolor])
update()
ontimer(move, 50) #Makes the game faster
setup(boundary*2, boundary*2, 370, 0)
snakecolor = randrange(0, 5)
foodcolor = snakecolor
while(foodcolor==snakecolor):
foodcolor = randrange(0, 5)
hideturtle()
tracer(False)
listen()
onkey(lambda: change(10, 0), 'Right')
onkey(lambda: change(-10, 0), 'Left')
onkey(lambda: change(0, 10), 'Up')
onkey(lambda: change(0, -10), 'Down')
move()
done()
``` |
{
"source": "A01377098/pruebaSemanaTEC-A01377098",
"score": 3
} |
#### File: A01377098/pruebaSemanaTEC-A01377098/miconvolucion.py
```python
import numpy as np
def convolucion(A, B):
    """Valid (no-padding) sliding-window correlation of kernel B over A."""
    k = len(B)
    filas = len(A) - k + 1
    columnas = len(A[0]) - k + 1
    C = np.zeros((filas, columnas))
    for x in range(filas):
        for y in range(columnas):
            for n in range(k):
                for o in range(k):
                    C[x][y] += A[x + n][y + o] * B[n][o]
    return C
Matriz = [[6, 9, 0, 3], [8, 4, 9, 1], [4, 1, 3, 12], [3, 2, 1, 100]]
Filtro = [[1, 0, 2], [5, 0, 9], [6, 2, 1]]
A = np.array(Matriz)
B = np.array(Filtro)
C = convolucion(A, B)
print (C)
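# Expected output for these inputs (sliding-window correlation, no kernel flip):
#   [[156.  68.]
#    [ 96. 233.]]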
``` |
{
"source": "A01379375/Equipo-1-HCAP",
"score": 3
} |
#### File: A01379375/Equipo-1-HCAP/bgr2grey.py
```python
import numpy as np
import cv2
def rgb2gray(A):
filas = len(A)
columnas = len(A[0])
B = np.zeros((filas,columnas))
for i in range(filas):
for j in range(columnas):
B[i][j] = (A[i][j][0]*0.07 + A[i][j][1]*0.72 + A[i][j][2]*0.21)
return B
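# Note: cv2.imread returns pixels in BGR order, so channel 0 is blue; the
# weights (0.07 B, 0.72 G, 0.21 R) approximate the standard luma conversion.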
imagen_i = cv2.imread("imagen.jpg")
imagenrgb2gray = rgb2gray(imagen_i)
cv2.imwrite("Imagen_escala_de_grises.jpg",imagenrgb2gray)
imagen_2 = cv2.imread('paisaje.jpg')
imagen_2 = cv2.resize(imagen_2, (512, 256))
imagen2rgb2gray = rgb2gray(imagen_2)
cv2.imwrite('Imagen_paisaje_escala_de_grises.jpg', imagen2rgb2gray)
``` |
{
"source": "a01635008/semana-tec",
"score": 3
} |
#### File: a01635008/semana-tec/piano.py
```python
import pynput
import music21
import pygame
freq = 44100 # audio CD quality
bitsize = -16 # signed 16 bit samples
channels = 2 # 1 is mono, 2 is stereo
buffer = 1024 # number of samples
pygame.mixer.init(freq, bitsize, channels, buffer)
# optional volume 0 to 1.0
pygame.mixer.music.set_volume(0.8)
class PianoKey:
def __init__(self, tone, key):
self.isKeyPress = False
self.key = key
self.stream = music21.stream.Stream()
note = music21.note.Note(tone)
note.duration.quarterLength = 16
self.stream.append(note)
self.stream.write('midi', fp=key + ".mid")
# self.player = pygame.mixer.music.load(key + ".mid");
def KeyPress(self):
print('{0} pressed'.format(self.key))
self.player = pygame.mixer.music.load(self.key + ".mid")
self.player = pygame.mixer.music.play()
# self.player.play();
return
def KeyRelease(self):
print('{0} release'.format(self.key))
# self.player = pygame.mixer.music.stop();
# self.player = pygame.mixer.music.unload();
# self.player.stop();
return
PianoKey_Setting = [['C', "'1'"],
['D', "'2'"],
['E', "'3'"],
['F', "'4'"],
['G', "'5'"],
['A', "'6'"],
['B', "'7'"]]
Obj_PianoKeys = {}
def initPiano():
for setting in PianoKey_Setting:
Obj_PianoKeys[setting[1]] = (PianoKey(setting[0], setting[1]))
def on_press_fun(key):
# print('{0} pressed'.format(key));
print("key" + str(key))
if(str(key) in Obj_PianoKeys):
Obj_PianoKeys[str(key)].KeyPress()
def on_release_fun(key):
# print('{0} release'.format(key));
if(str(key) in Obj_PianoKeys):
Obj_PianoKeys[str(key)].KeyRelease()
if(key == pynput.keyboard.Key.esc):
# Stop Listener
return False
if __name__ == "__main__":
print("Hello World")
initPiano()
with pynput.keyboard.Listener(on_press=on_press_fun, on_release=
on_release_fun) as listener:
listener.join()
``` |
{
"source": "A01653108/HCAP2021",
"score": 3
} |
#### File: A01653108/HCAP2021/convolucion.py
```python
import numpy as np
import cv2
def convolucion(Ioriginal,Kernel):
fr=len(Ioriginal)-(len(Kernel)-1)
cr=len(Ioriginal[0])-(len(Kernel[0])-1)
Resultado=np.zeros((fr,cr),np.uint8)
#For para recorrer filas
for i in range(len(Resultado)):
#For para recorrer columnas
for j in range(len(Resultado[0])):
suma=0
#hace las multiplicaciones y las suma
for m in range(len(Kernel)):
for n in range(len(Kernel[0])):
suma+=Kernel[m][n]*Ioriginal[m+i][n+j]
            # Clamp to the valid uint8 range [0, 255]
            if suma < 0:
                Resultado[i][j] = 0
            elif suma <= 255:
                Resultado[i][j] = round(suma)
            else:
                Resultado[i][j] = 255
return Resultado
#imagenes
K=[[-1,0,1],[-1,0,1],[-1,0,1]]
I=[[2,0,1,1,1],[3,0,0,0,2],[1,1,1,1,1],[3,1,1,1,2],[1,1,1,1,1]]
# images to numpy arrays
In=np.array(I)
Kn=np.array(K)
IRGB=cv2.imread('Spiderman.jpg')
IGS=cv2.cvtColor(IRGB,cv2.COLOR_BGR2GRAY)
print(IGS.shape)
# convolution function
R=convolucion(IGS,Kn)
print(R)
print(R.shape)
cv2.imwrite('Spiderman.jpg',R)
``` |
{
"source": "a01745043/Equipo-2-Semana-Tec",
"score": 3
} |
#### File: a01745043/Equipo-2-Semana-Tec/convolucion.py
```python
import numpy as np
import cv2
def convulucion(A,B):
C=np.zeros((len(A)-2,len(A[0])-2))
suma=0
for i in range(len(C)):
for j in range(len(C[0])):
for m in range(len(B)):
for n in range(len(B[0])):
suma+= A[m+i][j+n]*B[m][n]
C[i][j]= suma
suma=0
return C
imagen = cv2.imread('003.jpg')
imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY)
imagen = cv2.resize(imagen,(256,256))
matriz = imagen
filtro=[[1,1,1],[1,0,1],[1,1,1]]
A=np.array(matriz)
B=np.array(filtro)
#C=np.zeros((2,2))
resultado=convulucion(A,B)
final=cv2.imwrite('convolucion.jpg',resultado)
``` |
{
"source": "a01745865/hcap-equipo",
"score": 3
} |
#### File: a01745865/hcap-equipo/convolucion.py
```python
import numpy as np
def convolucion(A,B):
C=np.zeros((len(A)-2,len(A[0])-2))
for i1 in range(len(C)):
for j1 in range(len(C[0])):
suma=0
for i in range(len(B)):
for j in range(len(B[0])):
suma+=A[i+i1][j+j1]*B[i][j]
C[i1][j1]=suma
return C
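# Illustrative usage (the inputs below are assumptions, not part of the module):
# A = np.arange(16).reshape(4, 4)
# B = np.ones((3, 3))
# print(convolucion(A, B))  # 2x2 array of 3x3 window sums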
```
#### File: a01745865/hcap-equipo/grayescale.py
```python
import cv2
import numpy as np
def escala(M):
matriz_zeros = np.zeros((M.shape[0], M.shape[1]))
for i in range(len(M)):
for a in range(len(M[i])):
suma = 0
for b in M[i][a]:
suma += b
promedio = suma/len(M[i][a])
matriz_zeros[i][a] = promedio
return matriz_zeros
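# Equivalent vectorized form (illustrative): M.mean(axis=2) averages the colour
# channels of an H x W x C image in a single step.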
``` |
{
"source": "A01746540/SEM9Algoritmos",
"score": 4
} |
#### File: SEM9Algoritmos/Tareas/DistanciaGrafos.py
```python
from collections import defaultdict
class Graph:
metro = ['El Rosario', 'Instituto del Petroleo', 'Tacuba', 'Hidalgo', 'Tacubaya', 'Deportivo 18 de Marzo',
'Centro Medico', 'Mixcoac', 'Balderas', 'Bellas Artes', 'Guerrero', '<NAME>', 'Zapata',
'Chabacano',
'Salto del Agua', 'Garibaldi', 'La Raza', '<NAME>', 'Consulado', 'Candelaria', 'Ermita',
'Santa Anita', 'Oceania', 'Morelos',
'San Lazaro', 'Jamaica', 'Atlalilco', 'Pantitlan']
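    # The indices into `metro` double as the node numbers passed to addEdge below.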
def __init__(self):
self.graph = defaultdict(list)
def addEdge(self, u, v):
        # add an edge to the graph
for val in v:
self.graph[u].append(val)
def BFS(self, s):
        # perform BFS from s, tracking hop distances in d
        d = [0] * 100
d[s] = 0
queue = []
        visited = [False] * 100  # sized like d; target nodes can exceed the largest source key
queue.append(s)
visited[s] = True
while queue:
s = queue.pop(0)
print(s, end=" ")
for v in self.graph[s]:
if visited[v] == False:
queue.append(v)
visited[v] = True
d[v] = d[s] + 1
print("\nNodo inicial: El Rosario")
for i in range(28):
print(f"Desde el Rosario hasta {self.metro[i]} es {d[i]}")
g = Graph()
g.addEdge(0, [1, 2])
g.addEdge(1, [14, 15])
g.addEdge(2, [3, 4])
g.addEdge(3, [11, 12])
g.addEdge(4, [5, 6, 9])
g.addEdge(5, [8, 9])
g.addEdge(6, [7])
g.addEdge(7, [25])
g.addEdge(8, [10, 21, 25, 26])
g.addEdge(9, [3, 10])
g.addEdge(11, [10, 21])
g.addEdge(12, [13, 14])
g.addEdge(13, [11])
g.addEdge(14, [15, 17])
g.addEdge(15, [16])
g.addEdge(17, [16, 18, 19])
g.addEdge(18, [23])
g.addEdge(19, [13, 20])
g.addEdge(20, [18, 23])
g.addEdge(21, [10, 22])
g.addEdge(22, [19, 20])
g.addEdge(23, [24])
g.addEdge(24, [8, 22])
g.addEdge(25, [27])
g.addEdge(26, [24, 27])
print("BFT:")
g.BFS(0)
``` |
{
"source": "A02l01/INFEST",
"score": 2
} |
#### File: A02l01/INFEST/fit_INFEST.py
```python
# ### ### #
# #
# ### ### ### #
# ### ### ### #
# # # # #
## # # # ##
import pandas as pd
from pylab import *
import matplotlib.pyplot as plt
import numpy as np
import sys
import argparse
def get_residuals(model,fit_parameter,original_x,original_y):
if model == "PolyModel":
m = PolyModel()
        res = (original_y - m.f(original_x, a1=fit_parameter['a1'], a2=fit_parameter['a2'], a3=fit_parameter['a3'], a4=fit_parameter['a4'], a5=fit_parameter['a5']))**2
res = np.mean(res)
return res
def integrate(qm2,df3,ft):
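    # Evaluate the fitted polynomial on a 0.5-unit time grid and record the first
    # times the curve crosses `ft` (tau_300) and 2*ft (tau_600); the caller
    # reports their difference as the LDT.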
t = np.arange(0,df3.t.tolist()[-1],0.5)
ys = np.poly1d(qm2[0])(t)
# ys -= qm2[0][4]
ii=0
tau_600 = 0
tau_300 = 0
while (ii<len(ys)-1):
if(ys[ii]<ft) & (ys[ii+1]>=ft):
tau_300 = t[ii]
if(ys[ii]<2*ft) & (ys[ii+1]>=2*ft):
tau_600 = t[ii]
break
ii+=1
return tau_600-tau_300,tau_600
def m_plot(qm2,df2,l):
plt.figure(l.split('/')[-1])
plt.plot(df2.t,np.poly1d(qm2[0])(df2.t),'--',label="model")
plt.plot(df2.t,df2.Lesion,'.',label="Lesion raw")
plt.legend()
show()
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("path_in", help="the path to the file containing temporal data computed by INFEST")
parser.add_argument("path_out", help="the path to the file containing LDT and Latency",default='')
parser.add_argument("-ft","--first", help="the first time to consider for the computation of the LDT",type=int,default=300,)
parser.add_argument("-g","--graph", action="store_true",help="monitoring the fit of the curve")
args = parser.parse_args()
print("Open "+args.path_in)
df=pd.read_csv(args.path_in,sep="\t")
df['t'] = (df['time'])*10
leaf = np.unique(df.Id)
out = "Id\ta1\ta2\ta3\ta4\ta5\tresiduals\tLDT\tLatency\n"
ii = 0
for l in leaf:
# df2 = df[(df.Id == l) & (df.t<1500) & (df.t>600)]
df2 = df[(df.Id == l)]
if size(df2.t[df2.Lesion>300]) > 10 :
qm2 = np.polyfit(df2.t,df2.Lesion,4,full=True)
if args.graph:
m_plot(qm2,df2,args.path_in+l)
res = qm2[1][0]
puissance63,puissance60 = integrate(qm2,df2,args.first)
new_out = l+"\t"+str(qm2[0][0])+"\t"+str(qm2[0][1])+"\t"+str(qm2[0][2])+"\t"+str(qm2[0][3])+"\t"+str(qm2[0][4])+"\t"+str(res)+"\t"+str(puissance63)+"\t"+str(puissance60)+"\n"
out+= new_out
else:
fig = plt.figure(l.split('/')[-1])
new_out = l+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\n"
print("Bad Data: Lesion size < 30 pxl")
print("save as "+args.path_out)
f = open(args.path_out,"w")
f.write(out)
f.close()
``` |
{
"source": "A03HCY/SNS",
"score": 3
} |
#### File: A03HCY/SNS/Run.py
```python
from Core import *
import sys
__version__ = '0.1.2'
class Runner(Network):
Contin = True
t_id = None
t_pt = None
def __init__(self, port, token):
super().__init__(port, token)
pass
def Getcmd(self):
head = self.t_id
if self.t_id == None:
head = 'Terminal'
else:
head = head.split('-')[0]
print(head+' # ', end='')
cmd = []
for part in input('').split(' '):
for final in part.split(':'):
cmd.append(final)
return cmd
def simplifyPath(self, path):
        path = path.replace('//', '/')
        path = path.replace('\\', '/')
        parts = path.split('/')
        res = []
        for part in parts:
            if part == '..' and len(res) > 0:
                res = res[:-1]
            elif part != '' and part != '.' and part != '..':
                res.append(part)
return '/'+'/'.join(res)
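    # e.g. simplifyPath("a/b/../c") -> "/a/c" (illustrative)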
def run(self):
while self.Contin:
cmd = self.Getcmd()
if cmd[0] in ['sns'] and len(cmd) == 3:
ip = cmd[1]
                try:
                    port = int(cmd[2])
                except ValueError:
                    continue
token = input('Token > ')
rs = self.Connect((ip, port), token)
if 'Error' in rs:
print(rs)
continue
self.t_pt = input('Folder > ')
self.t_id = rs
continue
if cmd[0] in ['ter']:
ter = self.GetTerminal()
for key in ter:
print(key + ':', end=' ')
if ter[key] == []:
print('None')
continue
print('')
for value in ter[key]:
print(' ' + value)
continue
if cmd[0] in ['ls']:
rz = self.File(self.t_id, 'list', '', self.t_pt)
if 'Error' in rz:
print(rz)
continue
for part in rz:
if rz[part] == []:
continue
print(part+':')
for files in rz[part]:print(' '+files)
continue
if cmd[0] in ['cd']:
rz = self.File(self.t_id, 'list', '', self.t_pt)
if 'Error' in rz:
self.t_pt = input('Reset Path > ')
continue
if rz['folders'] == []:
print('No folder can change.')
stay = input('Up folder? > ')
if stay in ['yes', 'y']:
                        self.t_pt = os.path.join(self.t_pt, '..')
self.t_pt = self.simplifyPath(self.t_pt)[1:]
print(self.t_pt)
continue
print('folders:')
i = 1
for part in rz['folders']:
print(' '+str(i)+' '+part)
i += 1
cmd = int(input('Number > '))
if cmd == 0:
                    self.t_pt = os.path.join(self.t_pt, '..')
self.t_pt = self.simplifyPath(self.t_pt)[1:]
continue
if rz['folders'][cmd-1]:
self.t_pt = os.path.join(self.t_pt, rz['folders'][cmd-1])
self.t_pt = self.simplifyPath(self.t_pt)[1:]
print(self.t_pt)
continue
if cmd[0] in ['get']:
rz = self.File(self.t_id, 'list', '', self.t_pt)
if 'Error' in rz:
self.t_pt = input('Reset Path > ')
continue
if rz['files'] == [] and rz['unknows'] == []:
print('No Files.')
continue
fun = rz['files'] + rz['unknows']
print('Files and unknows:')
i = 1
for part in fun:
print(' '+str(i)+' '+part)
i += 1
cmd = int(input('Number > '))
if fun[cmd-1]:
download = os.path.join(self.t_pt, fun[cmd-1]).replace('\\', '/')
download = download.replace('/', '\\')
print(download)
rz = self.File(self.t_id, 'get', './', download)
print(rz)
a = Runner(1010, 'F6987ij42')
a.t_id = a.Connect(('192.168.1.8', 1010), 'F6987ij42')
a.t_pt = 'x:/'
a.run()
``` |
{
"source": "A03ki/chatdb",
"score": 2
} |
#### File: chatdb/chatdb/models.py
```python
import copy
from py2neo.ogm import Label, Model, Property, Related, RelatedTo, RelatedFrom
from chatdb.util import flatten_dictionary
def MetaKeepingModelAttrs(name, bases, attributes):
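    # Collect the py2neo Property/Label/Related attributes declared on the class
    # and merge them into __model_attributes__, so classes built with this
    # metaclass keep the attributes of their bases.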
model_attributes = {"property": {}, "label": {}, "related": {}}
for attr_name, attr in attributes.items():
if isinstance(attr, Property):
model_attributes["property"][attr_name] = attr
elif isinstance(attr, Label):
model_attributes["label"][attr_name] = attr
elif isinstance(attr, Related):
model_attributes["related"][attr_name] = attr
cls = type(name, bases, attributes)
if "__model_attributes__" in dir(cls):
new_model_attributes = copy.deepcopy(cls.__model_attributes__)
for key, value in model_attributes.items():
new_model_attributes[key].update(**value)
cls.__model_attributes__ = new_model_attributes
else:
cls.__model_attributes__ = model_attributes
return cls
class Status(Model, metaclass=MetaKeepingModelAttrs):
__primarylabel__ = "Status"
text = Property()
in_reply_to_statuses = RelatedTo(__primarylabel__, "REPLY")
in_reply_from_statuses = RelatedFrom(__primarylabel__, "REPLY")
def __init__(self, **properties) -> None:
all_properties = self.__model_attributes__["property"]
for key, value in properties.items():
if key in all_properties:
setattr(self, key, value)
def reply_to(self, model: "Status") -> None:
self.in_reply_to_statuses.add(model)
model.in_reply_from_statuses.add(self)
def reply_from(self, model: "Status") -> None:
self.in_reply_from_statuses.add(model)
model.in_reply_to_statuses.add(self)
class OrdinaryStatus(Status, metaclass=MetaKeepingModelAttrs):
__primarykey__ = "status_id"
status_id = Property()
user_name = Property()
date = Property()
def __init__(self, **properties) -> None:
super().__init__(**properties)
class SimpleTweetStatus(Status, metaclass=MetaKeepingModelAttrs):
__primarylabel__ = "TweetStatus"
__primarykey__ = "id_str"
id_str = Property()
in_reply_to_status_id_str = Property()
created_at = Property()
def __init__(self, **properties) -> None:
flat_dict = flatten_dictionary(properties)
super().__init__(**flat_dict)
```
#### File: chatdb/chatdb/util.py
```python
def flatten_dictionary(dictionary: dict, sep: str = "_") -> dict:
flat_dictionary = {}
def _flatten_dictionary(dictionary, parent_name=""):
for key, value in dictionary.items():
if isinstance(value, dict):
_flatten_dictionary(value, f"{parent_name + key}{sep}")
else:
flat_dictionary[parent_name+key] = value
_flatten_dictionary(dictionary)
return flat_dictionary
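# Example (illustrative): flatten_dictionary({"a": {"b": 1}, "c": 2})
# returns {"a_b": 1, "c": 2}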
``` |
{
"source": "A03ki/PFN_internship2019_ML",
"score": 3
} |
#### File: 2/src/function.py
```python
import numpy as np
def relu(x):
return np.maximum(0, x)
def logexp(x):
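    # For x > 100, log(1 + exp(x)) is numerically equal to x, so x is returned
    # directly to avoid overflow in np.exp.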
return np.where(x > 100, x, np.log(1 + np.exp(x)))
def binary_cross_entropy(x, y):
loss = y * logexp(-x) + (1 - y) * logexp(x)
return loss
```
#### File: 4/src/preprocessing.py
```python
import random
import numpy as np
class SplitError(Exception):
pass
def data_shuffle(seed, *args):
"""
args同士のindexの関係性を保ちつつランダムで並び替えたargsのリストを取得する
Parameters
----------
seed : int or None
シャッフル時のシード値
args : array-likes
並び替えるlistやndarray
Return
------
: list
ランダムで並び替えたargsのリスト
"""
random.seed(seed)
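    # zip(*args) pairs up the elements that share an index, so shuffling the
    # pairs and unzipping them preserves the correspondence between the args.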
permutation = list(zip(*args))
random.shuffle(permutation)
return list(zip(*permutation))
def data_split(ratio, *args):
"""
指定した比率で分割したargsのリストを取得する
Parameters
----------
ratio : float
分割する比率。0~1の範囲で指定
args : array-likes
分割するlistやndarray
Return
------
: list of tuple
分割したargをタプルにまとめたリスト
"""
return [np.split(data, [int(len(data) * ratio)]) for data in args]
def data_shuffle_split(ratio, seed, *args):
"""
指定した比率で分割したシャッフル済みのargsのリストを取得する
Parameters
----------
ratio : float
分割する比率。0~1の範囲で指定
seed : int or None
シャッフル時のシード値
args : array-likes
分割するlistやndarray
Return
------
: list of tuple
並び替えた済みのargを分割し、それぞれタプルにまとめたリスト
See Also
--------
data_shuffle : indexの関係性を保ちつつランダムで並び替えたargsのリストを取得する
data_split : 指定した比率で分割したargsのリストを取得する
"""
args = data_shuffle(seed, *args)
return data_split(ratio, *args)
def data_delete(range, *args):
"""
指定した範囲を削除したargsのリストを取得する
Parameters
----------
range : tuple
スライスを行う範囲
args : array-likes
指定した範囲を削除するlistやndarray
Return
------
deleted : list of ndarray
指定した範囲を削除したargsのリスト
"""
deleted = [np.delete(arg, slice(*range)) for arg in args]
return deleted
def sampling(split_size, seed, *args):
"""
指定した数で分割しミニバッチ化したargsのリストを取得する
Parameters
----------
split_size : int
argを分割する数
seed : int or None
シャッフル時のシード値
args : array-likes
ミニバッチにするlistやndarray
Return
------
mini_batchs : list
ミニバッチにしたargsのリスト
Raises
------
SplitError
argsをsplit_sizeで分割できない時
See Also
--------
data_shuffle : indexの関係性を保ちつつランダムで並び替えたargsのリストを取得する
"""
if split_size > min(set(map(len, args))):
raise SplitError("Split_size is more than the element of an arg")
batchs = data_shuffle(seed, *args)
mini_batchs = [list(np.array_split(batch,
split_size)) for batch in batchs]
return mini_batchs
``` |
{
"source": "A03ki/text",
"score": 2
} |
#### File: test/datasets/test_iwslt2016.py
```python
import os
import random
import string
from collections import defaultdict
from unittest.mock import patch
from parameterized import parameterized
from torchtext.datasets.iwslt2016 import IWSLT2016
from torchtext.data.datasets_utils import _generate_iwslt_files_for_lang_and_split
from ..common.case_utils import TempDirMixin, zip_equal
from ..common.torchtext_test_case import TorchtextTestCase
def _get_mock_dataset(root_dir, split, src, tgt):
"""
root_dir: directory to the mocked dataset
"""
temp_dataset_dir = os.path.join(root_dir, f"IWSLT2016/2016-01/texts/{src}/{tgt}/{src}-{tgt}/")
os.makedirs(temp_dataset_dir, exist_ok=True)
seed = 1
mocked_data = defaultdict(lambda: defaultdict(list))
valid_set = "tst2013"
test_set = "tst2014"
files_for_split, _ = _generate_iwslt_files_for_lang_and_split(16, src, tgt, valid_set, test_set)
src_file = files_for_split[src][split]
tgt_file = files_for_split[tgt][split]
for file_name in (src_file, tgt_file):
txt_file = os.path.join(temp_dataset_dir, file_name)
with open(txt_file, "w") as f:
# Get file extension (i.e., the language) without the . prefix (.en -> en)
lang = os.path.splitext(file_name)[1][1:]
for i in range(5):
rand_string = " ".join(
random.choice(string.ascii_letters) for i in range(seed)
)
dataset_line = f"{rand_string} {rand_string}\n"
# append line to correct dataset split
mocked_data[split][lang].append(dataset_line)
                f.write(dataset_line)
seed += 1
return list(zip(mocked_data[split][src], mocked_data[split][tgt]))
class TestIWSLT2016(TempDirMixin, TorchtextTestCase):
root_dir = None
patcher = None
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.root_dir = cls.get_base_temp_dir()
cls.patcher = patch(
"torchdata.datapipes.iter.util.cacheholder.OnDiskCacheHolderIterDataPipe._cache_check_fn", return_value=True
)
cls.patcher.start()
@classmethod
def tearDownClass(cls):
cls.patcher.stop()
super().tearDownClass()
@parameterized.expand([("train", "de", "en"), ("valid", "de", "en")])
def test_iwslt2016(self, split, src, tgt):
expected_samples = _get_mock_dataset(self.root_dir, split, src, tgt)
dataset = IWSLT2016(root=self.root_dir, split=split)
samples = list(dataset)
for sample, expected_sample in zip_equal(samples, expected_samples):
self.assertEqual(sample, expected_sample)
@parameterized.expand(["train", "valid"])
def test_iwslt2016_split_argument(self, split):
dataset1 = IWSLT2016(root=self.root_dir, split=split)
(dataset2,) = IWSLT2016(root=self.root_dir, split=(split,))
for d1, d2 in zip_equal(dataset1, dataset2):
self.assertEqual(d1, d2)
``` |
{
"source": "A03ki/ueca",
"score": 2
} |
#### File: ueca/tests/test_constants.py
```python
from ueca.constants import c, e, epsilon_0, g, G, h, k, m_e, m_p, m_u, mu_0, N_A, R
class TestConstants:
def test_c(self):
assert c.value == 2.99792458E8
assert c.unit == "meter / second"
def test_e(self):
assert e.value == 1.602176634E-19
assert e.unit == "coulomb"
def test_epsilon_0(self):
assert epsilon_0.value == 8.8541878128E-12
assert epsilon_0.unit == "farad / meter"
def test_g(self):
assert g.value == 9.80665
assert g.unit == "meter / second ** 2"
def test_G(self):
assert G.value == 6.6743E-11
assert G.unit == "meter ** 2 * newton / kilogram ** 2"
def test_h(self):
assert h.value == 6.62607015E-34
assert h.unit == "joule * second"
def test_k(self):
assert k.value == 1.380649E-23
assert k.unit == "joule / kelvin"
def test_m_e(self):
assert m_e.value == 9.1093837015E-31
assert m_e.unit == "kilogram"
def test_m_p(self):
assert m_p.value == 1.67262192369E-27
assert m_p.unit == "kilogram"
def test_m_u(self):
assert m_u.value == 1.6605390666E-27
assert m_u.unit == "kilogram"
def test_mu_0(self):
assert mu_0.value == 1.25663706212E-6
assert mu_0.unit == "henry / meter"
def test_N_A(self):
assert N_A.value == 6.02214076E23
assert N_A.unit == "1 / mole"
def test_R(self):
assert R.value == 8.314462618
assert R.unit == "joule / kelvin / mole"
```
#### File: ueca/tests/test_uncertainty.py
```python
import numpy as np
import pytest
from ueca.data import PhysicsData
from ueca.symbolf import Rational
from ueca.uncertainty import combined_standard_uncertainty
def test_combined_standard_uncertainty_calculation():
mass = PhysicsData(2.5, "kilogram", symbol="M", uncertainty=1.3)
outer_diameter = PhysicsData(3.1, "meter", symbol="D_1", uncertainty=0.81)
inner_diameter = PhysicsData(4.2, "meter", symbol="D_2", uncertainty=1.1)
moment_of_inertia = Rational(1, 8) * mass * (outer_diameter ** 2 + inner_diameter ** 2)
delta_I = combined_standard_uncertainty(moment_of_inertia)
assert delta_I.unit == "kilogram * meter ** 2"
expectation = np.sqrt((mass.uncertainty * (outer_diameter.value ** 2
+ inner_diameter.value ** 2) / 8) ** 2
+ (mass.value * outer_diameter.value
* outer_diameter.uncertainty / 4) ** 2
+ (mass.value * inner_diameter.value
* inner_diameter.uncertainty / 4) ** 2)
assert pytest.approx(delta_I.value) == expectation
delta_I_relative = combined_standard_uncertainty(moment_of_inertia, relative=True)
expectation = np.sqrt((mass.uncertainty / mass.value) ** 2
+ ((2 * outer_diameter.value * outer_diameter.uncertainty) ** 2
+ (2 * inner_diameter.value * inner_diameter.uncertainty) ** 2)
/ (outer_diameter.value ** 2 + inner_diameter.value ** 2) ** 2)
assert pytest.approx(delta_I_relative.value) == expectation
assert delta_I_relative.unit == "dimensionless"
```
#### File: ueca/ueca/latex.py
```python
import contextlib
from sympy.printing.conventions import split_super_sub
from sympy.printing.latex import translate
@contextlib.contextmanager
def translate_space_latex():
from sympy.printing.latex import LatexPrinter
_deal_with_super_sub = LatexPrinter._deal_with_super_sub
def _deal_with_super_sub_expanded(self, string, style="plain"):
"""
This function is expansion of _deal_with_super_sub (LatexPrinter's method)
Copyright (c) 2006-2020 SymPy Development Team
Copyright (c) 2013-2020 <NAME>
Copyright (c) 2014 <NAME>
Licensed under https://github.com/sympy/sympy/blob/master/LICENSE
"""
if "{" in string:
name, supers, subs = string, [], []
else:
name, supers, subs = split_super_sub(string)
names = [translate(name) for name in name.split(" ")]
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
name = " ".join(names)
# apply the style only to the name
if style == "bold":
name = "\\mathbf{{{}}}".format(name)
# glue all items together:
if supers:
name += "^{%s}" % " ".join(supers)
if subs:
name += "_{%s}" % " ".join(subs)
return name
LatexPrinter._deal_with_super_sub = _deal_with_super_sub_expanded
try:
yield
finally:
LatexPrinter._deal_with_super_sub = _deal_with_super_sub
``` |
{
"source": "A03ki/uecbbs",
"score": 3
} |
#### File: uecbbs/tests/test_api.py
```python
import numpy as np
import unittest
from unittest.mock import Mock, patch
from twissify.api import (has_media, is_photo, is_retweet,
filter_myretweeted_tweets, filter_retweets,
filter_protected_tweets, extract_photo_tweets,
extract_tweet_ids, extract_photos_urls,
extract_photo_urls)
class TestAPI(unittest.TestCase):
def test_has_media(self):
response = [{"user": None, "media": None}, {"user": None}]
expectations = [True, False]
for expectation, status in zip(expectations, response):
tweet = Mock(entities=status)
actual = has_media(tweet)
self.assertEqual(expectation, actual)
@patch("twissify.api.has_media", side_effect=[True, True, False])
def test_is_photo(self, _):
expectations = [True, False, False]
media_types = ["photo", "movie", "photo"]
for expectation, media_type in zip(expectations, media_types):
tweet = Mock(extended_entities={"media": [{"type": media_type}]})
actual = is_photo(tweet)
self.assertEqual(expectation, actual)
def test_is_retweet(self):
expectations = [True, False]
for expectation in expectations:
tweet = Mock(retweeted_status=None)
if not expectation:
del tweet.retweeted_status
actual = is_retweet(tweet)
self.assertEqual(expectation, actual)
def test_filter_myretweeted_tweets(self):
bools = [True, False, True, False, False]
tweets = [Mock(retweeted=bool) for bool in bools]
actuals = filter_myretweeted_tweets(tweets)
expectations = [tweet for tweet, bool in zip(tweets, bools)
if not bool]
np.testing.assert_array_equal(expectations, actuals)
@patch("twissify.api.is_retweet",
side_effect=[True, False, True, False, False])
def test_filter_retweets(self, _):
bools = [True, False, True, False, False]
tweets = [Mock() for _ in bools]
actuals = filter_retweets(tweets)
expectations = [tweet for tweet, bool in zip(tweets, bools)
if not bool]
np.testing.assert_array_equal(expectations, actuals)
def test_filter_protected_tweets(self):
bools = [True, False, True, False, False]
tweets = [Mock(**{"user.protected": bool}) for bool in bools]
actuals = filter_protected_tweets(tweets)
expectations = [tweet for tweet, bool in zip(tweets, bools)
if not bool]
np.testing.assert_array_equal(expectations, actuals)
@patch("twissify.api.is_photo",
side_effect=[True, False, True, False, False])
def test_extract_photo_tweets(self, _):
bools = [True, False, True, False, False]
tweets = [Mock() for _ in bools]
actuals = extract_photo_tweets(tweets)
expectations = [tweet for tweet, bool in zip(tweets, bools) if bool]
np.testing.assert_array_equal(expectations, actuals)
def test_extract_tweet_ids(self):
expectations = [True, False, True, False, False]
tweets = [Mock(id=bool) for bool in expectations]
actuals = extract_tweet_ids(tweets)
np.testing.assert_array_equal(expectations, actuals)
@patch("twissify.api.extract_photo_urls", side_effect=lambda x: x)
@patch("twissify.api.is_photo",
side_effect=[True, True, False, False, True])
def test_extract_photos_urls(self, is_photo, extract_photo_urls):
tweets = [1, 2, 3, 4, 5]
actuals = extract_photos_urls(tweets)
expectations = [1, 2, 5]
self.assertEqual(5, is_photo.call_count)
self.assertEqual(3, extract_photo_urls.call_count)
np.testing.assert_array_equal(expectations, actuals)
def test_extract_photo_urls(self):
expectations = [3, 5, 7]
tweet = Mock(extended_entities={"media": [{"media_url": i}
for i in expectations]})
actuals = extract_photo_urls(tweet)
np.testing.assert_array_equal(expectations, actuals)
if __name__ == "__main__":
unittest.main()
```
#### File: uecbbs/tests/test_storages.py
```python
import unittest
from unittest.mock import Mock
from test_tables import test_ids, insert_test_db
from twissify.tables import TimelineIndex
from twissify.storages import TimelineIndexStorage
class TestTimelineIndexStorage(unittest.TestCase):
def test__create_existing_timeline_name(self):
names = ["home_timeline", "mentions_timeline", "retweets_of_me"]
ids = test_ids(names, 2)
storage = TimelineIndexStorage("sqlite:///:memory:")
insert_test_db(names, ids, storage.session())
for name in names:
with self.subTest(name=name):
with self.assertRaises(ValueError):
storage._create(name)
def test__update_no_existing_timeline_name(self):
names = ["home_timeline", "mentions_timeline", "retweets_of_me"]
ids = test_ids(names, 2)
storage = TimelineIndexStorage("sqlite:///:memory:")
for name, (since_id, max_id) in zip(names, ids):
with self.assertRaises(ValueError):
storage._update(name, since_id=since_id, max_id=max_id)
def test_create_ids(self):
storage = TimelineIndexStorage("sqlite:///:memory:")
expectation_ids = {"since_id": 100, "max_id": 2000}
names = ["timeline"]
tweets = Mock(**expectation_ids)
storage.create_ids(names[0], tweets)
timelineindex = TimelineIndex.find_by_name(names[0], storage.session())
self.assertEqual(timelineindex.since_id, expectation_ids["since_id"])
self.assertEqual(timelineindex.max_id, expectation_ids["max_id"])
def test_update_ids(self):
storage = TimelineIndexStorage("sqlite:///:memory:")
names = ["timeline"]
ids = test_ids(names, 2)
expectation_ids = {"since_id": 100, "max_id": 2000}
session = storage.session()
insert_test_db(names, ids, session)
tweets = Mock(since_id=expectation_ids["since_id"],
max_id=expectation_ids["max_id"])
storage.update_ids(names[0], tweets)
timelineindex = TimelineIndex.find_by_name(names[0], session)
self.assertEqual(timelineindex.since_id, expectation_ids["since_id"])
self.assertEqual(timelineindex.max_id, expectation_ids["max_id"])
if __name__ == "__main__":
unittest.main()
```
#### File: uecbbs/tests/test_tables.py
```python
import unittest
import sqlalchemy.orm as orm
from sqlalchemy import create_engine
from sqlalchemy.sql import text
from twissify.tables import TimelineIndex
class TestTimelineIndex(unittest.TestCase):
def test_find_by_name_default_none(self):
names = ["home_timeline", "mentions_timeline", "retweets_of_me"]
session = create_test_session("sqlite:///:memory:")
create_test_db(session)
for name in names:
with self.subTest(name=name):
actual = TimelineIndex.find_by_name(name, session)
self.assertEqual(actual, None)
def test_find_by_name(self):
names = ["home_timeline", "mentions_timeline", "retweets_of_me"]
session = create_test_session("sqlite:///:memory:")
ids = test_ids(names, 2) # [(0, 1), (2, 3), (4, 5)]
create_test_db(session)
insert_test_db(names, ids, session)
for name, (since_id, max_id) in zip(names, ids):
with self.subTest(name=name, since_id=since_id, max_id=max_id):
actual = TimelineIndex.find_by_name(name, session)
self.assertEqual(actual.name, name)
self.assertEqual(actual.since_id, since_id)
self.assertEqual(actual.max_id, max_id)
def test_all(self):
names = ["home_timeline", "mentions_timeline", "retweets_of_me"]
session = create_test_session("sqlite:///:memory:")
ids = test_ids(names, 2)
create_test_db(session)
insert_test_db(names, ids, session)
actuals = TimelineIndex.all(session)
for name, (since_id, max_id), actual in zip(names, ids,
actuals):
with self.subTest(name=name, since_id=since_id, max_id=max_id):
self.assertEqual(actual.name, name)
self.assertEqual(actual.since_id, since_id)
self.assertEqual(actual.max_id, max_id)
def test_ids(names, n):
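    # Grouper idiom: n references to a single iterator make zip emit consecutive
    # chunks of size n, e.g. test_ids(["a", "b"], 2) -> [(0, 1), (2, 3)].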
ids = [iter(range(len(names)*n))]*n
return list(zip(*ids))
def create_test_session(url):
engine = create_engine(url)
Session = orm.sessionmaker(bind=engine)
session = Session()
return session
def create_test_db(session):
create_query = text("""create table TimelineIndex(name text,
since_id integer, max_id integer);""")
session.execute(create_query)
session.commit()
def insert_test_db(names, ids, session):
rows = [("('{name}', {since_id}, {max_id})"
.format(name=name, since_id=since_id, max_id=max_id))
for name, (since_id, max_id) in zip(names, ids)]
insert_query = text("insert into TimelineIndex values"
+ ", ".join(rows) + ";")
session.execute(insert_query)
session.commit()
if __name__ == "__main__":
unittest.main()
```
#### File: uecbbs/twissify/image.py
```python
import io
import requests
from PIL import Image
def load_image_url(image_url):
"""画像urlからImageオブジェクトとHTTPステータスコードを得る
画像urlに正常にアクセスできたときはImageオブジェクトとHTTPステータスコードを得る
正常にアクセスできなかったときはImageオブジェクトの代わりに ``None`` が返る
Parameters
----------
image_url : str
画像urlの文字列
Returns
-------
tuple of a inheritance of PIL.ImageFile.ImageFile and int
Imageオブジェクト(または ``None`` )とHTTPステータスコードのタプル
Notes
-----
urlが存在しないときは ``ConnectionError`` が呼ばれる
また、画像url以外のurlでは ``UnidentifiedImageError`` が呼ばれる
"""
image = None
response = requests.get(image_url)
if response.status_code == 200:
image = open_image_binary(response.content)
return image, response.status_code
def open_image_binary(image_binary):
"""画像のバイナリデータをImageオブジェクトとして得る
Parameters
----------
image_binary : bytes
画像のバイナリデータ
Returns
-------
a inheritance of PIL.ImageFile.ImageFile
Imageオブジェクト
"""
return Image.open(io.BytesIO(image_binary))
``` |
{
"source": "A03ki/uec_tl_markov",
"score": 3
} |
#### File: uec_tl_markov/uectl/models.py
```python
import json
import markovify
class MarkovChainModel:
def __init__(self, model: markovify.NewlineText):
self.model = model
def generate_sentence(self, max_chars=140, **kwargs) -> str:
"`max_chars` 以内の文字列を生成する"
sentence = None
while sentence is None:
sentence = self.model.make_short_sentence(max_chars, **kwargs)
return sentence.replace(" ", "")
def generate_sentence_with_start(self, beginning: str, **kwargs) -> str:
"`beginning` で始まる文字列を生成する"
sentence = None
while sentence is None:
sentence = self.model.make_sentence_with_start(beginning, **kwargs)
return sentence.replace(" ", "")
@classmethod
def load_json(cls, path: str) -> "MarkovChainModel":
"JSON形式で学習済みモデルを読み込んだ後, 新しいクラスのインスタンスオブジェクトを作成して返す"
with open(path, "r") as f:
json_data = json.load(f)
model = markovify.NewlineText.from_json(json_data)
return cls(model)
def save_json(self, path: str, indent: int = 4) -> None:
"モデルをJSON形式で保存する"
json_data = self.model.to_json()
with open(path, "w") as f:
json.dump(json_data, f, indent=indent)
@classmethod
def train(cls, training_text: str,
state_size: int = 2) -> "MarkovChainModel":
"""モデルの学習後, 新しいクラスのインスタンスオブジェクトを作成して返す
Args:
training_text: モデルの学習に使用する文字列. 各単語は空白文字で
区切られている必要があり, 改行ごとに学習する.
state_size: Optional; 現在から過去までの考慮する状態のサイズ
"""
model = markovify.NewlineText(training_text, state_size=state_size)
return cls(model)
def compile(self, inplace: bool = False) -> None:
"モデルを軽量化する"
self.model = self.model.compile(inplace=inplace)
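# Illustrative usage (the training text below is an assumption, not part of the
# module):
# model = MarkovChainModel.train("a b c\nb c d\n")
# print(model.generate_sentence(max_chars=140))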
```
#### File: uec_tl_markov/uectl/text_filter.py
```python
from collections import OrderedDict
import emoji
import html
import neologdn
import pkgutil
import re
from typing import Dict, List, Union
import yaml
def remove_screen_name(text: str) -> str:
"スクリーンネームを削除した文字列を返す"
return re.sub(r"@[a-zA-Z0-9_]+", "", text)
def remove_url(text: str) -> str:
"URLを削除した文字列を返す"
return re.sub(r'http\S+', "", text)
def remove_hashtag(text: str) -> str:
"ハッシュタグを削除した文字列を返す"
return re.sub(r"#\S+", "", text)
def remove_emoji(text: str) -> str:
"絵文字を削除した文字列を返す"
return "".join([s for s in text if s not in emoji.UNICODE_EMOJI])
def _remove_brackets_and_quotations(text: str) -> str:
"半角括弧と半角引用符(+“)を削除した文字列を返す"
return re.sub(r"[()\[\]\"'“]", "", text)
def dump_text(text: str, word: str) -> str:
"指定した単語が含まれていたとき, 空文字を返す"
if word in text:
return ""
return text
def replace_words(text: str, words_dict: Dict[str, List[str]]) -> str:
for new_word, old_words in words_dict.items():
for old_word in old_words:
text = text.replace(old_word, new_word)
return text
def replace_orthographic_variants(text: str,
                                  yaml_path: Union[str, None] = None) -> str:
    "Unify orthographic variants of words."
    if yaml_path is None:
        _path = "data/orthographic_variants.yml"
        raw_data = pkgutil.get_data("uectl", _path).decode()
        words_dict = yaml.safe_load(raw_data)
    else:
        with open(yaml_path) as f:
            words_dict = yaml.safe_load(f)
    return replace_words(text,
                         OrderedDict(sorted(words_dict.items())))
def format_text(text: str) -> str:
"文字列を整えた結果を返す"
text = remove_screen_name(text)
text = remove_hashtag(text)
text = remove_url(text)
text = html.unescape(text)
text = text.replace("\\n", "").replace("\n", "")
text = neologdn.normalize(text, repeat=5)
text = _remove_brackets_and_quotations(text)
text = remove_emoji(text)
text = text.lower()
text = replace_orthographic_variants(text)
return text
```
#### File: uec_tl_markov/workspace/preprocessing.py
```python
import argparse
import MeCab
from typing import List
from uectl.text_filter import dump_text, format_text
def dump_specific_text(text: str) -> str:
"学習の邪魔になりそうな文字列のとき, 空文字を返す"
ng_words = ["#匿名質問募集中", "のポスト数:", "ツイート数:", "前日比:", "#本田とじゃんけん",
"Twitter家族:", "#NowPlaying", "I'm at", "事前登録",
"人からブロックされています", "ツイ廃結果", "Twitter歴"]
for ng_word in ng_words:
text = dump_text(text, ng_word)
if text == "":
break
return text
def preprocessing(lines: List[str]) -> List[str]:
"""各要素の文字列に対して前処理を行なった結果を返す
Note:
前処理した要素が空文字の時はその要素を結果に含めないので入力と出力の各要素が対応しないことがある
"""
m = MeCab.Tagger("-Owakati")
newlines = [format_text(dump_specific_text(line)) for line in lines]
newlines = [m.parse(newline) for newline in newlines if newline != ""]
return newlines
def main(input_filepath: str, output_filepath: str) -> None:
"""ファイルの各行を整えたあと, 分かち書きを行い, 結果をファイルに書き出す
Note:
整えた結果が空文字の時はその行を書き出さないので入力と出力ファイルの各行が対応しないことがある
"""
with open(input_filepath, "r") as f:
text = f.read()
lines = text.split("\n")
newlines = preprocessing(lines)
with open(output_filepath, "w") as f:
f.write("".join(newlines))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_filepath", "-i", default="input.txt",
type=str, help="テキストのファイルパス")
parser.add_argument("--output_filepath", "-o", default="output.txt",
type=str, help="前処理後のテキストの出力先のファイルパス")
args = parser.parse_args()
main(args.input_filepath, args.output_filepath)
```
#### File: uec_tl_markov/workspace/testing_model.py
```python
import argparse
from uectl.models import MarkovChainModel
def main(input_json_path: str, beginning: str, count: int = 100,
rejection_origin_text: bool = False) -> None:
"`count` で指定した回数だけマルコフ連鎖を用いて文字列を生成し, 標準出力する"
model = MarkovChainModel.load_json(input_json_path)
for _ in range(count):
sentence = model.generate_sentence_with_start(beginning=beginning,
test_output=rejection_origin_text)
print(sentence)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_json_path", "-i", default="model.json",
type=str, help="学習済みモデルJSONファイルパス")
parser.add_argument("--beginning", "-b", type=str, default="___BEGIN__",
help="文章を生成し始める文字列(各単語は空白文字で区切る)")
parser.add_argument("--count", "-c", default=100,
type=int, help="文章を生成する回数")
parser.add_argument("--rejection_origin_text", "-r", action="store_true",
help="学習データとほぼ一致する文章を生成しないようにする")
args = parser.parse_args()
main(args.input_json_path, args.beginning, count=args.count,
rejection_origin_text=args.rejection_origin_text)
``` |
{
"source": "a07061625/swooleyaf_install",
"score": 3
} |
#### File: docs/yapi/apidoc2swagger.py
```python
import json
import common_utils as cu
# Python parameter validation mode
IS_PYTHON_SCHEMA = False
# cookie field added into the header
COOKIE_IN_HEADER_MODEL = {
"default": "",
"description": "",
"in": "header",
"name": "Cookie",
"required": False,
"type": "string"
}
def prompt():
    """Usage information"""
    print('Example: python3 apidoc2swagger.py -s ./api_data.json -d '
          './swagger.json')
    print('-s api_data.json file generated by apidoc, required')
    print('-d converted swagger.json file, optional')
    print('-p convert to compact fields usable for a python schema; omit to convert to yapi-format parameters')
    print('-h help')
    print('\n********************************************')
def api_data_to_swagger(api_data):
"""解析apidoc的json数据
:param api_data: 请求数据
:return: swagger格式json
"""
    # all groups
tags = []
    # all request paths
paths = {}
    # iterate over each request
for item in api_data:
        # request method
request_type = item["type"]
        # request url
url = item["url"]
        # create an entry for this url if it does not exist yet
paths[url] = {} if url not in paths else paths[url]
        # create the request type
if request_type in paths[url]:
raise Exception(item["filename"] + ":" + url + ":有两个相同的请求")
else:
paths[url][request_type] = {}
        # the group of this api
if {"name": item["group"], "description": item["groupTitle"]} \
not in tags:
tags.append({
"name": item["group"],
"description": item["groupTitle"]
})
        # api name
summary = item["title"]
api_description = item[
"description"] if 'description' in item else None
        # get the apidoc header data
api_data_header = item['header']['fields']['Header'] if \
(('header' in item) and (item['header'] != {}) and (
'Header' in item['header']['fields'])) else []
        # parse Header parameters
yapi_request_header = api_data_to_swagger_header_path_query(
api_data_header, "Header")
        # add the cookie field to the header to ease automated testing
# yapi_request_header.append(COOKIE_IN_HEADER_MODEL)
        # get the apidoc Body request parameters
api_data_parameter = item['parameter'] if 'parameter' in item else {}
        # get the apidoc Query request parameters
api_data_query_parameter = api_data_parameter['fields']['Query'] if \
((api_data_parameter != {} and 'fields' in api_data_parameter) and
('Query' in api_data_parameter['fields'])) else []
        # get the apidoc Path request parameters
api_data_path_parameter = api_data_parameter['fields']['Path'] if \
((api_data_parameter != {} and 'fields' in api_data_parameter)
and ('Path' in api_data_parameter['fields'])) else []
        # parse Query parameters
yapi_request_query_parameter = api_data_to_swagger_header_path_query(
api_data_query_parameter, 'Query')
        # parse Path parameters
yapi_request_path_parameter = api_data_to_swagger_header_path_query(
api_data_path_parameter, 'Path')
        # parse Body parameters
        yapi_request_body_parameters = api_data_to_swagger_request_response(
            api_data_parameter, "Parameter", "url: " + url + " name: " +
            summary + " request parameter error")
        # aggregate the header, query, path and body data
parameters = [yapi_request_body_parameters] if \
yapi_request_body_parameters != {} else []
for header in yapi_request_header:
if header != {}:
parameters.append(header)
for query in yapi_request_query_parameter:
if query != {}:
parameters.append(query)
for path in yapi_request_path_parameter:
if path != {}:
parameters.append(path)
        # get the apidoc response parameters
api_data_success = item["success"] if 'success' \
in item else {}
        # parse the response parameters
        responses = api_data_to_swagger_request_response(
            api_data_success, "Success 200",
            "url: " + url + " name: " + summary + " response parameter error")
        # request example
api_data_request_example = api_data_parameter[
'examples'] if 'examples' in api_data_parameter else {}
        # response example
api_data_response_example = api_data_success[
'examples'] if 'examples' in api_data_success else {}
        # request example string
        yapi_request_example = api_data_to_swagger_example(
            "* Request example", api_data_request_example)
        # response example string
        yapi_response_example = api_data_to_swagger_example(
            "* Response example", api_data_response_example)
if api_description is None:
description = yapi_request_example + "\n\n" + yapi_response_example
else:
            # combine into a single string added to the yapi description
            description = "* Notes\n" + api_description + "\n\n" + \
                          yapi_request_example + "\n\n" + yapi_response_example
        # build one request
paths[url][request_type] = {
"tags": [item["group"]],
"summary": summary,
"description": description,
"consumes": [
"application/json"
],
"parameters": parameters,
"responses": responses
}
swagger_json = {
"swagger": "2.0",
"schemes": [
"http"
],
"tags": tags,
"paths": paths
}
return swagger_json
def api_data_to_swagger_request_response(request_param, api_data_type, msg):
"""解析body请求参数和返回参数
:param request_param: 参数
:param api_data_type: 参数类型
:param msg: 错误说明
:return: swagger格式的json数据
"""
properties = {}
required = []
    # check whether there is any data
if request_param == {} or 'fields' not in request_param:
return {}
    # check whether there is data under this field
if api_data_type not in request_param['fields']:
return {}
for field in request_param['fields'][api_data_type]:
param_type = field['type'].lower()
description = cu.noqa_conversion(field['description'])
key = field['field']
parent_location = properties
"""处理逻辑
1.field是否有"."
│
├───是:2.找父节点
│ │
│ └───3.去到4
│
└───否:4.type是否含有"[]"
│
├───是:5.type是否为object
│ │
│ ├───是:6.数组对象模板
│ │
│ └───否:7.数组非对象模板
│
└───否:8.type是否为object
│
├───是:9.对象模板
│
└───否:10.非对象模板
"""
if '.' in key:
            # case with a parent node
field_items = key.split(".")
parent_location_required = None
            # find the parent node
for field_item in field_items:
try:
if field_item not in parent_location:
break
                    # array type
if parent_location[field_item]["type"] == "array":
parent_location_required = parent_location[field_item][
'items']
parent_location = parent_location[field_item]['items'][
'properties']
                    # object type
else:
parent_location_required = parent_location[field_item]
parent_location = parent_location[field_item][
'properties']
except KeyError:
                    # roughly checks whether the format is correct
raise Exception(msg)
            # strip the prefix; if the root node is data and a child is written
            # as data.a.b without declaring a, the error cannot be detected and
            # a is silently ignored
key = field_items[-1]
            # whether this node is optional: True = optional, False = required; required fields are collected in required
if field['optional'] is False:
try:
if 'required' in parent_location_required:
parent_location_required['required'].append(key)
else:
parent_location_required['required'] = [key]
except TypeError:
                    # roughly checks whether the format is correct
raise Exception(msg)
else:
            # whether the root node is optional
if field['optional'] is False:
required.append(key)
if not IS_PYTHON_SCHEMA:
object_item = cu.yapi_swagger_param_template(param_type,
description, field)
else:
object_item = cu.python_json_schema(param_type, field)
parent_location[key] = object_item
if api_data_type == "Parameter":
        # request parameter format
yapi_body = {
"name": "root",
"in": "body",
"schema": {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": properties,
"required": required
}
}
else:
        # response parameter format
yapi_body = {
"200": {
"description": "successful operation",
"schema": {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": properties,
"required": required
}
}
}
return yapi_body
def api_data_to_swagger_header_path_query(requests, param_type):
"""解析header path参数 query参数
:param requests: 请求参数
:param param_type: 参数类型
:return: swagger json格式数据
"""
parameters = []
for request in requests:
parameter = {
"name": request["field"],
"in": param_type.lower(),
"required": bool(1 - request["optional"]),
"type": 'string' if param_type == 'Header' else request["type"],
"default": "",
"description": cu.noqa_conversion(request["description"])
}
if 'defaultValue' in request:
parameter["default"] = request["defaultValue"]
parameters.append(parameter)
return parameters
def api_data_to_swagger_example(param_type, api_data_example):
"""将example转为字符串形式,并且保持格式不变
:param param_type: 请求示例 or 响应示例
:param api_data_example: 示例数据
:return:
"""
if api_data_example == {}:
return ""
if len(api_data_example) > 0:
        # content is a JSON string
yapi_example = "<p>" + param_type + ":<br>" + api_data_example[0][
'content'].replace(
'\n', '<br>\n').replace(' ', ' ') + "</p>\n"
return yapi_example
else:
return ""
if __name__ == '__main__':
argvs = cu.check_argv(prompt)
try:
if 'h' in argvs:
prompt()
else:
            # take the input and output file paths
s_file = argvs['s'] if 's' in argvs else None
d_file = argvs['d'] if 'd' in argvs else './swagger.json'
IS_PYTHON_SCHEMA = True if 'p' in argvs else False
            # read the apidoc file
data = open(s_file, 'r', encoding='utf-8')
            # format conversion
moudel0 = api_data_to_swagger(json.load(data))
api_yapi_data = json.dumps(moudel0, ensure_ascii=False, indent=4,
sort_keys=True)
            # write the new file
f = open(d_file, 'w', encoding='utf-8')
f.write(api_yapi_data)
except Exception as e:
        print('Invalid arguments')
prompt()
raise e
```
#### File: docs/yapi/common_utils.py
```python
import sys
import re
def check_argv(func):
"""检查参数"""
if len(sys.argv) > 1:
argv_map = {}
# 遍历外部传参
for argv in sys.argv:
index = sys.argv.index(argv)
            # an argument that starts with -
if argv[0] == '-':
if index + 1 == len(sys.argv):
argv_map[argv[1::]] = None
else:
# 取后一位参数
try:
argv_value = sys.argv[index + 1]
if argv_value[0] != '-':
argv_map[argv[1::]] = argv_value
elif argv_value[0] == '-':
argv_map[argv[1::]] = None
except Exception:
return func()
            # two consecutive arguments that do not start with -
elif argv[0] != '-' and index != 0:
last_argv = sys.argv[index - 1]
if last_argv[0] != '-':
return func()
        # {arg1: value1, arg2: value2, ...}
return argv_map
def noqa_conversion(desc):
"""去掉多余的标签
:param desc: 备注内容
:return: 返回的备注
"""
    # an overly long field can be marked with " # noqa" so it passes the pep8 79-character check
description = desc.replace(" # noqa", "") if " # noqa" in desc else desc
    # a standalone # noqa marker is parsed by apidoc as <h1>noqa</h1>
description = description.replace("<h1>noqa</h1>", "") if \
"<h1>noqa</h1>" in description else description
description = re.sub(r'<p>|</p>', '', description)
description = re.sub(r'<pre>|</pre>', '', description)
description = re.sub(r'<code>|</code>', '', description)
description = description.strip()
return description
def yapi_swagger_param_template(param_type, description, field):
"""解析apidoc为yapi或在swagger格式的参数模板
:param param_type: 请求类型
:param description: 描述
:param field: 参数
:return:
"""
if '[]' in param_type:
        # array type template
object_item = {
"type": "array",
"items": {
"type": param_type[:-2],
"description": "",
},
"description": description
}
if 'object' == param_type[:-2]:
# 对象数组模板
object_item["items"]["properties"] = {}
else:
# 非数组模板
object_item = {
"type": param_type,
"description": description,
}
if 'object' == param_type:
# 对象类型模板
object_item["properties"] = {}
    # whether a default value is present
if 'defaultValue' in field.keys():
object_item["default"] = field['defaultValue']
    # allowed values
if 'allowedValues' in field.keys():
object_item['enum'] = field['allowedValues']
if 'size' in field.keys():
        # numeric range
if field['type'].lower() in ['number', 'integer']:
            # field['size'] -> re.findall() -> result
# -3--1 -> -3,-1 -> -3,-1
# -1-1 -> -1,-1 -> -1,1
# 1-3 -> 1,-3 -> 1,3
            # match positive/negative integers and decimals
sizes = re.findall(r'-?\d+\.\d+|-?\d+', field['size'])
sizes[0] = float(sizes[0]) if '.' in sizes[0] else int(
sizes[0])
sizes[1] = float(sizes[1]) if '.' in sizes[1] else int(
sizes[1])
func = {
# 1-3
'1': lambda x: -x,
# -1-1
'2': lambda x: -x,
# -3--1
'3': lambda x: x
}
            # infer the sign of the upper bound from the number of - characters
sizes[1] = func[str(field['size'].count('-'))](sizes[1])
object_item['minimum'] = sizes[0]
object_item['maximum'] = sizes[1]
object_item['exclusiveMinimum'] = "true"
object_item['exclusiveMaximum'] = "true"
        # string length range
elif 'string' == field['type'].lower():
sizes = field['size'].split('..')
if sizes[0] != '':
object_item['minLength'] = sizes[0]
object_item['maxLength'] = sizes[1]
return object_item
def python_json_schema(param_type, field):
"""解析apidoc为yapi或在swagger格式的参数模板
:param param_type: 请求类型
:param field: 参数
:return:
"""
if '[]' in param_type:
        # array type template
object_item = {
"type": "array",
"items": {
"type": param_type[:-2]
}
}
if 'object' == param_type[:-2]:
            # array-of-objects template
object_item["items"]["properties"] = {}
else:
        # non-array template
object_item = {
"type": param_type
}
if 'object' == param_type:
            # object type template
object_item["properties"] = {}
# 允许的值
if 'allowedValues' in field.keys():
object_item['enum'] = field['allowedValues']
if 'size' in field.keys():
        # numeric range
if field['type'].lower() in ['number', 'integer']:
            # field['size'] -> re.findall() -> result
# -3--1 -> -3,-1 -> -3,-1
# -1-1 -> -1,-1 -> -1,1
# 1-3 -> 1,-3 -> 1,3
            # match positive/negative integers and decimals
sizes = re.findall(r'-?\d+\.\d+|-?\d+', field['size'])
sizes[0] = float(sizes[0]) if '.' in sizes[0] else int(
sizes[0])
sizes[1] = float(sizes[1]) if '.' in sizes[1] else int(
sizes[1])
func = {
# 1-3
'1': lambda x: -x,
# -1-1
'2': lambda x: -x,
# -3--1
'3': lambda x: x
}
            # infer the sign of the upper bound from the number of - characters
sizes[1] = func[str(field['size'].count('-'))](sizes[1])
object_item['minimum'] = sizes[0]
object_item['maximum'] = sizes[1]
        # string length range
elif 'string' == field['type'].lower():
sizes = field['size'].split('..')
if sizes[0] != '':
object_item['minLength'] = sizes[0]
object_item['maxLength'] = sizes[1]
return object_item
```
#### File: a07061625/swooleyaf_install/demo_click.py
```python
import click
import time
from progressbar import *
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo('Version 1.0')
ctx.exit()
@click.group()
@click.option('-v', '--version', is_flag=True, callback=get_version, expose_value=False, is_eager=True)
def main():
pass
@main.command()
@click.option('-u', '--user', required=True, type=str, help="username")
@click.option('-p', '--password', required=True, type=str, help="password")
@click.option('-t', '--type', required=True, default="phone", type=str, help="account type", show_default=True)
def add_user(user, password, type):
click.echo(f"user:{user} password:{password} type:{type}")
@main.command()
@click.option('-t', '--type', required=True, type=click.Choice(['user', 'admin']))
@click.option('-p', '--password', prompt=True, hide_input=True, confirmation_prompt=True)
def set_password(type, password):
click.echo(f"Your type:{type} password:{password}")
@main.command()
@click.confirmation_option(prompt='Are you sure you want to drop the db?')
def confirm_delete():
click.echo("Dropped all tables!")
@main.command()
def color_text():
click.secho('Hello World!', fg='green')
click.secho('Some more text', bg='blue', fg='white')
click.secho('ATTENTION', blink=True, bold=True)
all_process = {1: 'a', 2: 'b', 3: 'c', 4: 'd'}
@main.command()
def progress_bar():
widgets = ['Progress: ', Percentage(), ' ', Bar('#'), ' ', Timer()]
process_bar = ProgressBar(widgets=widgets, maxval=len(all_process)).start()
# with click.progressbar(all_process) as bar:
for key, val in all_process.items():
time.sleep(key)
process_bar.update(key)
print()
if __name__ == '__main__':
main()
```
#### File: a07061625/swooleyaf_install/helper_install.py
```python
import os
import sys
import click
from configs_pro import *
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
    click.echo('Helper management script')
    click.echo('Version: 1.1')
    click.echo('Author: 姜伟')
    click.echo('Common dependencies: click colorama progressbar')
    click.echo('Environment installation dependency: fabric3')
    click.echo('Automation dependency: pyautogui')
ctx.exit()
@click.group()
@click.option('-v', '--version', is_flag=True, callback=get_version, expose_value=False, is_eager=True)
def main():
pass
def __check_packages(packages):
total_packages = os.popen(python3Prefix + 'list').read()
for name in packages:
if name not in total_packages:
            click.secho('package ' + name + ' is not installed', fg='red')
sys.exit()
def __sy_check(checks):
__check_packages(['click', 'colorama', 'progressbar', 'Fabric3'])
step1 = checks.get('step1', 0)
step2 = checks.get('step2', 0)
err_info = ''
    if step1 <= 0:
        err_info = 'the start step must be greater than 0'
    elif step2 <= 0:
        err_info = 'the end step must be greater than 0'
    elif step1 > step2:
        err_info = 'the start step must be less than or equal to the end step'
if len(err_info) > 0:
click.secho(err_info, fg='red')
sys.exit()
def __sy_handler(params):
__sy_check(params)
step1 = params.get('step1', 0)
step2 = params.get('step2', 0)
init = params.get('init', '-1')
title = params.get('title', '')
command = params.get('command', '')
    if init == '0':
        click.secho(f"Installing {title}, start step: {step1}, end step: {step2}, without system initialization", fg='green')
    else:
        click.secho(f"Installing {title}, start step: {step1}, end step: {step2}, with system initialization", fg='green')
os.system(command)
@main.command()
@click.option('--step1', default=1, type=int, help="start step")
@click.option('--step2', default=9999, type=int, help="end step")
@click.option('--init', default='1', type=click.Choice(['0', '1']), help="system initialization flag 0: skip 1: initialize")
@click.option('--env', required=True, type=click.Choice(['dev', 'product']), help="environment type dev: development product: production")
def sy_front(step1, step2, init, env):
"""安装前端环境"""
params = {
'step1': step1,
'step2': step2,
'init': init,
'env': env,
        'title': 'front-end environment',
'command': commandInstallPrefix + 'sy_front:step1=' + str(step1) + ',step2=' + str(step2) + ',init=' + init + ',env=' + env
}
__sy_handler(params)
@main.command()
@click.option('--step1', default=1, type=int, help="start step")
@click.option('--step2', default=9999, type=int, help="end step")
@click.option('--init', default='1', type=click.Choice(['0', '1']), help="system initialization flag 0: skip 1: initialize")
@click.option('--env', required=True, type=click.Choice(['dev', 'product']), help="environment type dev: development product: production")
def sy_backend(step1, step2, init, env):
"""安装后端环境"""
params = {
'step1': step1,
'step2': step2,
'init': init,
'env': env,
        'title': 'back-end environment',
'command': commandInstallPrefix + 'sy_backend:step1=' + str(step1) + ',step2=' + str(step2) + ',init=' + init + ',env=' + env
}
__sy_handler(params)
@main.command()
@click.option('--step1', default=1, type=int, help="start step")
@click.option('--step2', default=9999, type=int, help="end step")
@click.option('--init', default='1', type=click.Choice(['0', '1']), help="system initialization flag 0: skip 1: initialize")
@click.option('--env', required=True, type=click.Choice(['dev', 'product']), help="environment type dev: development product: production")
def sy_fb(step1, step2, init, env):
"""安装前后端混合环境"""
params = {
'step1': step1,
'step2': step2,
'init': init,
'env': env,
        'title': 'mixed front/back-end environment',
'command': commandInstallPrefix + 'sy_fb:step1=' + str(step1) + ',step2=' + str(step2) + ',init=' + init + ',env=' + env
}
__sy_handler(params)
if __name__ == '__main__':
main()
```
#### File: jinstall/centos7/swooleyaf.py
```python
from jinstall.centos7.envs.Front import *
from jinstall.centos7.envs.Backend import *
from jinstall.centos7.envs.FrontBackend import *
sy_dicts = {
'front': {
'role': 'syfront'
},
'backend': {
'role': 'sybackend'
},
'frontbackend': {
'role': 'syfb'
}
}
@roles(sy_dicts['front']['role'])
def env_front(params):
obj = Front()
obj.install(params)
@roles(sy_dicts['backend']['role'])
def env_backend(params):
obj = Backend()
obj.install(params)
@roles(sy_dicts['frontbackend']['role'])
def env_front_backend(params):
obj = FrontBackend()
obj.install(params)
sy_dicts['front']['func'] = env_front
sy_dicts['backend']['func'] = env_backend
sy_dicts['frontbackend']['func'] = env_front_backend
def sy_dict_get(tag):
sy_dict = sy_dicts.get(tag, '')
if not isinstance(sy_dict, dict):
        print('The environment tag does not exist')
sys.exit()
return sy_dict
```
#### File: centos7/tools/SyCache.py
```python
from jinstall.centos7.utils.Tool import *
class SyCache:
@staticmethod
def install_redis(params: dict):
"""
安装缓存redis
注: 默认账号:default 密码:<PASSWORD>
"""
Tool.check_local_files([
'resources/cache/redis/redis-6.2.0.tar.gz',
'resources/cache/redis/redisbloom.so',
'resources/cache/redis/redisearch.so',
'resources/cache/redis/redisgears.so',
'resources/cache/redis/redisgraph.so',
'resources/cache/redis/redistimeseries.so',
'resources/cache/redis/redisgears-dependencies.linux-centos7-x64.1.0.5.tar',
'configs/swooleyaf/redis/redis',
'configs/swooleyaf/redis/redis.conf',
'configs/swooleyaf/redis/users.acl',
])
Tool.upload_file_fabric({
'/resources/cache/redis/redis-6.2.0.tar.gz': 'remote/redis-6.2.0.tar.gz',
'/resources/cache/redis/redisbloom.so': 'remote/redisbloom.so',
'/resources/cache/redis/redisearch.so': 'remote/redisearch.so',
'/resources/cache/redis/redisgears.so': 'remote/redisgears.so',
'/resources/cache/redis/redisgraph.so': 'remote/redisgraph.so',
'/resources/cache/redis/redistimeseries.so': 'remote/redistimeseries.so',
'/resources/cache/redis/redisgears-dependencies.linux-centos7-x64.1.0.5.tar': 'remote/redisgears-dependencies.linux-centos7-x64.1.0.5.tar',
})
with cd(install_configs['path.package.remote']):
run('mkdir %s' % install_configs['redis.path.install'])
run('mkdir %s/modules' % install_configs['redis.path.install'])
run('mkdir %s' % install_configs['redis.path.log'])
run('mkdir /etc/redis')
run('touch %s/redis.log' % install_configs['redis.path.log'])
run('chmod a+x redisbloom.so && mv redisbloom.so %s/modules/' % install_configs['redis.path.install'])
run('chmod a+x redisearch.so && mv redisearch.so %s/modules/' % install_configs['redis.path.install'])
run('chmod a+x redisgears.so && mv redisgears.so %s/modules/' % install_configs['redis.path.install'])
run('chmod a+x redisgraph.so && mv redisgraph.so %s/modules/' % install_configs['redis.path.install'])
run('chmod a+x redistimeseries.so && mv redistimeseries.so %s/modules/' % install_configs['redis.path.install'])
run('mkdir -p /var/opt/redislabs/modules/rg')
run('tar --warning=no-timestamp -xf redisgears-dependencies.linux-centos7-x64.1.0.5.tar')
run('mv python3_1.0.5/ /var/opt/redislabs/modules/rg/')
run('rm -rf redisgears-dependencies.linux-centos7-x64.1.0.5.tar')
run('tar -zxf redis-6.2.0.tar.gz')
run('cd redis-6.2.0/ && make && make PREFIX=/usr/local/redis install')
run('rm -rf redis-6.2.0/ && rm -rf redis-6.2.0.tar.gz')
redis_service_remote = '/etc/init.d/redis'
Tool.upload_file_fabric({
'/configs/swooleyaf/redis/redis': redis_service_remote,
})
run('sed -i "6iREDISPORT=%s" %s' % (install_configs['redis.port'], redis_service_remote), False)
run('chmod +x %s' % redis_service_remote)
redis_acl_remote = ''.join([install_configs['redis.path.install'], '/users.acl'])
redis_conf_remote = ''.join(['/etc/redis/', install_configs['redis.port'], '.conf'])
Tool.upload_file_fabric({
'/configs/swooleyaf/redis/users.acl': redis_acl_remote,
'/configs/swooleyaf/redis/redis.conf': redis_conf_remote,
})
run('echo -e "aclfile %s" >> %s' % (redis_acl_remote, redis_conf_remote), False)
run('echo -e "bind 127.0.0.1 %s" >> %s' % (env.host, redis_conf_remote), False)
run('echo -e "pidfile /var/run/redis_%s.pid" >> %s' % (install_configs['redis.port'], redis_conf_remote), False)
run('echo -e "port %s" >> %s' % (install_configs['redis.port'], redis_conf_remote), False)
run('echo -e "logfile \"%s/redis.log\"" >> %s' % (install_configs['redis.path.log'], redis_conf_remote), False)
run('echo -e "dir %s" >> %s' % (install_configs['redis.path.log'], redis_conf_remote), False)
run('echo -e "loadmodule %s/modules/redisbloom.so" >> %s' % (install_configs['redis.path.install'], redis_conf_remote), False)
run('echo -e "loadmodule %s/modules/redisearch.so" >> %s' % (install_configs['redis.path.install'], redis_conf_remote), False)
run('echo -e "loadmodule %s/modules/redisgears.so" >> %s' % (install_configs['redis.path.install'], redis_conf_remote), False)
run('echo -e "loadmodule %s/modules/redisgraph.so" >> %s' % (install_configs['redis.path.install'], redis_conf_remote), False)
run('echo -e "loadmodule %s/modules/redistimeseries.so" >> %s' % (install_configs['redis.path.install'], redis_conf_remote), False)
run('systemctl daemon-reload')
run('chkconfig redis on')
@staticmethod
def install_redis_insight(params: dict):
"""安装redis客户端工具-RedisInsight"""
Tool.check_local_files([
'resources/cache/redis/redisinsight-linux64-1.6.3',
])
with cd(install_configs['path.package.remote']):
run('mkdir %s' % install_configs['redis.insight.path.install'])
run('mkdir %s/data' % install_configs['redis.insight.path.install'])
run('mkdir %s/bin' % install_configs['redis.insight.path.install'])
run('mkdir %s' % install_configs['redis.insight.path.log'])
run('echo "export REDISINSIGHT_HOST=%s" >> /etc/profile' % install_configs['redis.insight.host'], False)
run('echo "export REDISINSIGHT_PORT=%s" >> /etc/profile' % install_configs['redis.insight.port'], False)
run('echo "export REDISINSIGHT_HOME_DIR=%s/data" >> /etc/profile' % install_configs['redis.insight.path.install'], False)
run('echo "export LOG_DIR=%s" >> /etc/profile' % install_configs['redis.insight.path.log'], False)
redis_insight_remote = ''.join([install_configs['redis.insight.path.install'], '/bin/redisinsight'])
Tool.upload_file_fabric({
'/resources/cache/redis/redisinsight-linux64-1.6.3': redis_insight_remote,
})
run('chmod +x %s' % redis_insight_remote)
@staticmethod
def install_memcache_server(params: dict):
"""安装缓存memcache"""
Tool.check_local_files([
'resources/cache/memcache/memcached-1.5.12.tar.gz',
])
Tool.upload_file_fabric({
'/resources/cache/memcache/memcached-1.5.12.tar.gz': 'remote/memcached-1.5.12.tar.gz',
})
with cd(install_configs['path.package.remote']):
run('yum -y install libevent libevent-devel')
run('mkdir %s' % install_configs['memcached.path.install'])
run('tar -zxf memcached-1.5.12.tar.gz')
run('cd memcached-1.5.12/ && ./configure --prefix=%s && make && make install' % install_configs['memcached.path.install'])
run('rm -rf memcached-1.5.12/ && rm -rf memcached-1.5.12.tar.gz')
@staticmethod
def install_memcache_lib(params: dict):
"""安装缓存memcache依赖"""
Tool.check_local_files([
'resources/cache/memcache/libmemcached-1.0.18.tar.gz',
])
Tool.upload_file_fabric({
'/resources/cache/memcache/libmemcached-1.0.18.tar.gz': 'remote/libmemcached-1.0.18.tar.gz',
})
with cd(install_configs['path.package.remote']):
run('mkdir %s' % install_configs['libmemcached.path.install'])
run('tar -zxf libmemcached-1.0.18.tar.gz')
run('cd libmemcached-1.0.18/ && ./configure --prefix=%s --with-memcached && make && make install' % install_configs['libmemcached.path.install'])
run('rm -rf libmemcached-1.0.18/ && rm -rf libmemcached-1.0.18.tar.gz')
```
#### File: centos7/tools/SyJs.py
```python
from jinstall.centos7.utils.Tool import *
class SyJs:
@staticmethod
def install_nodejs(params: dict):
"""安装nodejs"""
Tool.check_local_files([
'resources/lang/js/node-v14.15.4-linux-x64.tar.xz',
])
Tool.upload_file_fabric({
'/resources/lang/js/node-v14.15.4-linux-x64.tar.xz': 'remote/node-v14.15.4-linux-x64.tar.xz',
})
with settings(warn_only=True):
run('mkdir %s && mkdir %s' % (install_configs['node.path.log'], install_configs['node.forever.path.log']))
with cd(install_configs['path.package.remote']):
run('tar -xJf node-v14.15.4-linux-x64.tar.xz')
run('mv node-v14.15.4-linux-x64/ /usr/local/nodejs')
run('rm -rf node-v14.15.4-linux-x64.tar.xz')
run('npm install -g nodemon forever && npm install -g cnpm --registry=https://registry.npm.taobao.org')
run('npm config set registry https://registry.npm.taobao.org --global')
run('npm config set disturl https://npm.taobao.org/dist --global')
```
#### File: centos7/tools/SyMonitor.py
```python
from jinstall.centos7.utils.Tool import *
class SyMonitor:
@staticmethod
def install_netdata(params: dict):
"""安装netdata"""
Tool.check_local_files([
'resources/monitor/netdata/kickstart-static64.sh',
'resources/monitor/netdata/netdata-v1.29.3.gz.run',
'resources/monitor/netdata/sha256sums.txt',
])
Tool.upload_file_fabric({
'/resources/monitor/netdata/kickstart-static64.sh': 'remote/kickstart-static64.sh',
'/resources/monitor/netdata/netdata-v1.29.3.gz.run': 'remote/netdata-v1.29.3.gz.run',
'/resources/monitor/netdata/sha256sums.txt': 'remote/sha256sums.txt',
})
with cd(install_configs['path.package.remote']):
run('chmod a+x kickstart-static64.sh && mv kickstart-static64.sh /tmp/')
run('mv netdata-v1.29.3.gz.run /tmp/')
run('mv sha256sums.txt /tmp/')
            # A confirmation prompt will pop up; choose YES
run('cd /tmp && bash /tmp/kickstart-static64.sh --local-files /tmp/netdata-v1.29.3.gz.run /tmp/sha256sums.txt')
run('rm -rf /tmp/kickstart-static64.sh')
run('rm -rf /tmp/netdata-v1.29.3.gz.run')
run('rm -rf /tmp/sha256sums.txt')
run('echo 1 >/sys/kernel/mm/ksm/run')
run('echo 1000 >/sys/kernel/mm/ksm/sleep_millisecs')
run('systemctl start netdata')
```
#### File: centos7/tools/SyVersionControl.py
```python
from jinstall.centos7.utils.Tool import *
class SyVersionControl:
@staticmethod
def install_git(params: dict):
"""安装git"""
Tool.check_local_files([
'resources/version-control/git/git-2.25.1.tar.gz',
'resources/version-control/git/git-lfs',
])
run('yum -y remove git')
with settings(warn_only=True):
run('mkdir /usr/local/git')
Tool.upload_file_fabric({
'/resources/version-control/git/git-2.25.1.tar.gz': 'remote/git-2.25.1.tar.gz',
})
with cd(install_configs['path.package.remote']):
run('tar -zxf git-2.25.1.tar.gz')
run('cd git-2.25.1/ && ./configure --prefix=/usr/local/git && make all && make install && cd ../ && rm -rf git-2.25.1/ && rm -rf git-2.25.1.tar.gz')
run('git config --global user.name "%s"' % install_configs['git.user.name'])
run('git config --global user.email "%s"' % install_configs['git.user.email'])
Tool.upload_file_fabric({
'/resources/version-control/git/git-lfs': '/usr/local/git/bin',
})
run('chmod a+x /usr/local/git/bin/git-lfs')
@staticmethod
def install_gitea(params: dict):
"""安装gitea"""
Tool.check_local_files([
'resources/version-control/git/gitea-1.12.4-linux-amd64',
])
run('mkdir %s' % install_configs['gitea.path.install'])
Tool.upload_file_fabric({
'/resources/version-control/git/gitea-1.12.4-linux-amd64': ''.join([install_configs['gitea.path.install'], '/gitea']),
})
run('chmod a+x %s/gitea' % install_configs['gitea.path.install'])
service_remote_gitea = '/lib/systemd/system/gitea.service'
run('touch %s' % service_remote_gitea, False)
run('echo -e "[Unit]" >> %s' % (service_remote_gitea), False)
run('echo -e "Description=gitea" >> %s' % (service_remote_gitea), False)
run('echo -e "[Service]" >> %s' % (service_remote_gitea), False)
run('echo -e "User=root" >> %s' % (service_remote_gitea), False)
run('echo -e "ExecStart=%s/gitea web >%s/log/console.log 2>&1" >> %s' % (install_configs['gitea.path.install'], install_configs['gitea.path.install'], service_remote_gitea), False)
run('echo -e "Restart=on-abort" >> %s' % (service_remote_gitea), False)
run('echo -e "[Install]" >> %s' % (service_remote_gitea), False)
run('echo -e "WantedBy=multi-user.target" >> %s' % (service_remote_gitea), False)
run('chmod 754 %s' % service_remote_gitea)
run('systemctl enable gitea')
```
#### File: jinstall/constants/Project.py
```python
from jinstall.constants.Base import Base
class Project(Base):
def __init__(self):
super(Project, self).__init__()
``` |
{
"source": "a07458666/StockCrawlerSendSlack",
"score": 3
} |
#### File: StockCrawlerSendSlack/dags/crawl.py
```python
import os
import json
import csv
import time
from datetime import date
import requests
class CrawlerController(object):
    '''Split targets across several Crawlers to avoid an overly long request URL'''
def __init__(self, targets, max_stock_per_crawler=50):
self.crawlers = []
for index in range(0, len(targets), max_stock_per_crawler):
crawler = Crawler(targets[index:index + max_stock_per_crawler])
self.crawlers.append(crawler)
def run(self):
data = []
for crawler in self.crawlers:
data.extend(crawler.get_data())
return data
class Crawler(object):
'''Request to Market Information System'''
def __init__(self, targets):
endpoint = 'http://mis.twse.com.tw/stock/api/getStockInfo.jsp'
        # Add 1000 seconds (in milliseconds) to guard against clock inaccuracy
timestamp = int(time.time() * 1000 + 1000000)
channels = '|'.join('tse_{}.tw'.format(target) for target in targets)
self.query_url = '{}?_={}&ex_ch={}'.format(endpoint, timestamp, channels)
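        # Illustrative example (values assumed, not from the source): for
        # targets ['2330', '0050'] the constructed URL looks like
        #   http://mis.twse.com.tw/stock/api/getStockInfo.jsp?_=1616000000000&ex_ch=tse_2330.tw|tse_0050.tw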
def get_data(self):
try:
# Get original page to get session
req = requests.session()
req.get('http://mis.twse.com.tw/stock/index.jsp',
headers={'Accept-Language': 'zh-TW'})
response = req.get(self.query_url)
content = json.loads(response.text)
except Exception as err:
print(err)
data = []
else:
data = content['msgArray']
return data
class Recorder(object):
'''Record data to csv'''
def __init__(self, path='data'):
self.folder_path = '{}/{}'.format(path, date.today().strftime('%Y%m%d'))
if not os.path.isdir(self.folder_path):
os.mkdir(self.folder_path)
def record_to_csv(self, data):
#print('data ', data)
for row in data:
try:
file_path = '{}/{}.csv'.format(self.folder_path, row['c'])
with open(file_path, 'a') as output_file:
writer = csv.writer(output_file, delimiter=',')
writer.writerow([
                        row['t'],  # data time
                        row['n'],  # stock name
                        row['z'],  # last traded price
                        row['y']   # previous close
                        # row['tv'],  # volume of the current tick
                        # row['v'],   # accumulated volume for the day
                        # row['a'],   # best five ask prices
                        # row['f'],   # best five ask volumes
                        # row['b'],   # best five bid prices
                        # row['g']    # best five bid volumes
])
except Exception as err:
print(err)
def RunCrawl():
print('os.path ', os.getcwd())
targets = [_.strip() for _ in open('stocknumber.csv', 'r')]
controller = CrawlerController(targets)
data = controller.run()
recorder = Recorder()
recorder.record_to_csv(data)
if __name__ == '__main__':
RunCrawl()
```
#### File: StockCrawlerSendSlack/dags/stock_app.py
```python
import os
import time
import json
import logging
from datetime import datetime, timedelta
from selenium import webdriver
from airflow import DAG
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.latest_only_operator import LatestOnlyOperator
import pycurl
import requests
from selenium.webdriver.chrome.options import Options
from crawl import RunCrawl
from datetime import date
from glob import glob
import pandas as pd
import ntpath
#local_tz = pendulum.timezone("Asia/Taipei")
default_args = {
'owner': 'AndySu',
'start_date': datetime(2020, 7, 26, 9, 5),
'retries': 2,
'retry_delay': timedelta(minutes=1)
}
def get_slack_url():
file_dir = os.path.dirname(__file__)
json_path = os.path.join(file_dir, '../data/slack.json')
with open(json_path, 'r') as fp:
url = json.load(fp)['url']
return url
def get_stock_history(**context):
RunCrawl()
def readCsv(path):
df = pd.read_csv(path)
    num = ntpath.basename(path)[:-4]  # stock number from '<num>.csv' (currently unused)
index = df.shape[0] - 1
dawnRaid = calculationDawnRaid(df.values[index][2], df.values[index][3])
return str(df.values[index][1]) + ' , ' + str(df.values[index][2]) + ', ' + str(df.values[index][3]) + ' , ' + str((dawnRaid)) + '%\n'
def calculationDawnRaid(nowPrice, yesterdayPrice):
if (nowPrice == '-' or yesterdayPrice == '-'):
return '-'
    nowPrice = float(nowPrice)
yesterdayPrice = float(yesterdayPrice)
value = (nowPrice - yesterdayPrice) / yesterdayPrice * 100
return round(value, 2)
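# Worked example for calculationDawnRaid (illustrative numbers):
#   calculationDawnRaid(105.0, 100.0) -> (105.0 - 100.0) / 100.0 * 100 = 5.0 (%)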
def get_message_text():
file_dir = os.path.dirname(__file__)
message_path = os.path.join(file_dir, '../data')
files_dir = '{}/{}/{}'.format(message_path, date.today().strftime('%Y%m%d'), '*')
filesList = glob(files_dir)
message = ""
for path in filesList:
message += readCsv(path)
return message
def send_notification(**context):
send_msg(get_message_text())
return
def send_msg(msg):
# HTTP POST Request
s_url = get_slack_url()
dict_headers = {'Content-type': 'application/json'}
dict_payload = {
"text": msg}
json_payload = json.dumps(dict_payload)
rtn = requests.post(s_url, data=json_payload, headers=dict_headers)
print(rtn.text)
#with DAG('stock_app', default_args=default_args, schedule_interval = '*/20 9-13 * * *') as dag:
with DAG('stock_app', default_args=default_args, schedule_interval = '*/1 * * * *') as dag:
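    # Cron notes: '*/1 * * * *' fires every minute; the commented-out
    # variant above ('*/20 9-13 * * *') would fire every 20 minutes
    # between 09:00 and 13:59, roughly the Taiwan market session.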
# define tasks
#latest_only = LatestOnlyOperator(task_id='latest_only')
get_stock_history = PythonOperator(
task_id='get_stock_history',
python_callable=get_stock_history,
provide_context=True
)
send_notification = PythonOperator(
task_id='send_notification',
python_callable=send_notification,
provide_context=True
)
# define workflow
get_stock_history >> send_notification
``` |
{
"source": "a07458666/SwinSR",
"score": 2
} |
#### File: a07458666/SwinSR/main_test_swinir.py
```python
import argparse
import cv2
import glob
import numpy as np
from collections import OrderedDict
import os
import torch
import requests
from models.network_swinir import SwinIR as net
from utils import utils_image as util
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
type=str,
default="color_dn",
help="classical_sr",
)
parser.add_argument(
"--scale",
type=int,
default=1, help="scale factor: 1, 2, 3, 4, 8"
) # 1 for dn and jpeg car
parser.add_argument(
"--noise", type=int, default=15, help="noise level: 15, 25, 50")
parser.add_argument(
"--jpeg", type=int, default=40, help="scale factor: 10, 20, 30, 40"
)
parser.add_argument(
"--training_patch_size",
type=int,
default=128,
help="patch size used in training SwinIR. ",
)
parser.add_argument(
"--large_model",
action="store_true",
help="use large model, only provided for real image sr",
)
parser.add_argument(
"--model_path",
type=str,
default="",
)
parser.add_argument(
"--folder_lq",
type=str,
default=None,
help="input low-quality test image folder",
)
parser.add_argument(
"--folder_gt",
type=str,
default=None,
help="input ground-truth test image folder",
)
parser.add_argument(
"--tile",
type=int,
default=None,
help="Tile size, None for no tile during testing (testing as a whole)",
)
parser.add_argument(
"--tile_overlap", type=int, default=32, help=""
)
args = parser.parse_args()
if args.folder_gt is None:
args.folder_gt = args.folder_lq
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
# set up model
if os.path.exists(args.model_path):
print(f"loading model from {args.model_path}")
else:
os.makedirs(os.path.dirname(args.model_path), exist_ok=True)
gitHub = "https://github.com/"
nameStr = "JingyunLiang/SwinIR/"
url = gitHub + nameStr + "releases/download/v0.0/{}".format(
os.path.basename(args.model_path)
)
        r = requests.get(url, allow_redirects=True)
        print(f"downloading model {args.model_path}")
        with open(args.model_path, "wb") as f:
            f.write(r.content)
model = define_model(args)
model.eval()
model = model.to(device)
# setup folder and path
folder, save_dir, border, window_size = setup(args)
os.makedirs(save_dir, exist_ok=True)
test_results = OrderedDict()
test_results["psnr"] = []
test_results["ssim"] = []
test_results["psnr_y"] = []
test_results["ssim_y"] = []
test_results["psnr_b"] = []
psnr, ssim, psnr_y, ssim_y, psnr_b = 0, 0, 0, 0, 0
for idx, path in enumerate(sorted(glob.glob(os.path.join(folder, "*")))):
# read image
imgname, img_lq, img_gt = get_image_pair(
args, path
) # image to HWC-BGR, float32
tmp = (2, 0, 1)
img_lq = np.transpose(
img_lq if img_lq.shape[2] == 1 else img_lq[:, :, [2, 1, 0]], tmp
        )  # HWC-BGR to CHW-RGB
img_lq = (
torch.from_numpy(img_lq).float().unsqueeze(0).to(device)
) # CHW-RGB to NCHW-RGB
if args.folder_lq == args.folder_gt:
img_gt = None
# inference
with torch.no_grad():
# pad input image to be a multiple of window_size
_, _, h_old, w_old = img_lq.size()
h_pad = (h_old // window_size + 1) * window_size - h_old
w_pad = (w_old // window_size + 1) * window_size - w_old
img_lq = torch.cat([img_lq, torch.flip(img_lq, [2])], 2)[
:, :, : h_old + h_pad, :
]
img_lq = torch.cat([img_lq, torch.flip(img_lq, [3])], 3)[
:, :, :, : w_old + w_pad
]
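            # Padding sketch (illustrative): with window_size=8 and h_old=30,
            # h_pad = (30 // 8 + 1) * 8 - 30 = 2; the flip-and-concat mirrors
            # border rows/columns so height and width become multiples of the
            # window size before the forward pass.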
output = test(img_lq, model, args, window_size)
output = output[..., : h_old * args.scale, : w_old * args.scale]
# save image
output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
if output.ndim == 3:
output = np.transpose(
output[[2, 1, 0], :, :], (1, 2, 0)
            )  # CHW-RGB to HWC-BGR
output = (output * 255.0).round().astype(np.uint8) # float32 to uint8
cv2.imwrite(f"{save_dir}/{imgname}_pred.png", output)
# evaluate psnr/ssim/psnr_b
if img_gt is not None:
# float32 to uint8
img_gt = (img_gt * 255.0).round().astype(np.uint8)
# crop gt
img_gt = img_gt[: h_old * args.scale, : w_old * args.scale, ...]
img_gt = np.squeeze(img_gt)
psnr = util.calculate_psnr(output, img_gt, border=border)
ssim = util.calculate_ssim(output, img_gt, border=border)
test_results["psnr"].append(psnr)
test_results["ssim"].append(ssim)
if img_gt.ndim == 3: # RGB image
output_y = util.bgr2ycbcr(output.astype(np.float32) / 255.0)
output_y *= 255.0
img_gt_y = util.bgr2ycbcr(img_gt.astype(np.float32) / 255.0)
img_gt_y *= 255.0
psnr_y = util.calculate_psnr(output_y, img_gt_y, border=border)
ssim_y = util.calculate_ssim(output_y, img_gt_y, border=border)
test_results["psnr_y"].append(psnr_y)
test_results["ssim_y"].append(ssim_y)
if args.task in ["jpeg_car"]:
psnr_b = util.calculate_psnrb(output, img_gt, border=border)
test_results["psnr_b"].append(psnr_b)
print(
"Testing {:d} {:20s} - PSNR: {:.2f} dB; SSIM: {:.4f}; "
"PSNR_Y: {:.2f} dB; SSIM_Y: {:.4f}; "
"PSNR_B: {:.2f} dB.".format(
idx, imgname, psnr, ssim, psnr_y, ssim_y, psnr_b
)
)
else:
print("Testing {:d} {:20s}".format(idx, imgname))
# summarize psnr/ssim
tr = test_results
if img_gt is not None:
ave_psnr = sum(test_results["psnr"]) / len(test_results["psnr"])
ave_ssim = sum(test_results["ssim"]) / len(test_results["ssim"])
print(
"\n{} \n-- Average PSNR/SSIM(RGB): {:.2f} dB; {:.4f}".format(
save_dir, ave_psnr, ave_ssim
)
)
if img_gt.ndim == 3:
ave_psnr_y = sum(tr["psnr_y"]) / len(tr["psnr_y"])
ave_ssim_y = sum(tr["ssim_y"]) / len(tr["ssim_y"])
print(
"-- Average PSNR_Y/SSIM_Y: {:.2f} dB; {:.4f}".format(
ave_psnr_y, ave_ssim_y
)
)
if args.task in ["jpeg_car"]:
ave_psnr_b = sum(tr["psnr_b"]) / len(tr["psnr_b"])
print("-- Average PSNR_B: {:.2f} dB".format(ave_psnr_b))
def define_model(args):
# 001 classical image sr
if args.task == "classical_sr":
model = net(
upscale=args.scale,
in_chans=3,
img_size=48,
window_size=8,
img_range=1.0,
depths=[6, 6, 6, 6, 6, 6],
embed_dim=180,
num_heads=[6, 6, 6, 6, 6, 6],
mlp_ratio=2,
upsampler="pixelshuffle",
resi_connection="3conv",
)
param_key_g = "params"
# 002 lightweight image sr
# use 'pixelshuffledirect' to save parameters
elif args.task == "lightweight_sr":
model = net(
upscale=args.scale,
in_chans=3,
img_size=64,
window_size=8,
img_range=1.0,
depths=[6, 6, 6, 6],
embed_dim=60,
num_heads=[6, 6, 6, 6],
mlp_ratio=2,
upsampler="pixelshuffledirect",
resi_connection="1conv",
)
param_key_g = "params"
# 003 real-world image sr
elif args.task == "real_sr":
if not args.large_model:
# use 'nearest+conv' to avoid block artifacts
model = net(
upscale=4,
in_chans=3,
img_size=64,
window_size=8,
img_range=1.0,
depths=[6, 6, 6, 6, 6, 6],
embed_dim=180,
num_heads=[6, 6, 6, 6, 6, 6],
mlp_ratio=2,
upsampler="nearest+conv",
resi_connection="1conv",
)
else:
# larger model size; use '3conv' to
# save parameters and memory;
# use ema for GAN training
model = net(
upscale=4,
in_chans=3,
img_size=64,
window_size=8,
img_range=1.0,
depths=[6, 6, 6, 6, 6, 6, 6, 6, 6],
embed_dim=240,
num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
mlp_ratio=2,
upsampler="nearest+conv",
resi_connection="3conv",
)
param_key_g = "params_ema"
# 004 grayscale image denoising
elif args.task == "gray_dn":
model = net(
upscale=1,
in_chans=1,
img_size=128,
window_size=8,
img_range=1.0,
depths=[6, 6, 6, 6, 6, 6],
embed_dim=180,
num_heads=[6, 6, 6, 6, 6, 6],
mlp_ratio=2,
upsampler="",
resi_connection="1conv",
)
param_key_g = "params"
# 005 color image denoising
elif args.task == "color_dn":
model = net(
upscale=1,
in_chans=3,
img_size=128,
window_size=8,
img_range=1.0,
depths=[6, 6, 6, 6, 6, 6],
embed_dim=180,
num_heads=[6, 6, 6, 6, 6, 6],
mlp_ratio=2,
upsampler="",
resi_connection="1conv",
)
param_key_g = "params"
# 006 JPEG compression artifact reduction
# use window_size=7 because JPEG encoding
# uses 8x8; use img_range=255
    # because it's slightly better than 1
elif args.task == "jpeg_car":
model = net(
upscale=1,
in_chans=1,
img_size=126,
window_size=7,
img_range=255.0,
depths=[6, 6, 6, 6, 6, 6],
embed_dim=180,
num_heads=[6, 6, 6, 6, 6, 6],
mlp_ratio=2,
upsampler="",
resi_connection="1conv",
)
param_key_g = "params"
pretrained_model = torch.load(args.model_path)
model.load_state_dict(
pretrained_model[param_key_g]
if param_key_g in pretrained_model.keys()
else pretrained_model,
strict=True,
)
return model
def setup(args):
# 001 classical image sr/ 002 lightweight image sr
if args.task in ["classical_sr", "lightweight_sr"]:
save_dir = f"results/swinir_{args.task}_x{args.scale}"
folder = args.folder_gt
border = args.scale
window_size = 8
# 003 real-world image sr
elif args.task in ["real_sr"]:
save_dir = f"results/swinir_{args.task}_x{args.scale}"
if args.large_model:
save_dir += "_large"
folder = args.folder_lq
border = 0
window_size = 8
# 004 grayscale image denoising/ 005 color image denoising
elif args.task in ["gray_dn", "color_dn"]:
save_dir = f"results/swinir_{args.task}_noise{args.noise}"
folder = args.folder_gt
border = 0
window_size = 8
# 006 JPEG compression artifact reduction
elif args.task in ["jpeg_car"]:
save_dir = f"results/swinir_{args.task}_jpeg{args.jpeg}"
folder = args.folder_gt
border = 0
window_size = 7
return folder, save_dir, border, window_size
def get_image_pair(args, path):
(imgname, imgext) = os.path.splitext(os.path.basename(path))
# 001 classical image sr/ 002 lightweight image sr
# (load lq-gt image pairs)
if args.task in ["classical_sr", "lightweight_sr"]:
img_gt = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.0
cv_int = cv2.IMREAD_COLOR
img_lq = (
cv2.imread(f"{args.folder_lq}/{imgname}{imgext}", cv_int).astype(
np.float32
)
/ 255.0
)
# 003 real-world image sr (load lq image only)
elif args.task in ["real_sr"]:
img_gt = None
img_lq = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32)
img_lq /= 255.0
# 004 grayscale image denoising
# (load gt image and generate lq image on-the-fly)
elif args.task in ["gray_dn"]:
img_gt = cv2.imread(path, cv2.IMREAD_GRAYSCALE).astype(np.float32)
img_gt /= 255.0
np.random.seed(seed=0)
img_lq = img_gt + np.random.normal(0, args.noise / 255.0, img_gt.shape)
img_gt = np.expand_dims(img_gt, axis=2)
img_lq = np.expand_dims(img_lq, axis=2)
# 005 color image denoising
# (load gt image and generate lq image on-the-fly)
elif args.task in ["color_dn"]:
img_gt = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.0
np.random.seed(seed=0)
img_lq = img_gt + np.random.normal(0, args.noise / 255.0, img_gt.shape)
# 006 JPEG compression artifact reduction
# (load gt image and generate lq image on-the-fly)
elif args.task in ["jpeg_car"]:
img_gt = cv2.imread(path, 0)
result, encimg = cv2.imencode(
".jpg", img_gt, [int(cv2.IMWRITE_JPEG_QUALITY), args.jpeg]
)
img_lq = cv2.imdecode(encimg, 0)
img_gt = np.expand_dims(img_gt, axis=2).astype(np.float32) / 255.0
img_lq = np.expand_dims(img_lq, axis=2).astype(np.float32) / 255.0
return imgname, img_lq, img_gt
def test(img_lq, model, args, window_size):
if args.tile is None:
# test the image as a whole
output = model(img_lq)
else:
# test the image tile by tile
b, c, h, w = img_lq.size()
tile = min(args.tile, h, w)
strMsg = "tile size should be a multiple of window_size"
assert tile % window_size == 0, strMsg
tile_overlap = args.tile_overlap
sf = args.scale
stride = tile - tile_overlap
h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
E = torch.zeros(b, c, h * sf, w * sf).type_as(img_lq)
W = torch.zeros_like(E)
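        # Tiling sketch (illustrative): h=100, tile=48, tile_overlap=32 gives
        # stride=16 and h_idx_list=[0, 16, 32, 48, 52]; overlapping patch
        # outputs are summed into E, per-pixel hit counts into W, and the
        # final output is the overlap average E / W.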
for h_idx in h_idx_list:
for w_idx in w_idx_list:
                in_patch = img_lq[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
out_patch = model(in_patch)
out_patch_mask = torch.ones_like(out_patch)
E[
...,
h_idx * sf: (h_idx + tile) * sf,
w_idx * sf: (w_idx + tile) * sf,
].add_(out_patch)
W[
...,
h_idx * sf: (h_idx + tile) * sf,
w_idx * sf: (w_idx + tile) * sf,
].add_(out_patch_mask)
output = E.div_(W)
return output
if __name__ == "__main__":
main()
``` |
{
"source": "a07458666/UncertaintyFlow",
"score": 2
} |
#### File: a07458666/UncertaintyFlow/cnfModel.py
```python
from math import log, pi
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from module.flow import cnf
class CNFModel(pl.LightningModule):
def __init__(self, learning_rate, batch_size):
super().__init__()
self.save_hyperparameters()
flow_modules = '8-8-8-8-8'
cond_size = 17
num_blocks = 1
inputDim = 1
self.flow = cnf(inputDim, flow_modules, cond_size, num_blocks)
def forward(self, x, context=None, logpx=None, integration_times=None):
y = self.flow(x, context, logpx, integration_times)
return y
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
def training_step(self, batch, batch_idx):
x, y = batch
approx21, delta_log_p2 = self.forward(x[0].squeeze(1), x[1], torch.zeros(self.hparams.batch_size, x[0].shape[2], 1).to(x[0]))
        approx2 = self.standard_normal_logprob(approx21).view(self.hparams.batch_size, -1).sum(1, keepdim=True)
delta_log_p2 = delta_log_p2.view(self.hparams.batch_size, x[0].shape[2], 1).sum(1)
log_p2 = (approx2 - delta_log_p2)
loss = -log_p2.mean()
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
approx21, delta_log_p2 = self.forward(x[0].squeeze(1), x[1], torch.zeros(self.hparams.batch_size, x[0].shape[2], 1).to(x[0]))
        approx2 = self.standard_normal_logprob(approx21).view(self.hparams.batch_size, -1).sum(1, keepdim=True)
delta_log_p2 = delta_log_p2.view(self.hparams.batch_size, x[0].shape[2], 1).sum(1)
log_p2 = (approx2 - delta_log_p2)
val_loss = -log_p2.mean()
self.log("val_loss", val_loss)
@staticmethod
def standard_normal_logprob(z):
dim = z.size(-1)
log_z = -0.5 * dim * log(2 * pi)
return log_z - z.pow(2) / 2
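    # Sanity check (illustrative): in 1-D, standard_normal_logprob(0)
    # = -0.5 * log(2 * pi) ≈ -0.9189, the log-density of N(0, 1) at 0.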
```
#### File: module/condition_sampler/flow_sampler.py
```python
import os
from math import log, pi
import torch
import numpy as np
from tqdm import tqdm
from torch import optim
from torch.utils import data
from scipy.stats import norm
from module.flow import build_model
class PModel:
@staticmethod
def logprob(z):
dim = z.size(-1)
log_z = -0.5 * dim * log(2 * pi)
return log_z - z.pow(2) / 2
@staticmethod
def prob(z):
return 1/(2*pi)**0.5 * np.exp(-((z*z)/2))
@staticmethod
def invcdf(q):
return norm.ppf(q)
@staticmethod
def sample(shape) -> np.ndarray:
return np.random.normal(0, 1, shape)
class FlowSampler:
def __init__(self, shape, flow_modules, num_blocks, gpu=0, pmodel=PModel) -> None:
self.pmodel = pmodel
self.shape = shape
self.gpu = gpu
input_dim = 1
for dim in self.shape:
input_dim *= dim
self.input_dim = input_dim
def cnf(input_dim, dims, num_blocks):
dims = tuple(map(int, dims.split("-")))
model = build_model(input_dim, dims, 1, num_blocks, True).cuda()
return model
self.prior = cnf(input_dim, flow_modules, num_blocks).cuda(self.gpu)
def fit(self, x, epoch=10, lr=1e-2, save_model=False, save_dir=None, batch=32) -> list:
self.prior.train()
class MyDataset(data.Dataset):
def __init__(self, x, transform=None):
self.x = x
self.transform = transform
def __getitem__(self, index):
x = self.x[index]
if self.transform is not None:
x = self.transform(x)
return x
def __len__(self):
return len(self.x)
my_dataset = MyDataset(x=torch.Tensor(x).cuda(self.gpu))
train_loader = data.DataLoader(my_dataset, shuffle=True, batch_size=batch)
optimizer = optim.Adam(self.prior.parameters(), lr=lr)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epoch)
loss_list = []
for i in tqdm(range(epoch)):
for x in train_loader:
loss = - self.__logp(x).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_list.append(loss.item())
scheduler.step()
if save_model and save_dir is not None:
path = os.path.join(save_dir, 'sampler_' + str(i).zfill(2) + '.pt')
self.save(path)
if save_model and save_dir is not None:
path = os.path.join(save_dir, 'sampler_last.pt')
self.save(path)
return loss_list
def save(self, path) -> None:
torch.save(
self.prior.state_dict(),
path
)
def load(self, path) -> None:
self.prior.load_state_dict(torch.load(path))
def sample(self, n=1) -> torch.Tensor:
self.prior.eval()
with torch.no_grad():
z = self.pmodel.sample((n, self.input_dim))
z = torch.tensor(z).float().to(self.gpu)
x = self.prior(z, torch.zeros(n, 1, 1).to(z), reverse=True)
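            # reverse=True runs the flow backwards: base samples z ~ N(0, I)
            # are mapped through the learned transform to data-space samples x.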
return x.view((-1,)+self.shape)
def logprob(self, x) -> torch.Tensor:
self.prior.eval()
with torch.no_grad():
return self.__logp(x)
def __logp(self, x) -> torch.Tensor:
x = x.view(x.size()[0], 1, -1)
# delta_p = torch.zeros(x.size()).to(x)
context = torch.zeros(x.size()[0], 1, 1).to(x)
delta_p = torch.zeros(x.shape[0], x.shape[1], 1).to(x)
# print("x : ", x.size())
# print("context : ", context.size())
# print("delta_p : ", delta_p.size())
z, delta_log_p = self.prior(x, context, delta_p)
log_p_z = self.pmodel.logprob(z).view(x.shape[0], -1).sum(1, keepdim=True)
delta_log_p = delta_log_p.view(x.shape[0], 1, -1).sum(1)
log_p_x = (log_p_z - delta_log_p)
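        # Change of variables: delta_log_p is the log-density change
        # accumulated along the CNF trajectory (the instantaneous
        # change-of-variables term), so log p_x(x) = log p_z(z) - delta_log_p.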
return log_p_x
```
#### File: UncertaintyFlow/module/flow.py
```python
from .odefunc import ODEfunc, ODEnet
from .normalization import MovingBatchNorm1d
from .cnf import CNF, SequentialFlow
import torch.nn as nn
# def _dropoutCondition(X, condition_pose, drop_prob):
# X = X.float()
# assert 0 <= condition_pose < X.shape[0]
# assert 0 <= drop_prob <= 1
# keep_prob = 1. - drop_prob
# mask = torch.ones(X.shape).to(X)
# mask[condition_pose] = (torch.randn(1) < keep_prob).float()
# return X * mask * (torch.tensor(X.shape[0]).to(X) / torch.sum(mask))
# class dropoutCondition(nn.Module):
# def __init__(self, condition_pose, drop_prob):
# super().__init__()
# assert 0 <= drop_prob <= 1
# self.condition_pose = condition_pose
# self.keep_prob = 1. - drop_prob
# def forward(self, x):
# assert 0 <= self.condition_pose < x.shape[0]
# x = x.float()
# mask = torch.ones(x.shape).to(x)
# mask[self.condition_pose] = (torch.randn(1) < self.keep_prob).float()
# return x * mask * (torch.tensor(X.shape[0]).to(X) / torch.sum(mask))
def count_nfe(model):
class AccNumEvals(object):
def __init__(self):
self.num_evals = 0
def __call__(self, module):
if isinstance(module, CNF):
self.num_evals += module.num_evals()
accumulator = AccNumEvals()
model.apply(accumulator)
return accumulator.num_evals
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def count_total_time(model):
class Accumulator(object):
def __init__(self):
self.total_time = 0
def __call__(self, module):
if isinstance(module, CNF):
self.total_time = self.total_time + module.sqrt_end_time * module.sqrt_end_time
accumulator = Accumulator()
model.apply(accumulator)
return accumulator.total_time
def build_model(input_dim, hidden_dims, context_dim, num_blocks, conditional, context_encode_dim=0):
def build_cnf():
diffeq = ODEnet(
hidden_dims=hidden_dims,
input_shape=(input_dim,),
context_dim=context_dim,
layer_type='concatsquash',
nonlinearity='tanh',
            context_encode_dim=context_encode_dim,
)
odefunc = ODEfunc(
diffeq=diffeq,
)
cnf = CNF(
odefunc=odefunc,
T=1.0,
train_T=True,
conditional=conditional,
solver='dopri5',
use_adjoint=True,
atol=1e-5,
rtol=1e-5,
)
return cnf
chain = [build_cnf() for _ in range(num_blocks)]
bn_layers = [MovingBatchNorm1d(input_dim, bn_lag=0, sync=False)
for _ in range(num_blocks)]
bn_chain = [MovingBatchNorm1d(input_dim, bn_lag=0, sync=False)]
for a, b in zip(chain, bn_layers):
bn_chain.append(a)
bn_chain.append(b)
chain = bn_chain
model = SequentialFlow(chain)
return model
def cnf(input_dim, dims, zdim, num_blocks, encode_dims=0):
dims = tuple(map(int, dims.split("-")))
model = build_model(input_dim, dims, zdim, num_blocks, True, encode_dims).cuda()
print("Number of trainable parameters of Point CNF: {}".format(count_parameters(model)))
return model
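# Minimal usage sketch (illustrative arguments, assumes a CUDA device):
#   model = cnf(input_dim=1, dims='8-8-8', zdim=17, num_blocks=1)
# builds num_blocks CNF blocks interleaved with MovingBatchNorm1d layers
# inside a SequentialFlow.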
```
#### File: UncertaintyFlow/module/resnet.py
```python
import torch.nn as nn
from torchvision import models
import torch
from torch import Tensor
class MyResNet(nn.Module):
def __init__(self, in_channels=1, out_features = 512):
super(MyResNet, self).__init__()
# bring resnet
self.model = models.resnet18()
# original definition of the first layer on the renset class
# self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.model.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.model.fc = nn.Linear(in_features=512, out_features=out_features, bias=True)
def forward(self, x):
return self.model(x)
def forward_flatten(self, x: Tensor) -> Tensor:
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.model.avgpool(x)
x = torch.flatten(x, 1)
# x = self.fc(x)
return x
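# Usage sketch (illustrative shapes): for x of shape (N, 1, 28, 28),
#   MyResNet(in_channels=1).forward_flatten(x) -> (N, 512) pre-fc features,
#   while forward(x) applies the replaced fc head -> (N, out_features).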
```
#### File: a07458666/UncertaintyFlow/test_flow.py
```python
import os
import numpy as np
import argparse
import torch
from trainer import UncertaintyTrainer
from module.config import loadConfig, showConfig
try:
import wandb
except ImportError:
    wandb = None
    print("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)")
def main(config, device, model_path):
trainer = UncertaintyTrainer(config, device)
trainer.load(model_path)
trainer.loadValDataset()
trainer.sample()
if __name__ == '__main__':
# args
parser = argparse.ArgumentParser(description="Uncertainty trainer")
parser.add_argument("--modelPath", type=str)
parser.add_argument("--gpu", type=str, default="0")
parser.add_argument("--config", type=str, default="")
args = parser.parse_args()
# config
if (args.config == ""):
args.config = os.path.dirname(args.modelPath) + "/config.yaml"
config = loadConfig(args.config)
showConfig(config)
if (args.gpu != ""):
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
device = torch.device("cuda" if torch.cuda.is_available() and args.gpu != "-1" else "cpu")
# main
main(config, device, args.modelPath)
```
#### File: a07458666/UncertaintyFlow/test_image_flow.py
```python
import os
import numpy as np
import argparse
import pandas
import torch
from trainer import UncertaintyTrainer
from module.config import loadConfig, showConfig
try:
import wandb
except ImportError:
    wandb = None
    print("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)")
def main(config, device, model_path, encoder_path):
trainer = UncertaintyTrainer(config, device)
csv_path = "./" + config["output_folder"] + ".csv"
trainer.setDataFrame(csv_path)
trainer.load(model_path)
trainer.load_encoder(encoder_path)
trainer.loadValImageDataset()
# trainer.sampleImageAcc()
corruptions = {
# "CIFAR10": [(0, cor) for cor in range(1, 6)],
"CIFAR10": [],
# "CIFAR100": [(0, cor) for cor in range(1, 6)],
"MNIST": [],
# "Fashion": [],
# "SVHN": []
}
rotations = {
"CIFAR10": [],
# "CIFAR100": [],
"MNIST": [(rot, 0) for rot in range(15, 181, 15)],
# "Fashion": [],
# "SVHN": []
}
target_datasets = {
"MNIST": ["Fashion"],
# "Fashion": ["MNIST", "KMNIST"],
"CIFAR10": ["SVHN"],
# "CIFAR100": ["SVHN"],
# "SVHN": ["CIFAR10"]
}
for i in range(10):
err_list, ll_list = trainer.rot_measurements(rotations, corruptions)
err_props = trainer.rejection_measurements(target_datasets)
if __name__ == '__main__':
# args
parser = argparse.ArgumentParser(description="Uncertainty trainer")
parser.add_argument("--modelPath", type=str)
parser.add_argument("--encoderPath", type=str)
parser.add_argument("--gpu", type=str, default="0")
parser.add_argument("--config", type=str, default="")
args = parser.parse_args()
# config
if (args.config == ""):
args.config = os.path.dirname(args.modelPath) + "/config.yaml"
config = loadConfig(args.config)
showConfig(config)
if (args.gpu != ""):
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
device = torch.device("cuda" if torch.cuda.is_available() and args.gpu != "-1" else "cpu")
# main
main(config, device, args.modelPath, args.encoderPath)
``` |
{
"source": "a0919610611/Library_Dokcer",
"score": 3
} |
#### File: Library_Dokcer/web/database_check.py
```python
import os
import sys
import MySQLdb
def database_check():
dbname = os.environ.get('MYSQL_DATABASE')
user = os.environ.get('MYSQL_USER')
password = os.environ.get('MYSQL_PASSWORD')
host = "db"
port = 3306
print("HOST: {host}:{port}, DB: {dbname}, USER: {user}".format(
dbname=dbname,
user=user,
host=host,
port=port))
try:
MySQLdb.connect(
db=dbname,
user=user,
passwd=password,
host=host,
port=port)
    except MySQLdb.Error as err:
        print(err)
        sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
database_check()
``` |
{
"source": "a0919610611/Library",
"score": 2
} |
#### File: Library/Library/urls.py
```python
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.schemas import SchemaGenerator
from rest_framework.views import APIView
from rest_framework_swagger import renderers
from rest_framework_swagger.views import get_swagger_view
class SwaggerSchemaView(APIView):
permission_classes = [AllowAny, ]
# authentication_classes = []
renderer_classes = [
renderers.OpenAPIRenderer,
renderers.SwaggerUIRenderer,
]
def get(self, request):
generator = SchemaGenerator()
schema = generator.get_schema(request=request)
return Response(schema)
schema_view = get_swagger_view(title='Library')
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include('api.urls')),
url(r'^$', SwaggerSchemaView.as_view()),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
``` |
{
"source": "a0919610611/My-blog",
"score": 2
} |
#### File: My-blog/article/views.py
```python
from django.shortcuts import *
from django.http import *
from .models import *
from datetime import *
from django.views.generic import *
from django.core.urlresolvers import *
from django.views.generic.edit import *
from blog.forms import *
import markdown2
class ArticleListView(ListView):
template_name='homepage.html'
context_object_name='article_list'
def get_queryset(self):
article_list = Article.objects.filter(status='p')
for article in article_list:
article.content = markdown2.markdown(article.content,extras=['fenced-code-blocks'],)
return article_list
def get_context_data(self, **kwargs):
kwargs['category_list'] = Category.objects.all().order_by('name')
kwargs['tag_list'] = Tag.objects.all().order_by('name')
return super(ArticleListView, self).get_context_data(**kwargs)
def article_draft_list(request):
if request.user.is_superuser:
article_list=Article.objects.all().filter(status='d')
for article in article_list:
article.content=markdown2.markdown(article.content,extras=['fenced-code-blocks'],)
return render(request,'draft_list.html',locals())
else:
return HttpResponseRedirect('/')
def admin_article_list(request):
article_list=Article.objects.all()
return render(request,'admin_article_list.html',locals())
def publish(request,article_id):
article=Article.objects.get(pk=article_id)
article.publish()
article.save()
return HttpResponseRedirect(reverse('article:admin_article_list'))
def unpublish(request,article_id):
article=Article.objects.get(pk=article_id)
article.unpublish()
article.save()
return HttpResponseRedirect(reverse('article:admin_article_list'))
class ArticleDetailView(DetailView):
model=Article
template_name='detail.html'
context_object_name="article"
pk_url_kwarg = 'article_id'
def get(self,request,*args,**kwargs):
obj=self.get_object()
if obj.status=='d' and not request.user.is_superuser:
return HttpResponseRedirect('/')
else:
            return super(ArticleDetailView, self).get(request, *args, **kwargs)
def get_object(self, queryset=None):
obj = super(ArticleDetailView, self).get_object()
obj.content = markdown2.markdown(obj.content, extras=['fenced-code-blocks'], )
return obj
def get_context_data(self, **kwargs):
kwargs['comment_list'] = self.object.blogcomment_set.all()
kwargs['form'] = BlogCommentForm()
return super(ArticleDetailView, self).get_context_data(**kwargs)
class CategoryView(ListView):
template_name = "homepage.html"
context_object_name = "article_list"
def get_queryset(self):
article_list = Article.objects.filter(category=self.kwargs['category_id'],status='p')
for article in article_list:
article.content = markdown2.markdown(article.content,extras=['fenced-code-blocks'],)
return article_list
def get_context_data(self, **kwargs):
kwargs['category_list'] = Category.objects.all().order_by('name')
kwargs['tag_list'] = Tag.objects.all().order_by('name')
return super(CategoryView, self).get_context_data(**kwargs)
class CommentPostView(FormView):
form_class=BlogCommentForm
template_name='detail.html'
def form_valid(self,form):
target_article=get_object_or_404(Article,pk=self.kwargs['article_id'])
comment=form.save(commit=False)
comment.article=target_article
comment.save()
self.success_url = target_article.get_absolute_url()
return HttpResponseRedirect(self.success_url)
def form_invalid(self,form):
target_article=get_object_or_404(Article,pk=self.kwargs['article_id'])
return render(self.request, 'detail.html', {
'form': form,
'article': target_article,
'comment_list': target_article.blogcomment_set.all(),
})
class TagView(ListView):
template_name = 'homepage.html'
context_object_name = 'article_list'
def get_queryset(self):
article_list=Article.objects.all().filter(tags=self.kwargs['tag_id'],status='p')
for article in article_list:
article.content=markdown2.markdown(article.content,extras=['fenced-code-blocks'],)
return article_list
def get_context_data(self, **kwargs):
kwargs['tag_list']=Tag.objects.all().order_by('name')
return super(TagView,self).get_context_data(**kwargs)
``` |
{
"source": "a092devs/Torrent-Api-py",
"score": 3
} |
#### File: Torrent-Api-py/torrents/bitsearch.py
```python
from bs4 import BeautifulSoup
import time
import asyncio
import aiohttp
from helper.asyncioPoliciesFix import decorator_asyncio_fix
import re
from helper.html_scraper import Scraper
class Bitsearch:
def __init__(self):
self.BASE_URL = 'https://bitsearch.to'
self.LIMIT = None
def _parser(self, htmls):
try:
for html in htmls:
soup = BeautifulSoup(html, 'lxml')
my_dict = {
'data': []
}
for divs in soup.find_all('li', class_='search-result'):
info = divs.find("div", class_='info')
name = info.find('h5', class_='title').find('a').text
url = info.find('h5', class_='title').find('a')['href']
category = info.find('div').find(
'a', class_='category').text
                    stats = info.find('div', class_='stats').find_all('div')
                    if not stats:
                        continue  # skip malformed results that have no stats block
                    downloads = stats[0].text
                    size = stats[1].text
                    seeders = stats[2].text.strip()
                    leechers = stats[3].text.strip()
                    date = stats[4].text
links = divs.find("div", class_='links').find_all('a')
magnet = links[1]['href']
torrent = links[0]['href']
my_dict['data'].append({
'name': name,
'size': size,
'seeders': seeders,
'leechers': leechers,
'category': category,
'hash': re.search(r'([{a-f\d,A-F\d}]{32,40})\b', magnet).group(0),
'magnet': magnet,
'torrent': torrent,
'url': self.BASE_URL + url,
'date': date,
'downloads': downloads
})
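                    # The 'hash' field above pulls the 32-40 hex-character
                    # infohash out of a link like magnet:?xt=urn:btih:<infohash>&...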
if len(my_dict['data']) == self.LIMIT:
break
try:
                    # 20 search results are shown per page, so take the
                    # ceiling of the total result count divided by 20
                    total_results = int(soup.select(
                        'body > main > div.container.mt-2 > div > div:nth-child(1) > div > span > b')[0].text)
                    total_pages = -(-total_results // 20) or 1
current_page = int(soup.find('div', class_='pagination').find(
'a', class_='active').text)
my_dict['current_page'] = current_page
my_dict['total_pages'] = int(total_pages)
except:
pass
return my_dict
except:
return None
async def search(self, query, page, limit):
async with aiohttp.ClientSession() as session:
start_time = time.time()
self.LIMIT = limit
url = self.BASE_URL + \
'/search?q={}&page={}'.format(
query, page)
return await self.parser_result(start_time, url, session)
async def parser_result(self, start_time, url, session):
html = await Scraper().get_all_results(session, url)
results = self._parser(html)
if results != None:
results['time'] = time.time() - start_time
results['total'] = len(results['data'])
return results
return results
async def trending(self, category, page, limit):
async with aiohttp.ClientSession() as session:
start_time = time.time()
self.LIMIT = limit
url = self.BASE_URL + '/trending'
return await self.parser_result(start_time, url, session)
``` |
{
"source": "a0937027831/recipe-app-api",
"score": 2
} |
#### File: app/recipe/test_recipe_api.py
```python
import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe,Tag, Ingredient
from recipe.serializers import RecipeSerializer,RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
# return URL for recipe image upload
return reverse('recipe:recipe-upload-image',args=[recipe_id])
def detail_url(recipe_id):
# return recipe detail URL
return reverse('recipe:recipe-detail',args=[recipe_id])
def sample_tag(user,name='Main course'):
# Create and return a sample tag
return Tag.objects.create(user=user,name=name)
def sample_ingredient(user,name='Cinnamon'):
# Create and return a sample ingredient
return Ingredient.objects.create(user=user,name=name)
def sample_recipe(user,**params):
    # ** packs the caller's keyword arguments into the params dict
# create and return a sample recipe
defaults = {
'title':'Sample recipe',
'time_minutes':10,
'price':5.00
}
    # update() lets caller-supplied params override or extend the defaults
    defaults.update(params)
    # when calling, the ** operator does the reverse: it unpacks the dict into keyword arguments
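    # Illustrative example (hypothetical values):
    #   sample_recipe(user, title='Soup') -> params == {'title': 'Soup'};
    #   defaults.update(params) overrides the default title, and **defaults
    #   unpacks the merged dict into keyword arguments for objects.create.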
return Recipe.objects.create(user=user,**defaults)
class PublicRecipeApiTests(TestCase):
# Test unauthenticated recipe API access
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
# test unauthentication is required
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code,status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
# Test unauthenticated recipe API access
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'<EMAIL>',
'<PASSWORD>'
)
self.client.force_authenticate(self.user)
def test_retrieve_recips(self):
# test retrieving a list of recipes
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code,status.HTTP_200_OK)
self.assertEqual(res.data,serializer.data)
def test_recipe_limited_to_user(self):
# test retrieving recipes for user
user2 = get_user_model().objects.create_user(
'<EMAIL>',
'<PASSWORD>'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes,many=True)
self.assertEqual(res.status_code,status.HTTP_200_OK)
self.assertEqual(len(res.data),1)
self.assertEqual(res.data,serializer.data)
def test_view_recipe_detail(self):
# Test viewing a recipe detail
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data,serializer.data)
def test_create_basic_recipe(self):
# Test creating recipe
payload = {
'title':'Chocolate cheesecake',
'time_minutes': 30,
'price':5.00
}
res = self.client.post(RECIPES_URL,payload)
self.assertEqual(res.status_code,status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
        # iterate over the payload keys and compare each value;
        # getattr returns the named attribute of the recipe object
for key in payload.keys():
self.assertEqual(payload[key],getattr(recipe,key))
def test_create_recipe_with_tags(self):
# test creating a recipe with tags
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Dessert')
payload = {
'title' : 'Avocado lime cheesecake',
'tags' : [tag1.id ,tag2.id],
'time_minutes':60,
'price':20.00
}
res = self.client.post(RECIPES_URL,payload)
self.assertEqual(res.status_code,status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(),2)
self.assertIn(tag1,tags)
self.assertIn(tag2,tags)
def test_create_recipe_with_ingredients(self):
# test creating recipe with ingredients
ingredient1 = sample_ingredient(user=self.user,name='Prawns')
ingredient2 = sample_ingredient(user=self.user,name='Ginger')
payload = {
            'title':'Thai prawn red curry',
'ingredients':[ingredient1.id,ingredient2.id],
'time_minutes':20,
'price':7.00
}
res = self.client.post(RECIPES_URL,payload)
self.assertEqual(res.status_code,status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id = res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(),2)
self.assertIn(ingredient1,ingredients)
self.assertIn(ingredient2,ingredients)
def test_partial_update_recipe(self):
# test updating a recipe with patch
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user,name='Curry')
payload = {
'title':'Chicken tikka',
'tags':[new_tag.id]
}
url = detail_url(recipe.id)
self.client.patch(url,payload)
        # the update happened server-side, so refresh from the DB or the in-memory object stays stale
recipe.refresh_from_db()
self.assertEqual(recipe.title,payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags),1)
self.assertIn(new_tag,tags)
def test_full_update_recipe(self):
# test updating a recipe with put
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title':'Spaghetti carbonara',
'time_minutes':25,
'price': 5.00
}
url = detail_url(recipe.id)
self.client.put(url,payload)
        # PUT replaces the whole object (all fields must be resent), unlike PATCH which updates only the supplied fields
recipe.refresh_from_db()
self.assertEqual(recipe.title,payload['title'])
self.assertEqual(recipe.time_minutes,payload['time_minutes'])
self.assertEqual(recipe.price,payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags),0)
class RecipeImageUploadTests(TestCase):
def setUp(self):
self.client= APIClient()
self.user = get_user_model().objects.create_user(
'<EMAIL>',
'<PASSWORD>'
)
self.client.force_authenticate(self.user)
self.recipe = sample_recipe(user=self.user)
def tearDown(self):
self.recipe.image.delete()
def test_upload_image_to_recipe(self):
# test uploading an image to recipe
url = image_upload_url(self.recipe.id)
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB',(10,10))
img.save(ntf, format='JPEG')
ntf.seek(0)
res = self.client.post(url,{'image':ntf},format='multipart')
self.recipe.refresh_from_db()
self.assertEqual(res.status_code,status.HTTP_200_OK)
self.assertIn('image',res.data)
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
# test uploading an invalid image
url = image_upload_url(self.recipe.id)
res = self.client.post(url,{'image':'notimage'},format='multipart')
self.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)
def test_filter_recipes_by_tags(self):
# test returning recipes with specific tags
recipe1 = sample_recipe(user=self.user,title='Thai vegetable curry')
recipe2 = sample_recipe(user=self.user,title='Aubergine with tahini')
tag1 = sample_tag(user=self.user,name='Vegan')
tag2 = sample_tag(user=self.user,name='Vegetarian')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3 = sample_recipe(user=self.user,title='Fish and chips')
res = self.client.get(
RECIPES_URL,
{'tags':f'{tag1.id},{tag2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data,res.data)
self.assertIn(serializer2.data,res.data)
self.assertNotIn(serializer3.data,res.data)
def test_filter_recipes_by_ingredients(self):
# test returning recipes with specific ingredients
recipe1 = sample_recipe(user=self.user,title='Posh beans on toast')
recipe2 = sample_recipe(user=self.user,title='Chicken cacciatore')
ingredient1 = sample_ingredient(user=self.user,name='Feta cheese')
ingredient2 = sample_ingredient(user=self.user,name='Chicken')
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
recipe3 = sample_recipe(user=self.user,title='Steak and mushrooms')
res = self.client.get(
RECIPES_URL,
{'ingredients':f'{ingredient1.id},{ingredient2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
``` |