max_stars_repo_path (string, 3-269) | max_stars_repo_name (string, 4-119) | max_stars_count (int64, 0-191k) | id (string, 1-7) | content (string, 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
cyder/cydns/models.py | ngokevin/cyder | 1 | 12792751 | from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import models
import cyder
from cyder.cydns.domain.models import Domain, _check_TLD_condition
from cyder.cydns.mixins import ObjectUrlMixin
from cyder.cydns.validation import validate_label, validate_name
from cyder.settings import CYDNS_BASE_URL
class CydnsRecord(models.Model, ObjectUrlMixin):
"""
This class provides common functionality that many DNS record
classes share. This includes a foreign key to the ``domain`` table
and a ``label`` CharField. This class also inherits from the
``ObjectUrlMixin`` class to provide the ``get_absolute_url``,
``get_edit_url``, and ``get_delete_url`` functions.
This class does validation on the ``label`` field. Call
``clean_all`` to trigger the validation functions. Failure to
validate will raise a ``ValidationError``.
    If you plan on using the ``unique_together`` constraint on a Model
    that inherits from ``CydnsRecord``, you must include ``domain`` and
    ``label`` in it explicitly if you need them to be part of the
    constraint. ``CydnsRecord`` will not enforce uniqueness for you.
All common records have a ``fqdn`` field. This field is updated
every time the object is saved::
        fqdn = label + '.' + domain.name
    or, if label == ''
fqdn = domain.name
This field makes searching for records much easier. Instead of
looking at ``obj.label`` together with ``obj.domain.name``, you can
just search the ``obj.fqdn`` field.
As of commit 7b2fd19f, the build scripts do not care about ``fqdn``.
This could change.
"the total number of octets that represent a name (i.e., the sum of
    all label octets and label lengths) is limited to 255" - RFC 1034
"""
domain = models.ForeignKey(Domain, null=False)
label = models.CharField(max_length=100, blank=True, null=True,
validators=[validate_label])
fqdn = models.CharField(max_length=255, blank=True, null=True,
validators=[validate_name])
# fqdn = label + domain.name <--- see set_fqdn
class Meta:
abstract = True
def clean(self):
self.set_fqdn()
self.check_TLD_condition()
def save(self, *args, **kwargs):
        no_build = kwargs.pop('no_build', False)  # Remove the key if present; default is to rebuild.
super(CydnsRecord, self).save(*args, **kwargs)
        if not no_build:
            # Mark the domain as dirty so it can be rebuilt.
            self.domain.dirty = True
            self.domain.save()
def set_fqdn(self):
try:
if self.label == '':
self.fqdn = self.domain.name
else:
self.fqdn = "{0}.{1}".format(self.label, self.domain.name)
except ObjectDoesNotExist:
return
def check_for_cname(self):
""""If a CNAME RR is preent at a node, no other data should be
present; this ensures that the data for a canonical name and its
aliases cannot be different."
-- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_
Call this function in models that can't overlap with an existing
CNAME.
"""
CNAME = cyder.cydns.cname.models.CNAME
if CNAME.objects.filter(fqdn=self.fqdn).exists():
raise ValidationError("A CNAME with this name already exists.")
def check_for_delegation(self):
"""If an object's domain is delegated it should not be able to
be changed. Delegated domains cannot have objects created in
them.
"""
if not self.domain.delegated:
return
if not self.pk: # We don't exist yet.
raise ValidationError("No objects can be created in the {0}"
"domain. It is delegated."
.format(self.domain.name))
def check_TLD_condition(self):
_check_TLD_condition(self)
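# Hedged usage sketch (not part of this module): a concrete record type would
# typically subclass CydnsRecord and opt in to uniqueness itself; the TXT
# model below is hypothetical.
#
#   class TXT(CydnsRecord):
#       txt_data = models.TextField()
#
#       class Meta:
#           unique_together = ('domain', 'label', 'txt_data')
#
# Calling full_clean() on an instance runs clean(), which fills in ``fqdn``
# and checks the TLD condition; save() then marks the domain dirty unless
# no_build=True is passed.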
| 2.4375 | 2 |
bounca/templatetags/templatetags/version_tags.py | warthog9/bounca | 0 | 12792752 | <gh_stars>0
"""Version template tag"""
from django import template
from django.utils.version import get_version
from bounca import VERSION
register = template.Library()
@register.simple_tag
def bounca_version():
return str(get_version(VERSION))
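# Hedged usage sketch (template syntax shown as a comment; the template file
# itself is not part of this module):
#
#   {% load version_tags %}
#   BounCA {% bounca_version %}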
| 1.585938 | 2 |
codigo/Live29/exemplo_6.py | cassiasamp/live-de-python | 572 | 12792753 | <reponame>cassiasamp/live-de-python
class Pessoa:
def __init__(self, n, s):
self.n = n
self.s = s
def __hash__(self):
return hash((self.n,self.s))
ll = Pessoa('Lugão','Ricardo')
lulu = Pessoa('Lugão','Ricardinho')
print(hash(ll))    # prints an integer hash built from ('Lugão', 'Ricardo')
print(hash(lulu))  # prints a different integer, since ('Lugão', 'Ricardinho') differs
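# Hedged follow-up (not part of the original example): __hash__ alone does not
# make two objects "equal" - without __eq__, Python still falls back to
# identity comparison, even when the hashes match.
outro_ll = Pessoa('Lugão', 'Ricardo')
print(hash(outro_ll) == hash(ll))  # True: same (n, s) tuple, same hash
print(outro_ll == ll)              # False: default __eq__ compares identity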
| 3.53125 | 4 |
Lesson12/f1.py | shinkai-tester/python_beginner | 2 | 12792754 | try:
f = open('f.txt', 'r')
except FileNotFoundError:
f = open('f.txt', 'w')
else:
text = f.read()
print(text)
finally:
f.close()
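# Hedged alternative sketch (not part of the lesson file): the same
# open-or-create logic expressed with context managers, so the file is
# closed automatically and no explicit finally block is needed.
try:
    with open('f.txt', 'r') as f2:
        print(f2.read())
except FileNotFoundError:
    with open('f.txt', 'w') as f2:
        pass  # just create an empty f.txt for the next run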
| 3.1875 | 3 |
js/SFLIX/getfilepath.py | Apop85/Scripts | 0 | 12792755 | import os
from shutil import move as moveFile
os.chdir(os.getcwd())
print("".center(50, "="))
print("Update STEFFLIX-Daten".center(50))
print("".center(50, "="))
homeDir = os.getcwd()
allowedFileTypes = ["jpg", "jpeg", "mp4", "mp3", "png"]
disallowedItems = ["System Volume Information", "$RECYCLE.BIN", ".vscode", "sflix_sys"]
def recursiveCrawler(path, project="", serie="", staffel="", folge="", filelist={}, depth=0):
if depth == 0:
pass
elif depth == 1:
project = path.split("\\")[-1]
filelist.setdefault(project, {})
elif depth == 2:
serie = path.split("\\")[-1]
filelist[project].setdefault(serie, {})
elif depth == 3:
staffel = path.split("\\")[-1]
filelist[project][serie].setdefault(staffel, {})
elif depth == 4:
folge = path.split("\\")[-1]
filelist[project][serie][staffel].setdefault(folge, {})
# print(f"{project} {serie} {staffel}")
folderContent = os.listdir(path)
for item in folderContent:
        if item not in disallowedItems:
if os.path.isfile(os.path.join(path, item)):
extension = item.split(".")[-1]
if extension in allowedFileTypes:
if depth == 1:
relPath = os.path.join(path, item)[len(homeDir):]
filelist[project].setdefault(os.path.join(".", relPath))
elif depth == 2:
relPath = os.path.join(path, item)[len(homeDir):]
filelist[project][serie].setdefault(os.path.join(".", relPath))
elif depth == 3:
relPath = os.path.join(path, item)[len(homeDir):]
filelist[project][serie][staffel].setdefault(os.path.join(".", relPath), None)
elif depth > 3:
relPath = os.path.join(path, item)[len(homeDir):]
filelist[project][serie][staffel][folge].setdefault(os.path.join(".", relPath), None)
elif os.path.isdir(os.path.join(path, item)):
filelist = recursiveCrawler(os.path.join(path, item), project, serie, staffel, folge, filelist, depth+1)
return filelist
print("Durchsuche Ordner...".ljust(40), end="")
try:
filelist = recursiveCrawler(homeDir)
print("OK")
except:
print("Fehler")
# fileWriter = open(os.path.join(homeDir, "output.txt"), "w", encoding="utf-8")
# fileWriter.write(str(filelist).replace("\\\\", "/").replace("None", "null"))
# fileWriter.close()
try:
print("Erstelle Backup...".ljust(40), end="")
if os.path.exists(os.path.join(homeDir, "sflix_sys", "data.js.bak")):
os.remove(os.path.join(homeDir, "sflix_sys", "data.js.bak"))
moveFile(os.path.join(homeDir, "sflix_sys", "data.js"), os.path.join(homeDir, "sflix_sys", "data.js.bak"))
print("OK")
except:
print("Fehler")
try:
print("Speichere neue Version...".ljust(40), end="")
fileWriter = open(os.path.join(homeDir, "sflix_sys", "data.js"), "w", encoding="utf-8")
fileWriter.write("var data = " + str(filelist).replace("\\\\", "/").replace("None", "null") + ";")
fileWriter.close()
print("OK")
except:
print("Fehler")
print("".center(50, "="))
print("Update abgeschlossen".center(50))
print("".center(50, "="))
print()
input("Enter zum Beenden") | 2.5625 | 3 |
libraries/ArbotiX/examples/old/CommExt/CommEXT.py | bouraqadi/ArbotiX-M | 1 | 12792756 | #!/usr/bin/env python
# CommEXT.py - ArbotiX Commander Extended Instruction Set Example
# Copyright (c) 2008-2010 Vanadium Labs LLC. All right reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Vanadium Labs LLC nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time, sys, serial
# Commander definitions
BUT_R1 = 1
BUT_R2 = 2
BUT_R3 = 4
BUT_L4 = 8
BUT_L5 = 16
BUT_L6 = 32
BUT_RT = 64
BUT_LT = 128
INPUT = 0
OUTPUT = 1
LOW = 0
HIGH = 1
class CommanderEXT():
NO_ACTION = 0x08
pan = 512
tilt = 512
def __init__(self, port):
self.ser = serial.Serial()
self.ser.baudrate = 38400
self.ser.port = port
self.ser.timeout = 0.5
self.ser.open()
def sendPacket(self, rjoy_h, rjoy_l, ljoy_h, ljoy_l, buttons, ext):
# send output
self.ser.write('\xFF')
self.ser.write(chr(rjoy_h))
self.ser.write(chr(rjoy_l))
self.ser.write(chr(ljoy_h))
self.ser.write(chr(ljoy_l))
self.ser.write(chr(buttons))
self.ser.write(chr(ext))
self.ser.write(chr(255 - ((rjoy_h+rjoy_l+ljoy_h+ljoy_l+buttons+ext)%256)))
def readPacket(self, inst, mode = 0, value = -1):
d = self.ser.read()
if d == '':
#print "Fail Read"
return -1
# now process our byte
if mode == 0: # get our first
if ord(d) == 0xff:
#print "Oxff found"
return self.readPacket(inst, 1)
else:
#print "Oxff NOT found, restart: " + str(ord(d))
return self.readPacket(inst, 0)
elif mode == 1: # get our instruction
if ord(d) == inst:
#print "Instruction found"
return self.readPacket(inst, 2)
else:
#print "Instruction NOT found, restart: " + str(ord(d))
return self.readPacket(inst, 0)
elif mode == 2: # get value
return self.readPacket(inst, 3, ord(d))
elif mode == 3: # get checksum
#print "Checksum found: " + str(ord(d))
checksum = inst + value + ord(d)
#print "Checksum computed: " + str(checksum)
if checksum % 256 != 255:
#print "Checksum ERROR"
return -1
return value
# fail
return -1
def extInstruction(self, inst):
self.sendPacket(self.pan>>8,self.pan%256,self.tilt>>8,self.tilt%256, 0, inst)
def readAnalog(self, id):
""" Read an analog port, id is 0 to 7. """
self.extInstruction(0x10 + id)
return self.readPacket(0x10 + id)
def readDigital(self):
""" Read all 8 digital ports as a single byte. """
self.extInstruction(0x1B)
return self.readPacket(0x1B)
def motorsOff(self):
self.extInstruction(0x40)
def leftMotor(self, power):
""" Set left motor power, -1 to 1. """
if power <= 1.0 and power >= -1.0:
self.extInstruction(0x50 + int(power*10))
def rightMotor(self, power):
""" Set right motor power, -1 to 1. """
if power <= 1.0 and power >= -1.0:
self.extInstruction(0x70 + int(power*10))
def setDigital(self, id, direction, value):
""" Set a digital pin value. id is 0 to 7. value and direction are 0 or 1. """
self.extInstruction(0x80 + 4*id + direction*2 + value)
if __name__ == "__main__":
# commanderEXT.py <serialport>
c = CommanderEXT(sys.argv[1])
# Cycle digital ports using extended mode
for i in range(8):
c.setDigital(i, OUTPUT, HIGH)
if i > 2:
c.setDigital(i-2, INPUT, LOW)
time.sleep(0.25)
c.setDigital(4, OUTPUT, HIGH)
c.setDigital(6, OUTPUT, HIGH)
# Read analog inputs
for i in range(8):
print c.readAnalog(i)
# Read digital inputs
print "Digital:", c.readDigital()
# Exercise turret
for i in range(20):
c.pan = 312 + i*20
c.extInstruction(c.NO_ACTION)
time.sleep(.2)
c.pan = 512
for i in range(20):
c.tilt = 312 + i*20
c.extInstruction(c.NO_ACTION)
time.sleep(.2)
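# Hedged checksum note (not part of the original example): sendPacket() sends
# 255 minus the low byte of the payload sum, so e.g. a payload summing to
# 0x1FE (510) is sent with checksum 255 - (510 % 256) = 1.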
| 1.867188 | 2 |
torch_rl/envs/__init__.py | JimmyMVP/plain_rl | 10 | 12792757 | from gym.envs.registration import register
from .wrappers import *
from .logger import *
from .envs import *
register(
id='BanditsX2-v0',
kwargs = {'num_bandits' : 2},
entry_point='torch_rl.envs:BanditEnv',
)
register(
id='BanditsX4-v0',
kwargs = {'num_bandits' : 4},
entry_point='torch_rl.envs:BanditEnv',
)
register(
id='BanditsX8-v0',
kwargs = {'num_bandits' : 8},
entry_point='torch_rl.envs:BanditEnv',
)
try:
from .roboschool_envs import *
register(
id='TRLRoboschoolReacher-v1',
kwargs = {},
entry_point='torch_rl.envs:RoboschoolReacher',
max_episode_steps=150,
reward_threshold=18.0,
tags={ "pg_complexity": 1*1000000 },
)
except ImportError as e:
print('Roboschool environments excluded, import error')
try:
from .opensim_envs import *
register(
id='OsimArm2D-v1',
kwargs={'visualize': False},
entry_point='osim.env:Arm2DEnv'
)
register(
id='OsimArm3D-v1',
kwargs={'visualize': False},
entry_point='osim.env:Arm3DEnv'
)
register(
id='OsimRun3D-v1',
kwargs={'visualize': False},
entry_point='osim.env:Run3DEnv'
)
except ImportError as e:
print('Opensim environments excluded, import error ', e)
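# Hedged usage sketch (not part of this package): once this module has been
# imported, the registered ids can be instantiated through gym, e.g.
#
#   import gym
#   import torch_rl.envs  # noqa: F401  (runs the register() calls above)
#   env = gym.make('BanditsX2-v0')
#   observation = env.reset()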
| 1.617188 | 2 |
src/search/views.py | mahidul-islam/shopmanagementsystem | 0 | 12792758 | from django.shortcuts import render
from products.models import Product
from django.views.generic.list import ListView
from django.db.models import Q
class SearchProductView(ListView):
queryset = Product.objects.all()
template_name = "search/searched.html"
def get_context_data(self, *args, **kwargs):
context = super(SearchProductView, self).get_context_data(*args, **kwargs)
print(context)
context['query'] = self.request.GET.get('q')
return context
def get_queryset(self, *args, **kwargs):
request = self.request
        query = request.GET.get('q', None)
if query is not None:
return Product.objects.search(query)
return Product.objects.featured()
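# Hedged wiring sketch (hypothetical urls.py entry, not part of this app):
#
#   from django.urls import path
#   from search.views import SearchProductView
#
#   urlpatterns = [
#       path('search/', SearchProductView.as_view(), name='search'),
#   ]
#
# The view then reads the ``q`` query-string parameter, e.g. /search/?q=shoes.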
| 1.9375 | 2 |
smart_alarm_clock/test_apis.py | kfb19/Smart-Alarm-Clock | 0 | 12792759 | """this module is designed to test the version of the APIs required
to see if they are up to date so the program can be run"""
import logging
from news_api import check_news_version
from weather_api import check_weather_version
from covid_api import check_covid_version
logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S')
def test_api() -> bool:
"""this function checks to see if each API can be properly set up
and if there is an error, it is logged and the user
is told to abort the program"""
weather = False
news = False
covid = False
if check_weather_version():
logging.info("Weather API version is up to date (check_weather_version())")
weather = True
else:
logging.info("Weather API version is not up to date (check_weather_version()) - ACTION REQUIRED")
if check_news_version():
logging.info("News API version is up to date (check_news_version())")
news = True
else:
logging.info("News API version is not up to date (check_news_version()) - ACTION REQUIRED")
if check_covid_version():
logging.info("Covid-19 API version is up to date (check_covid_version())")
covid = True
else:
logging.info("Covid-19 API version is not up to date (check_covid_version()) - ACTION REQUIRED")
return bool(weather and news and covid)
if __name__ == '__main__':
logging.info("Test API Module Tested")
print(test_api())#tests the function
| 3.421875 | 3 |
tide_predictor/tidal_model.py | guyt101z/tide-predictor | 1 | 12792760 | <reponame>guyt101z/tide-predictor<gh_stars>1-10
from __future__ import unicode_literals
import pytz
import math
from functools import partial
from dateutil.parser import parse as parse_datetime
from collections import namedtuple
Constituent = namedtuple(
'Constituent', 'name,description,amplitude,phase,speed')
class TidalModel(object):
def __init__(self, constituent_data):
self.constituents = None
self.time_datum = None
self._parse(constituent_data)
def predict(self, when):
"""
Predict a tidal height at a given point in time.
"""
assert when.tzinfo == pytz.UTC, 'datetime timezone must be UTC'
t_hours = to_hours(when - self.time_datum)
amplitudes = map(
partial(calculate_amplitude, t_hours),
self.constituents)
return sum(amplitudes)
def __unicode__(self):
return '<TidalModel, {} constituents>'.format(len(self.constituents))
def __str__(self):
return self.__unicode__().encode('ascii')
def _parse(self, data):
assert data['height_units'] == 'metres'
self.time_datum = parse_datetime(data['time_datum'])
self.constituents = TidalModel._parse_constituents(
data['constituents'])
@staticmethod
def _parse_constituents(constituents):
return [Constituent(
name=c['name'],
description=c.get('description', ''),
amplitude=float(c['amplitude']),
phase=float(c['phase']),
speed=float(c['speed'])) for c in constituents]
def to_hours(timedelta):
return timedelta.total_seconds() / 3600
def calculate_amplitude(time_hours, constituent):
angle = math.radians((constituent.speed * time_hours) - constituent.phase)
return constituent.amplitude * math.cos(angle)
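if __name__ == '__main__':
    # Hedged, minimal sketch (not part of the original module): the
    # contribution of a single made-up constituent 6 hours after the time
    # datum. The amplitude/phase/speed values are illustrative only.
    example = Constituent(name='M2', description='example constituent',
                          amplitude=1.2, phase=30.0, speed=28.984)
    print(calculate_amplitude(6.0, example))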
| 2.875 | 3 |
rectangles.py | CoffeeTableEnnui/RedCircleGame | 0 | 12792761 | <reponame>CoffeeTableEnnui/RedCircleGame
import pygame
class Rectangle:
def collide(self, circle):
x_range = range(self.xmin - circle.size, self.xmax + circle.size)
y_range = range(self.ymin - circle.size, self.ymax + circle.size)
return circle.x in x_range and circle.y in y_range
class LevelButton(Rectangle):
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
width = 75
height = 75
pygame.init()
colour = WHITE
fill = BLACK
font = pygame.font.Font(None, 75)
def __init__(self, levelnumber, xmin, ymin):
self.num = levelnumber
self.xmin = xmin
self.ymin = ymin
self.xmax = xmin + self.width
self.ymax = ymin + self.height
def draw(self, background):
pygame.draw.polygon(background,
self.BLACK,
[(self.xmax, self.ymax),
(self.xmax, self.ymin),
(self.xmin, self.ymin),
(self.xmin, self.ymax),],
0)
renderlevelbutton = self.font.render(str(self.num), 0, self.colour, self.fill)
background.blit(renderlevelbutton, (self.xmin, self.ymin))
class BackButton(Rectangle):
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
width = 125
height = 50
pygame.init()
colour = WHITE
fill = BLACK
font = pygame.font.Font(None, 50)
def __init__(self, xmin, ymin):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmin + self.width
self.ymax = ymin + self.height
def draw(self, background):
pygame.draw.polygon(background,
self.BLACK,
[(self.xmax, self.ymax),
(self.xmax, self.ymin),
(self.xmin, self.ymin),
(self.xmin, self.ymax),],
0)
renderbackbutton = self.font.render("BACK", 0, self.colour, self.fill)
background.blit(renderbackbutton, (self.xmin, self.ymin))
class Wall(Rectangle):
colour = (0, 0, 0)
def __init__(self, xmin, xmax, ymin, ymax):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
def draw(self, background):
pygame.draw.polygon(background,
self.colour,
[(self.xmax, self.ymax),
(self.xmax, self.ymin),
(self.xmin, self.ymin),
(self.xmin, self.ymax),],
0)
class Border:
colour = (0, 0, 0)
def __init__(self, thickness, width, height):
self.thick = thickness
self.height = height
self.width = width
def draw(self, background):
pygame.draw.polygon(background, self.colour,
[(self.thick, self.thick), (self.thick, 0), (self.width - self.thick, 0), (self.width - self.thick, self.thick),],
0)
pygame.draw.polygon(background, self.colour,
[(self.thick, self.height), (self.thick, 0), (0, 0), (0, self.height),],
0)
pygame.draw.polygon(background, self.colour,
[(self.width - self.thick, self.height), (self.width - self.thick, 0), (self.width, 0), (self.width, self.height),],
0)
pygame.draw.polygon(background, self.colour,
[(self.thick, self.height), (self.thick, self.height - self.thick),
(self.width - self.thick, self.height - self.thick), (self.width - self.thick, self.height),],
0)
def collide(self, circle):
x_range = range(self.thick + circle.size, self.width - self.thick - circle.size)
y_range = range(self.thick + circle.size, self.height - self.thick - circle.size)
return not (circle.x in x_range and circle.y in y_range)
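if __name__ == "__main__":
    # Hedged sketch (not part of the game): the Circle class used by
    # Rectangle.collide() lives elsewhere in this project, so a minimal
    # stand-in with the same x/y/size attributes is used here only to
    # exercise the range-based collision test.
    class _FakeCircle:
        def __init__(self, x, y, size):
            self.x, self.y, self.size = x, y, size

    wall = Wall(10, 110, 10, 60)
    print(wall.collide(_FakeCircle(50, 30, 5)))    # True: centre inside the wall
    print(wall.collide(_FakeCircle(200, 200, 5)))  # False: far outside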
| 3.453125 | 3 |
Python/scope.py | MarsBighead/mustang | 4 | 12792762 | #!/usr/bin/python
class Difference:
def __init__(self, a):
self.__elements = a
def computeDifference(self):
a = self.__elements
max = a[0]
min = a[0]
for i in range(0, len(a)):
if max<a[i]:
max=a[i]
else:
pass
if min>a[i]:
min=a[i]
else:
pass
print 'max = %d, min = %d'%(max,min)
self.maximumDifference= max-min
_ = raw_input()
a = [int(e) for e in raw_input().split(' ')]
d = Difference(a)
d.computeDifference()
# d.maximumDifference is an instance attribute, not a method
print d.maximumDifference
| 3.46875 | 3 |
saifooler/saliency/saliency_estimator.py | sailab-code/SAIFooler | 0 | 12792763 | <reponame>sailab-code/SAIFooler<filename>saifooler/saliency/saliency_estimator.py
import torch
from tqdm import tqdm
from saifooler.render.sailenv_module import SailenvModule
class SaliencyEstimator:
def __init__(self, mesh_descriptor, classifier, p3d_module, sailenv_module, data_module, use_cache=False):
self.mesh_descriptor = mesh_descriptor
self.mesh = self.mesh_descriptor.mesh
self.classifier = classifier
self.p3d_module = p3d_module
self.sailenv_module: SailenvModule = sailenv_module
self.data_module = data_module
self.use_cache = use_cache
self.device = self.classifier.device
self.tex_shape = self.mesh.textures.maps_padded().shape[1:-1]
def compute_batch_view_saliency_maps(self, images):
images.requires_grad_(True)
scores = self.classifier.classify(images)
max_score, _ = scores.max(1, keepdim=True)
max_score.mean().backward()
view_saliency_maps = torch.mean(images.grad.data.abs(), dim=3, keepdim=True)
return view_saliency_maps
def convert_view2tex_saliency_maps(self, view_saliency_maps, view2tex_maps):
tex_saliency = torch.zeros((view_saliency_maps.shape[0], *
self.tex_shape), device=self.device)
view2tex_maps = view2tex_maps * torch.tensor(self.tex_shape, device=self.device)
view2tex_maps = view2tex_maps.to(dtype=torch.long)
for idx in range(view_saliency_maps.shape[0]):
tex_saliency[(idx, view2tex_maps[idx, ..., 0], view2tex_maps[idx, ..., 1])] = view_saliency_maps.squeeze(3)[idx]
return tex_saliency
def render_batch(self, render_module, batch, view2tex_maps=None):
images = []
view2tex_maps_list = []
render_inputs, targets = batch
for render_input in render_inputs:
distance, camera_azim, camera_elev = render_input[:3]
render_module.look_at_mesh(distance, camera_azim, camera_elev)
lights_azim, lights_elev = render_input[3:]
render_module.set_lights_direction(lights_azim, lights_elev)
image = render_module.render(self.mesh)
if view2tex_maps is None:
view2tex_map = render_module.get_view2tex_map(self.mesh)
view2tex_maps_list.append(view2tex_map)
images.append(image.to(self.classifier.device))
images = torch.cat(images, 0)
view2tex_maps = view2tex_maps if view2tex_maps is not None else torch.cat(view2tex_maps_list, 0)
return images, view2tex_maps
def estimate_view_saliency_map(self, return_views=False):
self.sailenv_module.spawn_obj(self.mesh_descriptor)
view_saliencies = [[], []]
views = [[], []]
view2tex_maps = None
for idx, render_module in tqdm(enumerate([self.p3d_module, self.sailenv_module]), position=0, desc="Module"):
if render_module is None:
del view_saliencies[idx] # if unity module is not provided, just skip it
continue
for batch in tqdm(self.data_module.test_dataloader(), position=1, desc="Batch"):
images, view2tex_maps = self.render_batch(render_module, batch, view2tex_maps)
view_saliency_maps = self.compute_batch_view_saliency_maps(images)
if return_views:
views[idx].append(images)
view_saliencies[idx].append(view_saliency_maps)
view_saliencies[idx] = torch.cat(view_saliencies[idx], 0)
if return_views:
views[idx] = torch.cat(views[idx], 0)
self.sailenv_module.despawn_obj()
if return_views:
return view_saliencies, views
return view_saliencies
def estimate_saliency_map(self, return_view_saliencies=False, return_views=False):
self.sailenv_module.spawn_obj(self.mesh_descriptor)
tex_saliencies = [[], []]
view_saliencies = [[], []]
views = [[], []]
view2tex_maps = None
for idx, render_module in tqdm(enumerate([self.p3d_module, self.sailenv_module]), position=0, desc="Module"):
if render_module is None:
del tex_saliencies[idx] # if unity module is not provided, just skip it
continue
for batch in tqdm(self.data_module.test_dataloader(), position=1, desc="Batch"):
images, view2tex_maps = self.render_batch(render_module, batch, view2tex_maps)
view_saliency_maps = self.compute_batch_view_saliency_maps(images)
tex_saliency_maps = self.convert_view2tex_saliency_maps(view_saliency_maps, view2tex_maps)
tex_saliencies[idx].append(tex_saliency_maps)
if return_view_saliencies:
view_saliencies[idx].append(view_saliency_maps)
if return_views:
views[idx].append(images)
tex_saliencies[idx] = torch.cat(tex_saliencies[idx], 0)
if return_view_saliencies:
view_saliencies[idx] = torch.cat(view_saliencies[idx], 0)
if return_views:
views[idx] = torch.cat(views[idx], 0)
self.sailenv_module.despawn_obj()
ret = (tex_saliencies,)
if return_view_saliencies:
ret = (*ret, view_saliencies)
if return_views:
ret = (*ret, views)
return ret
def to(self, device):
self.mesh = self.mesh.to(device)
self.mesh.textures = self.mesh.textures.to(device)
self.p3d_module.to(device)
self.classifier.to(device)
self.device = device | 1.992188 | 2 |
cred/cred_data.py | TECommons/tec-sourcecred-dashboard | 0 | 12792764 | from datetime import datetime
import pandas as pd
from typing import Any, Dict, List, Tuple
class CredData():
"""
Parses information from Sourcecred
- Works with TimelineCred data format (sourcecred <= v0.7x)
"""
def __init__(self, cred_data, accounts_data):
self.cred_json_data = cred_data
self.weighted_graph = cred_data[1]['weightedGraph'][1]
self.cred_data = cred_data[1]['credData']
self.accounts_data = accounts_data
self.cache = {
'df': None,
'df_rank': None,
'df_grain': None,
'df_accounts': None,
'df_cred_ot': None,
'df_cred_eflow': None,
'df_cred_nflow': None,
}
def get_weighted_graph(self, data) -> Dict[str, Any]:
"""
Weighted graph from CredResult JSON data
"""
return self.weighted_graph
def get_cred_data(self) -> Dict[str, Any]:
"""
Raw CredResult JSON data
"""
return self.cred_data
def get_node(self, i: int) -> Dict[str, Any]:
"""
Returns specifc node's information
"""
node = dict()
address = self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i]
node['address.source'] = f'{address[0]}/{address[1]}'
node['address.nodeType'] = address[2]
node['address.id'] = address[3]
node['totalCred'] = self.cred_data['nodeSummaries'][i]['cred']
node['credOverTime'] = self.cred_data['nodeOverTime'][i]['cred'] if self.cred_data['nodeOverTime'][i] else []
node['description'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description']
node['timestamp'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs']
node['user'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if node['address.nodeType'] == 'IDENTITY' else None
return node
@property
def total_nodes(self) -> int:
"""
Total amount of nodes (users, posts, etc) in the graph
"""
return len(self.cred_data['nodeSummaries'])
@property
def nodes(self) -> List[Any]:
"""
Returns all nodes in the graph
"""
return [self.get_node(i) for i in range(self.total_nodes)]
@property
    def intervals(self) -> List[Any]:
"""
Returns timestamp intervals where cred was computed
"""
return self.cred_data['intervals']
def get_dt_intervals(self) -> List[Any]:
"""
Return intervals in datetime format
"""
return [datetime.fromtimestamp(interval[('endTimeMs')] / 1000) for interval in self.intervals]
@property
def distributed_cred(self) -> float:
"""
Returns total distributed cred
"""
if self.cache['df'] is None:
self.to_df()
return self.cache['df'].totalCred.sum()
@property
def distributed_grain(self) -> float:
"""
Returns total distributed grain
"""
if self.cache['df_grain'] is None:
self.get_grain_distribution()
return self.cache['df_grain'].amount.sum()
@property
def accounts(self) -> pd.DataFrame:
"""
Returns user accounts info from 'output/accounts.json' file
"""
if self.cache['df_accounts'] is None:
self.cache['df_accounts'] = pd.json_normalize(self.accounts_data['accounts'])
self.cache['df_accounts']['account.balance'] = self.cache['df_accounts']['account.balance'].map(float) / 1e18
self.cache['df_accounts']['account.paid'] = self.cache['df_accounts']['account.paid'].map(float) / 1e18
return self.cache['df_accounts']
def get_user_nodes(self) -> pd.DataFrame:
"""
Returns user nodes in the graph
"""
if self.cache['df'] is None:
self.to_df()
return self.cache['df'][self.cache['df']['address.nodeType'] == 'IDENTITY']
def get_user_ranking(self) -> pd.DataFrame:
"""
Returns the user raking by total amount of cred gained so far
"""
if self.cache['df_rank'] is None:
# self.cache['df_rank'] = self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True)
# distributed_cred = self.cache['df_rank'].totalCred.sum()
# self.cache['df_rank']['credShare'] = (self.cache['df_rank'].totalCred / distributed_cred) * 100
df_rank_p = self.get_user_nodes()[['address.id', 'totalCred', 'credOverTime']]
distributed_cred = df_rank_p.totalCred.sum()
df_rank_p['credShare'] = (df_rank_p.totalCred / distributed_cred) * 100
df_rank_p.set_index('address.id', inplace=True)
df_acc_p = self.accounts[['account.identity.id',
'account.identity.name',
'account.identity.subtype',
'account.active',
'account.balance',
'account.paid'
]]
self.cache['df_rank'] = df_acc_p.join(df_rank_p,
on='account.identity.id',
how='inner'
).sort_values('totalCred', ascending=False).reset_index(drop=True)
self.cache['df_rank'].columns = ['id', 'user', 'type', 'active', 'grainBalance', 'grainPaid', 'totalCred', 'credOverTime', 'credShare']
return self.cache['df_rank']
def get_grain_distribution(self) -> pd.DataFrame:
"""
Returns the history of grain distribution
"""
if self.cache['df_grain'] is None:
grain_history = [acc for acc in self.accounts_data['accounts'] if 'allocationHistory' in acc['account']]
if len(grain_history) > 0:
grain_distribution = [{'credTimestampMs': record['credTimestampMs'], 'amount': int(record['grainReceipt']['amount']) / 1e18} \
for acc in grain_history for record in acc['account']['allocationHistory']]
self.cache['df_grain'] = pd.json_normalize(grain_distribution)
self.cache['df_grain']['credTimestampMs'] = pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms')
else:
# zeros
self.cache['df_grain'] = pd.DataFrame([self.get_dt_intervals(), [0.] * len(self.intervals)]).T
self.cache['df_grain'].columns = ['credTimestampMs', 'amount']
return self.cache['df_grain']
def get_cred_over_time(self) -> pd.DataFrame:
"""
Returns distributed cred summary over all intervals
"""
if self.cache['df_cred_ot'] is None:
if self.cache['df'] is None:
self.to_df()
self.cache['df_cred_ot'] = pd.DataFrame([self.get_dt_intervals(),
pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum()
]).T
self.cache['df_cred_ot'].columns = ['credTimestampMs', 'amount']
self.cache['df_cred_ot'].set_index('credTimestampMs', drop=True, inplace=True)
return self.cache['df_cred_ot']
def to_df(self) -> pd.DataFrame:
"""
        Returns all node data as a DataFrame
"""
if self.cache['df'] is None:
self.cache['df'] = pd.json_normalize(self.nodes)
self.cache['df'].timestamp = pd.to_datetime(self.cache['df'].timestamp, unit='ms')
# distributedCred = self.df.totalCred.sum()
# self.df['credShare'] = self.df.totalCred / distributedCred
return self.cache['df']
def get_cred_flow_from_graph(self) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Gets cred flow through nodes & edges in the cred graph.
"""
if self.cache['df_cred_eflow'] is None:
def set_plugin(label):
for prefix, plugin in plugin_prefixes.items():
if label.startswith(prefix):
return plugin
return 'Not Found'
# PREPROCESSING
plugin_meta = dict()
edges = []
nodes = []
# edges_weights = dict()
# nodes_weights = dict()
for plugin in self.cred_json_data[1]['plugins'][1]:
plugin_meta[plugin['name']] = {
'nodePrefix': plugin['nodePrefix'],
'edgePrefix': plugin['edgePrefix'],
'edgeTypes': [{'prefix': et['prefix'], 'weight': et['defaultWeight']} for et in plugin['edgeTypes']],
'nodeTypes': [{'prefix': nt['prefix'], 'weight': nt['defaultWeight']} for nt in plugin['nodeTypes']],
}
edges.extend([et['prefix'] for et in plugin_meta[plugin['name']]['edgeTypes']])
# for et in plugin_meta[plugin['name']]['edgeTypes']:
# edges_weights[et['prefix']] = et['weight']
nodes.extend([nt['prefix'] for nt in plugin_meta[plugin['name']]['nodeTypes']])
# for nt in plugin_meta[plugin['name']]['nodeTypes']:
# nodes_weights[nt['prefix']] = nt['weight']
plugin_prefixes = {plugin_meta[p_name]['nodePrefix'].replace('\x00', ''): p_name for p_name in plugin_meta}
plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\x00', ''): p_name for p_name in plugin_meta})
# EDGES
df_ew = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(),
[v['backwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()],
[v['forwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()]
]).T
df_ew.columns = ['edge', 'backward', 'forward']
cred_edges = dict()
for e in edges:
cred_edges[e.replace('\x00', '')] = [
df_ew[df_ew.edge.str.startswith(e)].backward.sum(),
df_ew[df_ew.edge.str.startswith(e)].forward.sum()
]
self.cache['df_cred_eflow'] = pd.json_normalize(cred_edges).T
self.cache['df_cred_eflow']['backward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[0])
self.cache['df_cred_eflow']['forward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[1])
self.cache['df_cred_eflow']['plugin'] = self.cache['df_cred_eflow'].index.map(set_plugin)
self.cache['df_cred_eflow'].drop(columns=[0], inplace=True)
# NODES
df_nw = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(),
self.weighted_graph['weightsJSON'][1]['nodeWeights'].values()
]).T
df_nw.columns = ['node', 'weight']
cred_nodes = dict()
for n in nodes:
cred_nodes[n.replace('\x00', '')] = df_nw[df_nw.node.str.startswith(n)].weight.sum()
self.cache['df_cred_nflow'] = pd.json_normalize(cred_nodes).T
self.cache['df_cred_nflow'].columns = ['weight']
self.cache['df_cred_nflow']['plugin'] = self.cache['df_cred_nflow'].index.map(set_plugin)
return (self.cache['df_cred_nflow'], self.cache['df_cred_eflow'])
def __repr__(self) -> str:
return "<{} - ({} nodes & {} distributed CRED)>".format(self.__class__.__name__, self.total_nodes, self.distributed_cred)
| 2.96875 | 3 |
utils/generate_tsne_data.py | cBioCenter/chell-viz-contact | 3 | 12792765 | <filename>utils/generate_tsne_data.py
from MulticoreTSNE import MulticoreTSNE as TSNE
import numpy as np
data = np.loadtxt('pca.csv', delimiter=',')
tsne = TSNE(n_jobs=4)
Y = tsne.fit_transform(data)
np.savetxt('tsne_matrix.csv', Y, delimiter=",")
| 2.390625 | 2 |
gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/filtering/resample_poly.py | t-triobox/gQuant | 0 | 12792766 | <reponame>t-triobox/gQuant
from ast import literal_eval
from fractions import Fraction
import numpy as np
import cupy as cp
from cusignal.filtering.resample import resample_poly as curesamp
from scipy.signal import resample_poly as siresamp
from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..windows import _WINS_CONFIG
__all__ = ['CusignalResamplePolyNode']
_RESAMPLEPOLY_DESC = '''Resample `signal` along the given axis using polyphase
filtering. The signal is upsampled by the factor `up`, a zero-phase low-pass
FIR filter is applied, and then it is downsampled by the factor `down`.
The resulting sample rate is ``up / down`` times the original sample
rate. Values beyond the boundary of the signal are assumed to be zero
during the filtering step. Returns resampled array and new sample rate.
'''
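# Hedged worked example (comment only, not part of the node): resampling from
# 44.1 kHz to 48 kHz corresponds to up=160, down=147, since
# Fraction(48000, 44100) == Fraction(160, 147); the output rate is then
# 44100 * 160 / 147 == 48000.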
class CusignalResamplePolyNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
inports = {
'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]},
'samplerate': {
PortsSpecSchema.port_type: [int, float, np.float32,
np.float64],
PortsSpecSchema.optional: True
},
'window': {
PortsSpecSchema.port_type: [cp.ndarray, np.ndarray],
PortsSpecSchema.optional: True
},
}
outports = {
'signal_out': {PortsSpecSchema.port_type: '${port:signal}'},
'samplerate_out': {
PortsSpecSchema.port_type: [int, float, np.float32,
np.float64],
PortsSpecSchema.optional: True
}
}
self.template_ports_setup(in_ports=inports, out_ports=outports)
meta_outports = {'signal_out': {}, 'samplerate_out': {}}
self.template_meta_setup(out_ports=meta_outports)
def conf_schema(self):
padtype_enum = ['constant', 'line', 'mean', 'median', 'maximum',
'minimum']
json = {
'title': 'Polyphase Filter Resample Node',
'type': 'object',
'description': _RESAMPLEPOLY_DESC,
'properties': {
'new_samplerate': {
'type': 'number',
'description': 'Desired sample rate. Specify this or the '
'up/down parameters. This is used when `samplerate` ' # noqa: E131,E501
'is passed in via ports, otherwise up/down is used. '
'If both are set then this takes precedence over '
'up/down.'
},
'up': {
'type': 'integer',
'description': 'The upsampling factor.'
},
'down': {
'type': 'integer',
'description': 'The downsampling factor.'
},
'axis': {
'type': 'integer',
'description': 'The axis of `x` that is resampled. '
'Default is 0.', # noqa: E131,E501
'default': 0,
'minimum': 0,
},
'window': {
'type': 'string',
'description': 'Desired window to use to design the '
'low-pass filter, or the FIR filter coefficients to ' # noqa: E131,E501
'employ. Window can be specified as a string, a '
'tuple, or a list. If a string choose one of '
'available windows. If a tuple refer to '
'`cusignal.windows.get_window`. The tuple format '
'specifies the first argument as the string name of '
'the window, and the next arguments the needed '
'parameters. If `window` is a list it is assumed to '
'be the FIR filter coefficients. Note that the FIR '
'filter is applied after the upsampling step, so it '
'should be designed to operate on a signal at a '
'sampling frequency higher than the original by a '
'factor of `up//gcd(up, down)`. If the port window '
'is connected it takes precedence. Default '
'("kaiser", 5.0)',
'default': '("kaiser", 5.0)'
},
'gpupath': {
'type': 'boolean',
'description': 'gpupath - Optional path for filter design.'
' gpupath == False may be desirable if filter sizes ' # noqa: E131,E501
'are small.',
'default': True
},
'use_cpu': {
'type': 'boolean',
'description': 'use_cpu - Use CPU for computation via '
'scipy::signal.resample_poly. Default is False and ' # noqa: E131,E501
'runs on GPU via cusignal.',
'default': False
},
'padtype': {
'type': 'string',
'description': 'Only used when `use_cpu` is set. Scipy '
'padtype parameter of `resample_poly`. This is not ' # noqa: E131,E501
'currently exposed in cusignal.',
'enum': padtype_enum,
'default': 'constant'
},
'cval': {
'type': 'number',
'description': 'Only used when `use_cpu` is set. Value '
'to use if `padtype="constant"`. Default is zero.' # noqa: E131,E501
}
}
}
return ConfSchema(json=json)
def process(self, inputs):
signal_in = inputs['signal']
samplerate = inputs.get('samplerate', None)
new_samplerate = self.conf.get('new_samplerate', None)
if new_samplerate and samplerate:
ud = Fraction(new_samplerate / samplerate).limit_denominator()
up = ud.numerator
down = ud.denominator
else:
up = self.conf['up']
down = self.conf['down']
if samplerate:
samplerate = inputs['samplerate']
new_samplerate = samplerate * up / down
else:
new_samplerate = up / down
axis = self.conf.get('axis', 0)
if 'window' in inputs:
            window = inputs['window']
else:
window = self.conf.get('window', ("kaiser", 5.0))
if isinstance(window, str):
windows_enum = list(_WINS_CONFIG.keys())
# window could be a simple string or python code for tuple
if window not in windows_enum:
# window should be a string that is python code
# evaluated to a tuple.
try:
window = literal_eval(window)
except Exception:
raise RuntimeError('Uknown window: {}'.format(window))
gpupath = self.conf.get('gpupath', True)
use_cpu = self.conf.get('use_cpu', False)
if use_cpu:
padtype = self.conf.get('padtype', 'constant')
cval = self.conf.get('cval')
signal_out = siresamp(
signal_in, up, down, axis=axis, window=window,
padtype=padtype, cval=cval)
else:
signal_out = curesamp(
signal_in, up, down, axis=axis, window=window, gpupath=gpupath)
return {'signal_out': signal_out,
'samplerate_out': new_samplerate}
| 1.757813 | 2 |
sample/admin.py | urm8/django-translations | 100 | 12792767 | from django.contrib import admin
from translations.admin import TranslatableAdmin, TranslationInline
from .models import Timezone, Continent, Country, City
class TimezoneAdmin(TranslatableAdmin):
inlines = [TranslationInline]
class ContinentAdmin(TranslatableAdmin):
inlines = [TranslationInline]
class CountryAdmin(TranslatableAdmin):
inlines = [TranslationInline]
class CityAdmin(TranslatableAdmin):
inlines = [TranslationInline]
admin.site.register(Timezone, TimezoneAdmin)
admin.site.register(Continent, ContinentAdmin)
admin.site.register(Country, CountryAdmin)
admin.site.register(City, CityAdmin)
| 2.0625 | 2 |
lib/bin/captainsplexx/initfs_tools/make_initfs.py | elementofprgress/Frostbite3_Editor | 56 | 12792768 | <filename>lib/bin/captainsplexx/initfs_tools/make_initfs.py
from struct import unpack
initfs=open("initfs_Win32","rb")
magicB=initfs.read(4)
magic=unpack(">I",magicB)[0]
if magic in (0x00D1CE00,0x00D1CE01): #the file is XOR encrypted and has a signature
signature=initfs.read(292)
key=[ord(initfs.read(1))^123 for i in xrange(260)] #bytes 257 258 259 are not used; XOR the key with 123 right away
initfs.seek(-260, 1)
keyB=initfs.read(260)
initfs.close()
changedinitfs=open("initfs_Win32.txt","rb")
textdata=changedinitfs.read()
changedinitfs.close()
t=open("initfs_Win32_new","wb")
data="".join([chr(key[i%257]^ord(textdata[i])) for i in xrange(len(textdata))]) #go through the data applying one key byte on one data
t.write(magicB)
t.write(signature)
t.write(keyB)
t.write(data)
t.close()
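# Hedged note (not part of the original script): XOR with the same key byte is
# its own inverse, which is why one pass with the key both decrypts the
# original initfs and re-encrypts the edited text, e.g.:
#
#   plain = 0x41
#   enc = plain ^ 0x7B          # combine with key byte 123
#   assert enc ^ 0x7B == plain  # applying the key again restores the byte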
| 2.578125 | 3 |
roles/aliasses/molecule/default/tests/test_default.py | PW999/home-assistant-ansible | 0 | 12792769 | <reponame>PW999/home-assistant-ansible<filename>roles/aliasses/molecule/default/tests/test_default.py
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_alias(host):
host.run_expect([0], 'sudo -u molecule /bin/bash -vilc ll')
| 1.46875 | 1 |
py_kiwoom/kiwoom_youtube.py | ijhan21/hackathon_kiwoom | 0 | 12792770 | <filename>py_kiwoom/kiwoom_youtube.py
import os
import sys
from PyQt5.QAxContainer import *
from PyQt5.QtCore import *
from config.errorCode import *
from PyQt5.QtTest import *
from config.kiwoomType import *
from config.log_class import *
# from config.slack import *
class Kiwoom(QAxWidget):
def __init__(self):
super().__init__()
self.realType = RealType()
self.logging = Logging()
# self.slack = Slack() #슬랙 동작
#print("kiwoom() class start. ")
self.logging.logger.debug("Kiwoom() class start.")
####### event loop를 실행하기 위한 변수모음
self.login_event_loop = QEventLoop() #로그인 요청용 이벤트루프
self.detail_account_info_event_loop = QEventLoop() # 예수금 요청용 이벤트루프
self.calculator_event_loop = QEventLoop()
#########################################
########### 전체 종목 관리
self.all_stock_dict = {}
###########################
####### 계좌 관련된 변수
self.account_stock_dict = {}
self.not_account_stock_dict = {}
self.deposit = 0 #예수금
self.use_money = 0 #실제 투자에 사용할 금액
self.use_money_percent = 0.5 #예수금에서 실제 사용할 비율
self.output_deposit = 0 #출력가능 금액
self.total_profit_loss_money = 0 #총평가손익금액
self.total_profit_loss_rate = 0.0 #총수익률(%)
########################################
######## 종목 정보 가져오기
self.portfolio_stock_dict = {}
self.jango_dict = {}
########################
########### 종목 분석 용
self.calcul_data = []
##########################################
####### 요청 스크린 번호
self.screen_my_info = "2000" #계좌 관련한 스크린 번호
self.screen_calculation_stock = "4000" #계산용 스크린 번호
self.screen_real_stock = "5000" #종목별 할당할 스크린 번호
self.screen_meme_stock = "6000" #종목별 할당할 주문용스크린 번호
self.screen_start_stop_real = "1000" #장 시작/종료 실시간 스크린번호
########################################
######### 초기 셋팅 함수들 바로 실행
self.get_ocx_instance() #OCX 방식을 파이썬에 사용할 수 있게 변환해 주는 함수
self.event_slots() # 키움과 연결하기 위한 시그널 / 슬롯 모음
self.real_event_slot() # 실시간 이벤트 시그널 / 슬롯 연결
self.signal_login_commConnect() #로그인 요청 시그널 포함
self.get_account_info() #계좌번호 가져오기
self.detail_account_info() #예수금 요청 시그널 포함
self.detail_account_mystock() #계좌평가잔고내역 요청 시그널 포함
QTimer.singleShot(5000, self.not_concluded_account) #5초 뒤에 미체결 종목들 가져오기 실행
#########################################
QTest.qWait(10000)
self.read_code()
self.screen_number_setting()
QTest.qWait(5000)
#실시간 수신 관련 함수
self.dynamicCall("SetRealReg(QString, QString, QString, QString)", self.screen_start_stop_real, '', self.realType.REALTYPE['장시작시간']['장운영구분'], "0")
for code in self.portfolio_stock_dict.keys():
screen_num = self.portfolio_stock_dict[code]['스크린번호']
fids = self.realType.REALTYPE['주식체결']['체결시간']
self.dynamicCall("SetRealReg(QString, QString, QString, QString)", screen_num, code, fids, "1")
        # Slack notification disabled to match the commented-out Slack() setup above.
        # self.slack.notification(
        #     pretext="주식자동화 프로그램 동작",
        #     title="주식 자동화 프로그램 동작",
        #     fallback="주식 자동화 프로그램 동작",
        #     text="주식 자동화 프로그램이 동작 되었습니다."
        # )
def get_ocx_instance(self):
self.setControl("KHOPENAPI.KHOpenAPICtrl.1") # 레지스트리에 저장된 api 모듈 불러오기
def event_slots(self):
self.OnEventConnect.connect(self.login_slot) # 로그인 관련 이벤트
self.OnReceiveTrData.connect(self.trdata_slot) # 트랜잭션 요청 관련 이벤트
self.OnReceiveMsg.connect(self.msg_slot)
def real_event_slot(self):
self.OnReceiveRealData.connect(self.realdata_slot) # 실시간 이벤트 연결
self.OnReceiveChejanData.connect(self.chejan_slot) #종목 주문체결 관련한 이벤트
def signal_login_commConnect(self):
self.dynamicCall("CommConnect()") # 로그인 요청 시그널
self.login_event_loop.exec_() # 이벤트루프 실행
def login_slot(self, err_code):
self.logging.logger.debug(errors(err_code)[1])
#로그인 처리가 완료됐으면 이벤트 루프를 종료한다.
self.login_event_loop.exit()
def get_account_info(self):
account_list = self.dynamicCall("GetLoginInfo(QString)", "ACCNO") # 계좌번호 반환
account_num = account_list.split(';')[0]
self.account_num = account_num
self.logging.logger.debug("계좌번호 : %s" % account_num)
def detail_account_info(self, sPrevNext="0"):
self.dynamicCall("SetInputValue(QString, QString)", "계좌번호", self.account_num)
self.dynamicCall("SetInputValue(QString, QString)", "비밀번호", "0000")
self.dynamicCall("SetInputValue(QString, QString)", "비밀번호입력매체구분", "00")
self.dynamicCall("SetInputValue(QString, QString)", "조회구분", "1")
self.dynamicCall("CommRqData(QString, QString, int, QString)", "예수금상세현황요청", "opw00001", sPrevNext, self.screen_my_info)
self.detail_account_info_event_loop.exec_()
def detail_account_mystock(self, sPrevNext="0"):
self.dynamicCall("SetInputValue(QString, QString)", "계좌번호", self.account_num)
self.dynamicCall("SetInputValue(QString, QString)", "비밀번호", "0000")
self.dynamicCall("SetInputValue(QString, QString)", "비밀번호입력매체구분", "00")
self.dynamicCall("SetInputValue(QString, QString)", "조회구분", "1")
self.dynamicCall("CommRqData(QString, QString, int, QString)", "계좌평가잔고내역요청", "opw00018", sPrevNext, self.screen_my_info)
self.detail_account_info_event_loop.exec_()
def not_concluded_account(self, sPrevNext="0"):
self.dynamicCall("SetInputValue(QString, QString)", "계좌번호", self.account_num)
self.dynamicCall("SetInputValue(QString, QString)", "체결구분", "1")
self.dynamicCall("SetInputValue(QString, QString)", "매매구분", "0")
self.dynamicCall("CommRqData(QString, QString, int, QString)", "실시간미체결요청", "opt10075", sPrevNext, self.screen_my_info)
self.detail_account_info_event_loop.exec_()
def trdata_slot(self, sScrNo, sRQName, sTrCode, sRecordName, sPrevNext):
if sRQName == "예수금상세현황요청":
deposit = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, 0, "예수금")
self.deposit = int(deposit)
use_money = float(self.deposit) * self.use_money_percent
self.use_money = int(use_money)
self.use_money = self.use_money / 4
output_deposit = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, 0, "출금가능금액")
self.output_deposit = int(output_deposit)
self.logging.logger.debug("예수금 : %s" % self.output_deposit)
self.stop_screen_cancel(self.screen_my_info)
self.detail_account_info_event_loop.exit()
elif sRQName == "계좌평가잔고내역요청":
total_buy_money = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, 0, "총매입금액")
self.total_buy_money = int(total_buy_money)
total_profit_loss_money = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, 0, "총평가손익금액")
self.total_profit_loss_money = int(total_profit_loss_money)
total_profit_loss_rate = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, 0, "총수익률(%)")
self.total_profit_loss_rate = float(total_profit_loss_rate)
self.logging.logger.debug("계좌평가잔고내역요청 싱글데이터 : %s - %s - %s" % (total_buy_money, total_profit_loss_money, total_profit_loss_rate))
rows = self.dynamicCall("GetRepeatCnt(QString, QString)", sTrCode, sRQName)
for i in range(rows):
code = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "종목번호") # 출력 : A039423 // 알파벳 A는 장내주식, J는 ELW종목, Q는 ETN종목
code = code.strip()[1:]
code_nm = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "종목명") # 출럭 : 한국기업평가
stock_quantity = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "보유수량") # 보유수량 : 000000000000010
buy_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "매입가") # 매입가 : 000000000054100
learn_rate = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "수익률(%)") # 수익률 : -000000001.94
current_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "현재가") # 현재가 : 000000003450
total_chegual_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "매입금액")
possible_quantity = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "매매가능수량")
self.logging.logger.debug("종목코드: %s - 종목명: %s - 보유수량: %s - 매입가:%s - 수익률: %s - 현재가: %s" % (
code, code_nm, stock_quantity, buy_price, learn_rate, current_price))
if code in self.account_stock_dict: # dictionary 에 해당 종목이 있나 확인
pass
else:
self.account_stock_dict[code] = {}
code_nm = code_nm.strip()
stock_quantity = int(stock_quantity.strip())
buy_price = int(buy_price.strip())
learn_rate = float(learn_rate.strip())
current_price = int(current_price.strip())
total_chegual_price = int(total_chegual_price.strip())
possible_quantity = int(possible_quantity.strip())
self.account_stock_dict[code].update({"종목명": code_nm})
self.account_stock_dict[code].update({"보유수량": stock_quantity})
self.account_stock_dict[code].update({"매입가": buy_price})
self.account_stock_dict[code].update({"수익률(%)": learn_rate})
self.account_stock_dict[code].update({"현재가": current_price})
self.account_stock_dict[code].update({"매입금액": total_chegual_price})
self.account_stock_dict[code].update({'매매가능수량' : possible_quantity})
self.logging.logger.debug("sPreNext : %s" % sPrevNext)
print("계좌에 가지고 있는 종목은 %s " % rows)
if sPrevNext == "2":
self.detail_account_mystock(sPrevNext="2")
else:
self.detail_account_info_event_loop.exit()
elif sRQName == "실시간미체결요청":
rows = self.dynamicCall("GetRepeatCnt(QString, QString)", sTrCode, sRQName)
for i in range(rows):
code = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "종목코드")
code_nm = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "종목명")
order_no = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "주문번호")
order_status = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i,
"주문상태") # 접수,확인,체결
order_quantity = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i,
"주문수량")
order_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i,
"주문가격")
order_gubun = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i,
"주문구분") # -매도, +매수, -매도정정, +매수정정
not_quantity = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i,
"미체결수량")
ok_quantity = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i,
"체결량")
code = code.strip()
code_nm = code_nm.strip()
order_no = int(order_no.strip())
order_status = order_status.strip()
order_quantity = int(order_quantity.strip())
order_price = int(order_price.strip())
order_gubun = order_gubun.strip().lstrip('+').lstrip('-')
not_quantity = int(not_quantity.strip())
ok_quantity = int(ok_quantity.strip())
if order_no in self.not_account_stock_dict:
pass
else:
self.not_account_stock_dict[order_no] = {}
self.not_account_stock_dict[order_no].update({'종목코드': code})
self.not_account_stock_dict[order_no].update({'종목명': code_nm})
self.not_account_stock_dict[order_no].update({'주문번호': order_no})
self.not_account_stock_dict[order_no].update({'주문상태': order_status})
self.not_account_stock_dict[order_no].update({'주문수량': order_quantity})
self.not_account_stock_dict[order_no].update({'주문가격': order_price})
self.not_account_stock_dict[order_no].update({'주문구분': order_gubun})
self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity})
self.not_account_stock_dict[order_no].update({'체결량': ok_quantity})
self.logging.logger.debug("미체결 종목 : %s " % self.not_account_stock_dict[order_no])
self.detail_account_info_event_loop.exit()
elif sRQName == "주식일봉차트조회":
code = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, 0, "종목코드")
code = code.strip()
# data = self.dynamicCall("GetCommDataEx(QString, QString)", sTrCode, sRQName)
# [[‘’, ‘현재가’, ‘거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’. ‘’], [‘’, ‘현재가’, ’거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’, ‘’]. […]]
cnt = self.dynamicCall("GetRepeatCnt(QString, QString)", sTrCode, sRQName)
self.logging.logger.debug("남은 일자 수 %s" % cnt)
for i in range(cnt):
data = []
current_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "현재가") # 출력 : 000070
value = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "거래량") # 출력 : 000070
trading_value = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "거래대금") # 출력 : 000070
date = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "일자") # 출력 : 000070
start_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "시가") # 출력 : 000070
high_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "고가") # 출력 : 000070
low_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "저가") # 출력 : 000070
data.append("")
data.append(current_price.strip())
data.append(value.strip())
data.append(trading_value.strip())
data.append(date.strip())
data.append(start_price.strip())
data.append(high_price.strip())
data.append(low_price.strip())
data.append("")
self.calcul_data.append(data.copy())
if sPrevNext == "2":
self.day_kiwoom_db(code=code, sPrevNext=sPrevNext)
else:
self.logging.logger.debug("총 일수 %s" % len(self.calcul_data))
pass_success = False
# 120일 이평선을 그릴만큼의 데이터가 있는지 체크
if self.calcul_data == None or len(self.calcul_data) < 120:
pass_success = False
else:
# 120일 이평선의 최근 가격 구함
total_price = 0
for value in self.calcul_data[:120]:
total_price += int(value[1])
moving_average_price = total_price / 120
# 오늘자 주가가 120일 이평선에 걸쳐있는지 확인
bottom_stock_price = False
check_price = None
if int(self.calcul_data[0][7]) <= moving_average_price and moving_average_price <= int(self.calcul_data[0][6]):
self.logging.logger.debug("오늘 주가 120이평선 아래에 걸쳐있는 것 확인")
bottom_stock_price = True
check_price = int(self.calcul_data[0][6])
# 과거 일봉 데이터를 조회하면서 120일 이평선보다 주가가 계속 밑에 존재하는지 확인
prev_price = None
if bottom_stock_price == True:
moving_average_price_prev = 0
price_top_moving = False
idx = 1
while True:
if len(self.calcul_data[idx:]) < 120: # 120일치가 있는지 계속 확인
self.logging.logger.debug("120일치가 없음")
break
total_price = 0
for value in self.calcul_data[idx:120+idx]:
total_price += int(value[1])
moving_average_price_prev = total_price / 120
if moving_average_price_prev <= int(self.calcul_data[idx][6]) and idx <= 20:
self.logging.logger.debug("20일 동안 주가가 120일 이평선과 같거나 위에 있으면 조건 통과 못함")
price_top_moving = False
break
elif int(self.calcul_data[idx][7]) > moving_average_price_prev and idx > 20: # 120일 이평선 위에 있는 구간 존재
self.logging.logger.debug("120일치 이평선 위에 있는 구간 확인됨")
price_top_moving = True
prev_price = int(self.calcul_data[idx][7])
break
idx += 1
                    # Check that the moving average found there is lower than the most recent moving average
if price_top_moving == True:
if moving_average_price > moving_average_price_prev and check_price > prev_price:
self.logging.logger.debug("포착된 이평선의 가격이 오늘자 이평선 가격보다 낮은 것 확인")
self.logging.logger.debug("포착된 부분의 저가가 오늘자 주가의 고가보다 낮은지 확인")
pass_success = True
if pass_success == True:
self.logging.logger.debug("조건부 통과됨")
code_nm = self.dynamicCall("GetMasterCodeName(QString)", code)
f = open("files/condition_stock.txt", "a", encoding="utf8")
f.write("%s\t%s\t%s\n" % (code, code_nm, str(self.calcul_data[0][1])))
f.close()
elif pass_success == False:
self.logging.logger.debug("조건부 통과 못함")
self.calcul_data.clear()
self.calculator_event_loop.exit()
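                # In short, as implemented above, a stock passes only if: today's candle
                # straddles its 120-day moving average, the price stayed below that average
                # for roughly the last 20 sessions, and there is an older stretch above the
                # average whose own average (and low) sit below today's average and high.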
def stop_screen_cancel(self, sScrNo=None):
        self.dynamicCall("DisconnectRealData(QString)", sScrNo) # disconnect this screen number
def get_code_list_by_market(self, market_code):
        '''
        Get the list of stock codes for a market.
        0: KOSPI (main board), 10: KOSDAQ
        :param market_code: market code to query
        :return: list of stock codes
        '''
code_list = self.dynamicCall("GetCodeListByMarket(QString)", market_code)
code_list = code_list.split(';')[:-1]
return code_list
def calculator_fnc(self):
        '''
        Collection of stock-analysis routines.
        :return:
        '''
code_list = self.get_code_list_by_market("10")
self.logging.logger.debug("코스닥 갯수 %s " % len(code_list))
for idx, code in enumerate(code_list):
            self.dynamicCall("DisconnectRealData(QString)", self.screen_calculation_stock) # disconnect the calculation screen
self.logging.logger.debug("%s / %s : KOSDAQ Stock Code : %s is updating... " % (idx + 1, len(code_list), code))
self.day_kiwoom_db(code=code)
def day_kiwoom_db(self, code=None, date=None, sPrevNext="0"):
        QTest.qWait(3600) # delay 3.6 seconds between requests
self.dynamicCall("SetInputValue(QString, QString)", "종목코드", code)
self.dynamicCall("SetInputValue(QString, QString)", "수정주가구분", "1")
if date != None:
self.dynamicCall("SetInputValue(QString, QString)", "기준일자", date)
        self.dynamicCall("CommRqData(QString, QString, int, QString)", "주식일봉차트조회", "opt10081", sPrevNext, self.screen_calculation_stock) # send the TR (transaction) request to the server
self.calculator_event_loop.exec_()
def read_code(self):
        if os.path.exists("files/condition_stock.txt"): # check that the file exists at this path
            f = open("files/condition_stock.txt", "r", encoding="utf8") # "r" opens the file for reading
            lines = f.readlines() # read every line from the file
            for line in lines: # process the contents line by line
if line != "":
ls = line.split("\t")
stock_code = ls[0]
stock_name = ls[1]
stock_price = int(ls[2].split("\n")[0])
stock_price = abs(stock_price)
self.portfolio_stock_dict.update({stock_code:{"종목명":stock_name, "현재가":stock_price}})
f.close()
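        # Each line of files/condition_stock.txt is tab separated, as written by the
        # calculator above. Illustrative example (values are made up):
        #   "035420\tNAVER\t214500\n"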
def merge_dict(self):
self.all_stock_dict.update({"계좌평가잔고내역": self.account_stock_dict})
self.all_stock_dict.update({'미체결종목': self.not_account_stock_dict})
self.all_stock_dict.update({'포트폴리오종목': self.portfolio_stock_dict})
def screen_number_setting(self):
screen_overwrite = []
        # stocks held in the account balance summary
for code in self.account_stock_dict.keys():
if code not in screen_overwrite:
screen_overwrite.append(code)
        # stocks with outstanding (unfilled) orders
for order_number in self.not_account_stock_dict.keys():
code = self.not_account_stock_dict[order_number]['종목코드']
if code not in screen_overwrite:
screen_overwrite.append(code)
        # stocks tracked in the portfolio
for code in self.portfolio_stock_dict.keys():
if code not in screen_overwrite:
screen_overwrite.append(code)
        # assign screen numbers
cnt = 0
for code in screen_overwrite:
temp_screen = int(self.screen_real_stock)
meme_screen = int(self.screen_meme_stock)
if (cnt % 50) == 0:
temp_screen += 1
self.screen_real_stock = str(temp_screen)
if (cnt % 50) == 0:
meme_screen += 1
self.screen_meme_stock = str(meme_screen)
if code in self.portfolio_stock_dict.keys():
self.portfolio_stock_dict[code].update({"스크린번호": str(self.screen_real_stock)})
self.portfolio_stock_dict[code].update({"주문용스크린번호": str(self.screen_meme_stock)})
elif code not in self.portfolio_stock_dict.keys():
self.portfolio_stock_dict.update({code: {"스크린번호": str(self.screen_real_stock), "주문용스크린번호": str(self.screen_meme_stock)}})
cnt += 1
    # Receive real-time market data
def realdata_slot(self, sCode, sRealType, sRealData):
if sRealType == "장시작시간":
            fid = self.realType.REALTYPE[sRealType]['장운영구분'] # (0: before open, 2: pre-close session (20 min), 3: market open, 4, 8: market close (30 min), 9: end of session)
value = self.dynamicCall("GetCommRealData(QString, int)", sCode, fid)
if value == '0':
self.logging.logger.debug("장 시작 전")
elif value == '3':
self.logging.logger.debug("장 시작")
elif value == "2":
self.logging.logger.debug("장 종료, 동시호가로 넘어감")
elif value == "4":
self.logging.logger.debug("3시30분 장 종료")
for code in self.portfolio_stock_dict.keys():
self.dynamicCall("SetRealRemove(QString, QString)", self.portfolio_stock_dict[code]['스크린번호'], code)
QTest.qWait(5000)
self.file_delete()
self.calculator_fnc()
sys.exit()
elif sRealType == "주식체결":
a = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['체결시간']) # 출력 HHMMSS
b = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['현재가']) # 출력 : +(-)2520
b = abs(int(b))
c = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['전일대비']) # 출력 : +(-)2520
c = abs(int(c))
d = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['등락율']) # 출력 : +(-)12.98
d = float(d)
e = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가']) # 출력 : +(-)2520
e = abs(int(e))
f = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가']) # 출력 : +(-)2515
f = abs(int(f))
g = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['거래량']) # 출력 : +240124 매수일때, -2034 매도일 때
g = abs(int(g))
h = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['누적거래량']) # 출력 : 240124
h = abs(int(h))
i = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['고가']) # 출력 : +(-)2530
i = abs(int(i))
j = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['시가']) # 출력 : +(-)2530
j = abs(int(j))
k = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['저가']) # 출력 : +(-)2530
k = abs(int(k))
if sCode not in self.portfolio_stock_dict:
self.portfolio_stock_dict.update({sCode:{}})
self.portfolio_stock_dict[sCode].update({"체결시간": a})
self.portfolio_stock_dict[sCode].update({"현재가": b})
self.portfolio_stock_dict[sCode].update({"전일대비": c})
self.portfolio_stock_dict[sCode].update({"등락율": d})
self.portfolio_stock_dict[sCode].update({"(최우선)매도호가": e})
self.portfolio_stock_dict[sCode].update({"(최우선)매수호가": f})
self.portfolio_stock_dict[sCode].update({"거래량": g})
self.portfolio_stock_dict[sCode].update({"누적거래량": h})
self.portfolio_stock_dict[sCode].update({"고가": i})
self.portfolio_stock_dict[sCode].update({"시가": j})
self.portfolio_stock_dict[sCode].update({"저가": k})
if sCode in self.account_stock_dict.keys() and sCode not in self.jango_dict.keys():
asd = self.account_stock_dict[sCode]
meme_rate = (b - asd['매입가']) / asd['매입가'] * 100
if asd['매매가능수량'] > 0 and (meme_rate > 5 or meme_rate < -5):
order_success = self.dynamicCall(
"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)",
["신규매도", self.portfolio_stock_dict[sCode]["주문용스크린번호"], self.account_num, 2, sCode, asd['매매가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], ""]
)
if order_success == 0:
self.logging.logger.debug("매도주문 전달 성공")
del self.account_stock_dict[sCode]
else:
self.logging.logger.debug("매도주문 전달 실패")
elif sCode in self.jango_dict.keys():
jd = self.jango_dict[sCode]
meme_rate = (b - jd['매입단가']) / jd['매입단가'] * 100
if jd['주문가능수량'] > 0 and (meme_rate > 5 or meme_rate < -5):
order_success = self.dynamicCall(
"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)",
["신규매도", self.portfolio_stock_dict[sCode]["주문용스크린번호"], self.account_num, 2, sCode, jd['주문가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], ""]
)
if order_success == 0:
self.logging.logger.debug("매도주문 전달 성공")
else:
self.logging.logger.debug("매도주문 전달 실패")
elif d > 2.0 and sCode not in self.jango_dict:
self.logging.logger.debug("매수조건 통과 %s " % sCode)
result = (self.use_money * 0.1) / e
quantity = int(result)
order_success = self.dynamicCall(
"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)",
["신규매수", self.portfolio_stock_dict[sCode]["주문용스크린번호"], self.account_num, 1, sCode, quantity, e, self.realType.SENDTYPE['거래구분']['지정가'], ""]
)
if order_success == 0:
self.logging.logger.debug("매수주문 전달 성공")
else:
self.logging.logger.debug("매수주문 전달 실패")
not_meme_list = list(self.not_account_stock_dict)
for order_num in not_meme_list:
code = self.not_account_stock_dict[order_num]["종목코드"]
meme_price = self.not_account_stock_dict[order_num]['주문가격']
not_quantity = self.not_account_stock_dict[order_num]['미체결수량']
order_gubun = self.not_account_stock_dict[order_num]['주문구분']
if order_gubun == "매수" and not_quantity > 0 and e > meme_price:
order_success = self.dynamicCall(
"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)",
["매수취소", self.portfolio_stock_dict[sCode]["주문용스크린번호"], self.account_num, 3, code, 0, 0, self.realType.SENDTYPE['거래구분']['지정가'], order_num]
)
if order_success == 0:
self.logging.logger.debug("매수취소 전달 성공")
else:
self.logging.logger.debug("매수취소 전달 실패")
elif not_quantity == 0:
del self.not_account_stock_dict[order_num]
    # Real-time order execution information
def chejan_slot(self, sGubun, nItemCnt, sFidList):
if int(sGubun) == 0: #주문체결
account_num = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['계좌번호'])
sCode = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['종목코드'])[1:]
stock_name = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['종목명'])
stock_name = stock_name.strip()
origin_order_number = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['원주문번호']) # 출력 : defaluse : "000000"
order_number = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['주문번호']) # 출럭: 0115061 마지막 주문번호
order_status = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['주문상태']) # 출력: 접수, 확인, 체결
order_quan = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['주문수량']) # 출력 : 3
order_quan = int(order_quan)
order_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['주문가격']) # 출력: 21000
order_price = int(order_price)
not_chegual_quan = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['미체결수량']) # 출력: 15, default: 0
not_chegual_quan = int(not_chegual_quan)
order_gubun = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['주문구분']) # 출력: -매도, +매수
order_gubun = order_gubun.strip().lstrip('+').lstrip('-')
chegual_time_str = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['주문/체결시간']) # 출력: '151028'
chegual_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['체결가']) # 출력: 2110 default : ''
if chegual_price == '':
chegual_price = 0
else:
chegual_price = int(chegual_price)
chegual_quantity = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['체결량']) # 출력: 5 default : ''
if chegual_quantity == '':
chegual_quantity = 0
else:
chegual_quantity = int(chegual_quantity)
current_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['현재가']) # 출력: -6000
current_price = abs(int(current_price))
first_sell_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['(최우선)매도호가']) # 출력: -6010
first_sell_price = abs(int(first_sell_price))
first_buy_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['(최우선)매수호가']) # 출력: -6000
first_buy_price = abs(int(first_buy_price))
            ######## register the order number if this is a newly arrived order
if order_number not in self.not_account_stock_dict.keys():
self.not_account_stock_dict.update({order_number: {}})
self.not_account_stock_dict[order_number].update({"종목코드": sCode})
self.not_account_stock_dict[order_number].update({"주문번호": order_number})
self.not_account_stock_dict[order_number].update({"종목명": stock_name})
self.not_account_stock_dict[order_number].update({"주문상태": order_status})
self.not_account_stock_dict[order_number].update({"주문수량": order_quan})
self.not_account_stock_dict[order_number].update({"주문가격": order_price})
self.not_account_stock_dict[order_number].update({"미체결수량": not_chegual_quan})
self.not_account_stock_dict[order_number].update({"원주문번호": origin_order_number})
self.not_account_stock_dict[order_number].update({"주문구분": order_gubun})
self.not_account_stock_dict[order_number].update({"주문/체결시간": chegual_time_str})
self.not_account_stock_dict[order_number].update({"체결가": chegual_price})
self.not_account_stock_dict[order_number].update({"체결량": chegual_quantity})
self.not_account_stock_dict[order_number].update({"현재가": current_price})
self.not_account_stock_dict[order_number].update({"(최우선)매도호가": first_sell_price})
self.not_account_stock_dict[order_number].update({"(최우선)매수호가": first_buy_price})
elif int(sGubun) == 1: #잔고
account_num = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['계좌번호'])
sCode = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['종목코드'])[1:]
stock_name = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['종목명'])
stock_name = stock_name.strip()
current_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['현재가'])
current_price = abs(int(current_price))
stock_quan = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['보유수량'])
stock_quan = int(stock_quan)
like_quan = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['주문가능수량'])
like_quan = int(like_quan)
buy_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['매입단가'])
buy_price = abs(int(buy_price))
total_buy_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['총매입가']) # 계좌에 있는 종목의 총매입가
total_buy_price = int(total_buy_price)
meme_gubun = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['매도매수구분'])
meme_gubun = self.realType.REALTYPE['매도수구분'][meme_gubun]
first_sell_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['(최우선)매도호가'])
first_sell_price = abs(int(first_sell_price))
first_buy_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['(최우선)매수호가'])
first_buy_price = abs(int(first_buy_price))
if sCode not in self.jango_dict.keys():
self.jango_dict.update({sCode:{}})
self.jango_dict[sCode].update({"현재가": current_price})
self.jango_dict[sCode].update({"종목코드": sCode})
self.jango_dict[sCode].update({"종목명": stock_name})
self.jango_dict[sCode].update({"보유수량": stock_quan})
self.jango_dict[sCode].update({"주문가능수량": like_quan})
self.jango_dict[sCode].update({"매입단가": buy_price})
self.jango_dict[sCode].update({"총매입가": total_buy_price})
self.jango_dict[sCode].update({"매도매수구분": meme_gubun})
self.jango_dict[sCode].update({"(최우선)매도호가": first_sell_price})
self.jango_dict[sCode].update({"(최우선)매수호가": first_buy_price})
if stock_quan == 0:
del self.jango_dict[sCode]
    # Get messages sent to / received from the server
def msg_slot(self, sScrNo, sRQName, sTrCode, msg):
self.logging.logger.debug("스크린: %s, 요청이름: %s, tr코드: %s --- %s" %(sScrNo, sRQName, sTrCode, msg))
    # Delete the condition file
def file_delete(self):
if os.path.isfile("files/condition_stock.txt"):
os.remove("files/condition_stock.txt") | 2.421875 | 2 |
src/kong/json_field.py | paulgessinger/kong | 3 | 12792771 | <reponame>paulgessinger/kong
"""
Polyfill for a JSON field in SQLite.
Newer versions of sqlite (>= 3.9.0) have a native JSON extension, which we use.
If the sqlite version is lower, we roll a less-optimal replacement.
"""
from typing import Dict, cast
import peewee as pw
from peewee import sqlite3
import json
if sqlite3.sqlite_version_info < (3, 9, 0): # type: ignore
class JSONField(pw.CharField): # pragma: no cover
"""
Polyfill class to provide a JSON field
"""
def db_value(self, value: Dict) -> str:
"""
Convert a value to a string for storage in a `CharField`
:param value: The value to store
:return: The JSON string
"""
return json.dumps(value)
def python_value(self, value: str) -> Dict:
"""
Convert a string value from the database back to what it was.
:param value: The string value
:return: Parsed JSON value
"""
return cast(Dict, json.loads(value))
else: # pragma: no cover
# flake8: noqa
from playhouse.sqlite_ext import JSONField # type: ignore
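

if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch (illustrative only, not part of the library API):
    # store a dict through JSONField and read it back from an in-memory database.
    _db = pw.SqliteDatabase(":memory:")

    class _Event(pw.Model):
        payload = JSONField()

        class Meta:
            database = _db

    _db.connect()
    _db.create_tables([_Event])
    _Event.create(payload={"answer": 42})
    print(_Event.select().first().payload)  # -> {'answer': 42}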
| 2.984375 | 3 |
idataapi_transform/DataProcess/DataWriter/MySQLWriter.py | markqiu/idataapi-transform | 41 | 12792772 | <reponame>markqiu/idataapi-transform
import json
import asyncio
import random
import logging
import traceback
from .BaseWriter import BaseWriter
class MySQLWriter(BaseWriter):
def __init__(self, config):
super().__init__()
self.config = config
self.total_miss_count = 0
self.success_count = 0
self.table_checked = False
self.key_fields = list()
self.auto_increment_keys = set()
async def write(self, responses):
await self.config.get_mysql_pool_cli() # init mysql pool
miss_count = 0
original_length = len(responses)
if self.config.filter:
target_responses = list()
for i in responses:
i = self.config.filter(i)
if i:
target_responses.append(i)
else:
miss_count += 1
responses = target_responses
if not responses:
self.finish_once(miss_count, original_length)
return
# After filtered, still have responses to write
if not self.table_checked:
await self.table_check(responses)
if await self.perform_write(responses):
self.finish_once(miss_count, original_length)
def __exit__(self, exc_type, exc_val, exc_tb):
self.config.free_resource()
logging.info("%s write done, total filtered %d item, total write %d item" %
(self.config.name, self.total_miss_count, self.success_count))
def __enter__(self):
return self
def finish_once(self, miss_count, original_length):
self.total_miss_count += miss_count
self.success_count += original_length
logging.info("%s write %d item, filtered %d item" % (self.config.name, original_length - miss_count, miss_count))
async def table_check(self, responses):
await self.config.cursor.execute("SHOW TABLES LIKE '%s'" % (self.config.table, ))
result = await self.config.cursor.fetchone()
if result is None:
await self.create_table(responses)
# check field
await self.config.cursor.execute("DESC %s" % (self.config.table, ))
results = await self.config.cursor.fetchall()
for field in results:
if "auto_increment" in field:
self.auto_increment_keys.add(field[0])
fields = set(i[0] for i in results)
self.key_fields = list(i[0] for i in results)
real_keys = set(responses[0].keys())
difference_set = real_keys.difference(fields)
if difference_set:
# real keys not subset of fields
raise ValueError("Field %s not in MySQL Table: %s" % (str(difference_set), self.config.table))
self.table_checked = True
async def create_table(self, responses):
test_response = dict()
for response in responses[:50]:
for k, v in response.items():
if k not in test_response:
test_response[k] = v
elif test_response[k] is None:
test_response[k] = v
elif isinstance(v, dict) or isinstance(v, list):
if len(json.dumps(test_response[k])) < len(json.dumps(v)):
test_response[k] = v
elif v is not None and test_response[k] < v:
test_response[k] = v
sql = """
CREATE TABLE `%s` (
""" % (self.config.table, )
first_field = True
        for key, value in test_response.items():  # use the representative sample values gathered above
if "Count" in key:
field_type = "BIGINT"
elif value is None:
field_type = "TEXT"
elif key in ("content", ) or isinstance(value, dict) or isinstance(value, list):
field_type = "TEXT"
elif isinstance(value, bool):
field_type = "BOOLEAN"
elif isinstance(value, int):
field_type = "BIGINT"
elif isinstance(value, float):
field_type = "DOUBLE"
# varchar can store at most 65536 bytes, utf8 occupy 1-8 bytes per character,
# so length should be less than 65536 / 8 = 8192
# assume this field (the shortest length) * 4 <= the longest length(8192)
elif len(value) > 2048:
field_type = "TEXT"
else:
length = len(value) * 4
if length < 256:
length = 256
field_type = "VARCHAR(%d)" % (length, )
sql += ("\t" if first_field else "\t\t") + "`%s` %s" % (key, field_type)
if key == "id":
sql += " NOT NULL,\n"
else:
sql += ",\n"
if first_field:
first_field = False
tail_sql = """
\tPRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=%s
""" % (self.config.charset, )
sql += tail_sql
logging.info("Creating table: %s\n%s", self.config.table, sql)
await self.config.cursor.execute(sql)
await self.config.connection.commit()
logging.info("table created")
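
    # Illustrative sketch (hypothetical input): for a response like
    #   {"id": "abc", "likeCount": 3, "title": "hi"}
    # the method above would emit DDL along the lines of
    #   CREATE TABLE `mytable` (
    #       `id` VARCHAR(256) NOT NULL,
    #       `likeCount` BIGINT,
    #       `title` VARCHAR(256),
    #       PRIMARY KEY (`id`)
    #   ) ENGINE=InnoDB DEFAULT CHARSET=utf8
    # where the table name and charset come from the writer's config.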
async def perform_write(self, responses):
sql = "REPLACE INTO %s VALUES " % (self.config.table, )
normal_sql = False
sql_without_auto_increment_keys = list()
for each in responses:
need_specific_sql = False
keys = list()
curr_sql = '('
for field in self.key_fields:
if field in self.auto_increment_keys and field not in each:
need_specific_sql = True
continue
val = each[field]
keys.append(field)
if isinstance(val, dict) or isinstance(val, list):
val = json.dumps(val)
if val is None:
curr_sql += 'NULL,'
else:
curr_sql += repr(val) + ","
curr_sql = curr_sql[:-1] + '),\n'
if need_specific_sql:
sql_keys = "("
for each_sql_key in keys:
sql_keys += each_sql_key + ","
sql_keys = sql_keys[:-1] + ")"
sql_without_auto_increment_keys.append("REPLACE INTO %s%s VALUES " % (self.config.table, sql_keys) + curr_sql[:-2])
else:
normal_sql = True
sql += curr_sql
sql = sql[:-2]
try_time = 0
while try_time < self.config.max_retry:
try:
ret_sql = ""
if normal_sql:
ret_sql += sql + ";\n"
if sql_without_auto_increment_keys:
ret_sql += ";\n".join(sql_without_auto_increment_keys)
ret_sql += ";"
await self.config.cursor.execute(ret_sql)
await self.config.cursor.connection.commit()
return True
except Exception as e:
try_time += 1
if try_time < self.config.max_retry:
logging.error("retry: %d, %s" % (try_time, str(e)))
await asyncio.sleep(random.uniform(self.config.random_min_sleep, self.config.random_max_sleep))
else:
logging.error("Give up MySQL writer: %s, After retry: %d times, still fail to write, "
"total write %d items, total filtered: %d items, reason: %s" %
(self.config.name, self.config.max_retry, self.success_count, self.total_miss_count,
str(traceback.format_exc())))
return False
| 2.3125 | 2 |
src/main.py | prakhar154/Cassava-Leaf-Disease-Classification | 0 | 12792773 | import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
from model import CassavaModel
from loss import DenseCrossEntropy
import dataset
from config import *
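# NOTE: config is assumed to provide the constants used below,
# e.g. DEVICE (such as 'cuda' or 'cpu'), BATCH_SIZE and EPOCHS.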
def train_one_fold(fold, model, optimizer):
df = pd.read_csv('./input/train_ohe.csv')
train_df = df[df.kfold != fold].reset_index(drop=True)
valid_df = df[df.kfold == fold].reset_index(drop=True)
train_dataset = dataset.CassavaDataset(train_df, device=DEVICE)
valid_dataset = dataset.CassavaDataset(valid_df, device=DEVICE)
train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True)
vaid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True)
device = torch.device(DEVICE)
criterion = DenseCrossEntropy()
train_fold_results = []
for epoch in range(EPOCHS):
model.train()
t_loss = 0
for step, batch in enumerate(train_dataloader):
img = batch[0]
label = batch[1]
img = img.to(DEVICE, dtype=torch.float)
label = label.to(DEVICE, dtype=torch.float)
outputs = model(img)
# print(f'outputs \n {outputs}')
loss = criterion(outputs, label.squeeze(-1))
loss.backward()
t_loss += loss.item()
optimizer.step()
optimizer.zero_grad()
model.eval()
val_loss = 0
val_preds = None
val_labels = None
for step, batch in enumerate(vaid_dataloader):
img = batch[0]
label = batch[1]
if val_labels is None:
val_labels = label.clone().squeeze(-1)
else:
val_labels = torch.cat((val_labels, label.squeeze(-1)), dim=0)
img = img.to(DEVICE, dtype=torch.float)
label = label.to(DEVICE, dtype=torch.float)
with torch.no_grad():
outputs = model(img)
loss = criterion(outputs, label.squeeze(-1))
val_loss += loss.item()
            preds = torch.softmax(outputs, dim=1).detach().cpu()  # keep predictions on the CPU so the later .numpy() call works
if val_preds is None:
val_preds = preds
else:
val_preds = torch.cat((val_preds, preds), dim=0)
val_preds = torch.argmax(val_preds, dim=1)
print(f'EPOCH : {epoch}, train_loss: {t_loss}, valid_loss: {val_loss}')
train_fold_results.append({
'fold': fold,
'epoch': epoch,
'train_loss': t_loss / len(train_dataloader),
'valid_loss': val_loss / len(vaid_dataloader)
})
return val_preds, train_fold_results
def k_fold_train(folds):
model = CassavaModel()
model.to(DEVICE)
plist = [{'params':model.parameters(), 'lr':5e-5}]
optimizer = optim.Adam(plist)
df = pd.read_csv('./input/train_ohe.csv')
oof_preds = np.zeros((df.shape[0]))
train_results = []
for i in range(folds):
valid_idx = df[df.kfold == i].index
val_preds, train_fold_results = train_one_fold(i, model, optimizer)
oof_preds[valid_idx] = val_preds.numpy()
train_results += train_fold_results
torch.save({
'fold': i,
            'lr': optimizer.state_dict()['param_groups'][0]['lr'],
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()
        }, f'./model/baseline/val_loss {train_fold_results[-1]["valid_loss"]}.pth')
if __name__ == '__main__':
k_fold_train(5)
| 2.484375 | 2 |
tools/gdb/print_list.py | prattmic/F4OS | 42 | 12792774 | <gh_stars>10-100
import gdb
class Print_List(gdb.Command):
"""Prints out an F4OS linked list in a pretty format"""
def __init__(self):
super(Print_List, self).__init__("print-list", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
def invoke(self, arg, from_tty):
head = gdb.parse_and_eval(arg)
self.print_list(head)
def print_list(self, head):
malformed = False
seen = []
print "%x ->" % head.address,
node = head['next']
seen.append(node)
while node != head.address:
print "%x ->" % node,
node = node['next']
if node in seen:
malformed = True
break
seen.append(node)
print "%x" % node
if malformed:
print "(Loop detected. Malformed list?)"
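
# Usage sketch inside a GDB session (the symbol name is illustrative):
#   (gdb) source tools/gdb/print_list.py
#   (gdb) print-list some_list_head
# where some_list_head is any F4OS linked-list head in the inferior.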
Print_List()
| 3.078125 | 3 |
cd4ml/feature_set.py | camila-contreras/CD4ML-Scenarios | 113 | 12792775 | <reponame>camila-contreras/CD4ML-Scenarios<gh_stars>100-1000
import logging
def _exclude(fields, excluded):
return [field for field in fields if field not in excluded]
def _combine_dicts(*args):
results = {}
for arg in args:
results.update(arg)
return results
class FeatureSetBase:
"""
Generic interface for feature sets
"""
def __init__(self, identifier_field, target_field):
# fields to be filled out in derived class
self.logger = logging.getLogger(__name__)
self.params = None
self.info = None
self.identifier_field = identifier_field
self.target_field = target_field
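        # Derived classes are expected to populate self.params with at least the
        # keys used below. Illustrative sketch (field names are placeholders):
        #   self.params = {
        #       'base_fields_numerical': ['price'],
        #       'base_categorical_n_levels_dict': {'store_id': 50},
        #       'derived_fields_numerical': ['log_price'],
        #       'derived_categorical_n_levels_dict': {'price_bucket': 10},
        #       'extra_information_fields': [],
        #       'encoder_excluded_fields': [],
        #   }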
def fields_excluded_from_features(self):
id_target = [self.identifier_field, self.target_field]
return id_target + self.params['extra_information_fields']
def _exclude_non_features(self, fields):
return _exclude(fields, self.fields_excluded_from_features())
def base_feature_fields_numerical(self):
fields = self.params['base_fields_numerical']
return self._exclude_non_features(fields)
def base_feature_fields_categorical(self):
fields = sorted(self.params['base_categorical_n_levels_dict'].keys())
return self._exclude_non_features(fields)
def base_feature_fields(self):
return self.base_feature_fields_numerical() + self.base_feature_fields_categorical()
def derived_feature_fields_numerical(self):
return self.params['derived_fields_numerical']
def derived_feature_fields_categorical(self):
return sorted(self.params['derived_categorical_n_levels_dict'].keys())
def derived_feature_fields(self):
return self.derived_feature_fields_numerical() + self.derived_feature_fields_categorical()
def available_feature_fields_numerical(self):
return self.base_feature_fields_numerical() + self.derived_feature_fields_numerical()
def available_feature_fields_categorical(self):
return self.base_feature_fields_categorical() + self.derived_feature_fields_categorical()
def encoded_feature_fields_numerical(self):
return _exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields'])
def encoded_feature_fields_categorical(self):
return _exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields'])
def encoded_feature_fields(self):
return self.encoded_feature_fields_numerical() + self.encoded_feature_fields_categorical()
def omitted_feature_fields_for_input(self):
encoded = self.encoded_feature_fields()
return [field for field in encoded if field not in self.base_feature_fields()]
# feature transformations
def base_features_numerical(self, processed_row):
return {k: processed_row[k] for k in self.base_feature_fields_numerical()}
def base_features_categorical(self, processed_row):
return {k: processed_row[k] for k in self.base_feature_fields_categorical()}
def base_features(self, processed_row):
return {k: processed_row[k] for k in self.base_feature_fields()}
def derived_features_categorical(self, processed_row):
# TODO: override
assert isinstance(processed_row, dict)
return {}
def derived_features_numerical(self, processed_row):
# TODO: override
assert isinstance(processed_row, dict)
return {}
def derived_features(self, processed_row):
num = self.derived_features_numerical(processed_row)
cat = self.derived_features_categorical(processed_row)
return _combine_dicts(num, cat)
def features(self, processed_row):
base = self.base_features(processed_row)
derv = self.derived_features(processed_row)
return _combine_dicts(base, derv)
def ml_fields(self):
categorical_n_levels_dict = self.params['base_categorical_n_levels_dict'].copy()
categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict'])
cat_encoded = {k: v for k, v in categorical_n_levels_dict.items()
if k in self.encoded_feature_fields_categorical()}
numeric_fields = self.encoded_feature_fields_numerical()
intersection = set(cat_encoded.keys()).intersection(numeric_fields)
if intersection:
self.logger.info('categorical')
self.logger.info(cat_encoded)
self.logger.info('numerical')
self.logger.info(numeric_fields)
self.logger.info('intersection')
self.logger.info(intersection)
raise ValueError('categorical and numeric overlap')
return {'categorical': cat_encoded,
'numerical': numeric_fields,
'target_name': self.target_field}
| 2.328125 | 2 |
utils/configuration.py | jelenko5/cgccli | 0 | 12792776 | <reponame>jelenko5/cgccli
import os
import sys
import click
import utils.logger as logger
from utils.const import CONFIG_FILE_PATH
if sys.version_info[0] == 2:
import ConfigParser as configparser
else:
import configparser
config = configparser.ConfigParser()
def get_config_path():
homedir = os.environ.get('HOME', None)
if not homedir:
        click.echo("Home directory not found! Set the HOME environment variable.")
exit()
logger.debug('Home Directory: {}'.format(homedir))
config_file = os.path.join(homedir + CONFIG_FILE_PATH)
logger.debug('Config File Location: {}'.format(config_file))
if not os.path.exists(config_file):
click.echo('ERROR: No Config file present')
try:
os.makedirs(os.path.dirname(config_file))
except OSError as exc: # Guard against race condition
            click.echo('Config directory already exists, but the config file is missing')
logger.debug('Creating config file')
file = open(config_file, 'w')
file.write('[{}]'.format('global'))
file.close()
logger.debug(config.read(config_file))
return config_file
config_file = get_config_path()
def set_env(profile, key, value):
if not config.has_section(profile):
config.add_section(profile)
config.set(profile, key, value)
write_config()
def add_profile(profile):
if config.has_section(profile):
click.echo('Section [{}] already exists!!'.format(profile))
return
config.add_section(profile)
write_config()
def write_config():
with open(config_file, 'w') as configfile:
config.write(configfile)
def get_env(profile, key):
if has_env(profile, key):
return config.get(profile, key)
logger.debug('Not found in current profile')
click.echo('Value not found in {profile} use `cgccli config` command'.format(profile=profile))
exit()
def has_env(profile, key):
if profile:
logger.debug('Searching in profile : {}'.format(profile))
logger.debug('Searching key {}'.format(key))
return config.has_option(profile, key)
return False
def get_profiles():
return config.sections()
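

# Usage sketch (illustrative; profile and key names are placeholders, and
# writes go to the config file located by get_config_path()):
#   add_profile('myprofile')
#   set_env('myprofile', 'token', '<your token>')
#   get_env('myprofile', 'token')   # -> '<your token>'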
| 2.375 | 2 |
tests/fqe_operator_test.py | MichaelBroughton/OpenFermion-FQE | 33 | 12792777 | # Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FqeOperator."""
from fqe.fqe_ops import fqe_operator
from fqe import wavefunction
def test_operator():
"""Testing abstract FqeOperator class using a dummy class"""
# pylint: disable=useless-super-delegation
class TestFQEOperator(fqe_operator.FqeOperator):
"""
This class is just to make sure the abstract FqeOperator class is tested.
"""
def contract(
self,
brastate: "wavefunction.Wavefunction",
ketstate: "wavefunction.Wavefunction",
) -> complex:
return super().contract(brastate, ketstate)
def representation(self) -> str:
return super().representation()
def rank(self) -> int:
return super().rank()
test = TestFQEOperator()
wfn = wavefunction.Wavefunction([[1, 1, 1]])
assert round(abs(0.0 + 0.0j - test.contract(wfn, wfn)), 7) == 0
assert "fqe-operator" == test.representation()
assert 0 == test.rank()
| 2.328125 | 2 |
file_converters/ifcjson/__init__.py | IFCJSON-Team/IFC2JSON_python | 15 | 12792778 | <gh_stars>10-100
from ifcjson.ifc2json4 import IFC2JSON4
from ifcjson.ifc2json5a import IFC2JSON5a
# from ifcjson.to_ifcopenshell import JSON2IFC
| 1.265625 | 1 |
tests/all/backends/redis/client.py | stuaxo/python-stdnet | 61 | 12792779 | '''Test additional commands for redis client.'''
import json
from hashlib import sha1
from stdnet import getdb
from stdnet.backends import redisb
from stdnet.utils import test, flatzset
def get_version(info):
if 'redis_version' in info:
return info['redis_version']
else:
return info['Server']['redis_version']
class test_script(redisb.RedisScript):
script = (redisb.read_lua_file('commands.utils'),
'''\
local js = cjson.decode(ARGV[1])
return cjson.encode(js)''')
def callback(self, request, result, args, **options):
return json.loads(result.decode(request.encoding))
class TestCase(test.TestWrite):
multipledb = 'redis'
def setUp(self):
client = self.backend.client
self.client = client.prefixed(self.namespace)
def tearDown(self):
return self.client.flushdb()
def make_hash(self, key, d):
for k, v in d.items():
self.client.hset(key, k, v)
def make_list(self, name, l):
l = tuple(l)
self.client.rpush(name, *l)
self.assertEqual(self.client.llen(name), len(l))
def make_zset(self, name, d):
self.client.zadd(name, *flatzset(kwargs=d))
class TestExtraClientCommands(TestCase):
def test_coverage(self):
c = self.backend.client
self.assertEqual(c.prefix, '')
size = yield c.dbsize()
self.assertTrue(size >= 0)
def test_script_meta(self):
script = redisb.get_script('test_script')
self.assertTrue(script.script)
sha = sha1(script.script.encode('utf-8')).hexdigest()
self.assertEqual(script.sha1,sha)
def test_del_pattern(self):
c = self.client
items = ('bla',1,
'bla1','ciao',
'bla2','foo',
'xxxx','moon',
'blaaaaaaaaaaaaaa','sun',
'xyyyy','earth')
yield self.async.assertTrue(c.execute_command('MSET', *items))
N = yield c.delpattern('bla*')
self.assertEqual(N, 4)
yield self.async.assertFalse(c.exists('bla'))
yield self.async.assertFalse(c.exists('bla1'))
yield self.async.assertFalse(c.exists('bla2'))
yield self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa'))
yield self.async.assertEqual(c.get('xxxx'), b'moon')
N = yield c.delpattern('x*')
self.assertEqual(N, 2)
def testMove2Set(self):
yield self.multi_async((self.client.sadd('foo', 1, 2, 3, 4, 5),
self.client.lpush('bla', 4, 5, 6, 7, 8)))
r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's')
self.assertEqual(len(r), 2)
self.assertEqual(r[0], 2)
self.assertEqual(r[1], 1)
yield self.multi_async((self.client.sinterstore('res1', 'foo', 'bla'),
self.client.sunionstore('res2', 'foo', 'bla')))
m1 = yield self.client.smembers('res1')
m2 = yield self.client.smembers('res2')
m1 = sorted((int(r) for r in m1))
m2 = sorted((int(r) for r in m2))
self.assertEqual(m1, [4,5])
self.assertEqual(m2, [1,2,3,4,5,6,7,8])
def testMove2ZSet(self):
client = self.client
yield self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'),
client.lpush('bla','d','e','f','g')))
r = yield client.execute_script('move2set', ('foo','bla'), 'z')
self.assertEqual(len(r), 2)
self.assertEqual(r[0], 2)
self.assertEqual(r[1], 1)
yield self.multi_async((client.zinterstore('res1', ('foo', 'bla')),
client.zunionstore('res2', ('foo', 'bla'))))
m1 = yield client.zrange('res1', 0, -1)
m2 = yield client.zrange('res2', 0, -1)
self.assertEqual(sorted(m1), [b'd', b'e'])
self.assertEqual(sorted(m2), [b'a',b'b',b'c',b'd',b'e',b'f',b'g'])
def testMoveSetSet(self):
r = yield self.multi_async((self.client.sadd('foo',1,2,3,4,5),
self.client.sadd('bla',4,5,6,7,8)))
r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's')
self.assertEqual(len(r), 2)
self.assertEqual(r[0], 2)
self.assertEqual(r[1], 0)
def testMove2List2(self):
yield self.multi_async((self.client.lpush('foo',1,2,3,4,5),
self.client.lpush('bla',4,5,6,7,8)))
r = yield self.client.execute_script('move2set', ('foo','bla'), 's')
self.assertEqual(len(r), 2)
self.assertEqual(r[0], 2)
self.assertEqual(r[1], 2)
def test_bad_execute_script(self):
self.assertRaises(redisb.RedisError, self.client.execute_script, 'foo', ())
# ZSET SCRIPTING COMMANDS
def test_zdiffstore(self):
yield self.multi_async((self.make_zset('aa', {'a1': 1, 'a2': 1, 'a3': 1}),
self.make_zset('ba', {'a1': 2, 'a3': 2, 'a4': 2}),
self.make_zset('ca', {'a1': 6, 'a3': 5, 'a4': 4})))
n = yield self.client.zdiffstore('za', ['aa', 'ba', 'ca'])
self.assertEqual(n, 1)
r = yield self.client.zrange('za', 0, -1, withscores=True)
        self.assertEqual(list(r), [(b'a2', 1)])
def test_zdiffstore_withscores(self):
yield self.multi_async((self.make_zset('ab', {'a1': 6, 'a2': 1, 'a3': 2}),
self.make_zset('bb', {'a1': 1, 'a3': 1, 'a4': 2}),
self.make_zset('cb', {'a1': 3, 'a3': 1, 'a4': 4})))
n = yield self.client.zdiffstore('zb', ['ab', 'bb', 'cb'], withscores=True)
self.assertEqual(n, 2)
r = yield self.client.zrange('zb', 0, -1, withscores=True)
        self.assertEqual(list(r), [(b'a2', 1), (b'a1', 2)])
def test_zdiffstore2(self):
c = self.client
yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3, 'c', 4, 'd'),
c.zadd('s2', 6, 'a', 9, 'b', 100, 'c')))
r = yield c.zdiffstore('s3', ('s1', 's2'))
self.async.assertEqual(c.zcard('s3'), 1)
r = yield c.zrange('s3', 0, -1)
self.assertEqual(r, [b'd'])
def test_zdiffstore_withscores2(self):
c = self.client
yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3, 'c', 4, 'd'),
c.zadd('s2', 6, 'a', 2, 'b', 100, 'c')))
r = yield c.zdiffstore('s3', ('s1', 's2'), withscores=True)
self.async.assertEqual(c.zcard('s3'), 3)
r = yield c.zrange('s3', 0, -1, withscores=True)
self.assertEqual(dict(r), {b'a': -5.0, b'c': -97.0, b'd': 4.0})
def test_zpop_byrank(self):
yield self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e')
res = yield self.client.zpopbyrank('foo',0)
rem = yield self.client.zrange('foo',0,-1)
self.assertEqual(len(rem),4)
self.assertEqual(rem,[b'b',b'c',b'd',b'e'])
self.assertEqual(res,[b'a'])
res = yield self.client.zpopbyrank('foo',0,2)
self.assertEqual(res,[b'b',b'c',b'd'])
rem = yield self.client.zrange('foo',0,-1)
self.assertEqual(rem,[b'e'])
def test_zpop_byscore(self):
yield self.client.zadd('foo', 1, 'a', 2, 'b', 3, 'c', 4, 'd', 5, 'e')
res = yield self.client.zpopbyscore('foo', 2)
rem = yield self.client.zrange('foo', 0, -1)
self.assertEqual(len(rem), 4)
self.assertEqual(rem, [b'a', b'c', b'd', b'e'])
self.assertEqual(res, [b'b'])
res = yield self.client.zpopbyscore('foo', 0, 4.5)
self.assertEqual(res, [b'a', b'c', b'd'])
rem = yield self.client.zrange('foo', 0, -1)
self.assertEqual(rem, [b'e']) | 2.234375 | 2 |
src/07_mongoengine/service_central/nosql/mongo_setup.py | jabelk/mongodb-for-python-developers | 0 | 12792780 | <reponame>jabelk/mongodb-for-python-developers
import mongoengine
def global_init():
# this is where would pass in creds and port and such
# name= is the database db name
# when we define our classes we will refer to the "core" connection
# default localhost and port
mongoengine.register_connection(alias='core', name='demo_dealership')
# could have multiple like
# mongoengine.register_connection(alias='analytics', name='anotherDBname')
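    # Illustrative sketch (class and field names are placeholders): documents
    # opt into this connection through their meta block, e.g.
    #   class Car(mongoengine.Document):
    #       name = mongoengine.StringField()
    #       meta = {'db_alias': 'core'}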
| 2.703125 | 3 |
djangopreviewcard/cardviewinfo.py | hnjm/preview-card | 1 | 12792781 | from .mediasourcetype import MediaSourceType
class CardViewInfo:
def __init__(self, ms_type=MediaSourceType.NONE, url="", image_url="", title="", description=""):
self.__ms_type = ms_type
self.__url = url
self.__image_url = image_url
self.__title = title
self.__description = description
self.__error = ""
@property
def ms_type(self):
return self.__ms_type
@ms_type.setter
def ms_type(self, new_ms_type):
self.__ms_type = new_ms_type
@property
def url(self):
return self.__url
@url.setter
def url(self, new_url):
self.__url = new_url
@property
def image_url(self):
return self.__image_url
@image_url.setter
def image_url(self, new_image_url):
self.__image_url = new_image_url
@property
def title(self):
return self.__title
@title.setter
def title(self, new_title):
self.__title = new_title
@property
def description(self):
return self.__description
@description.setter
def description(self, new_description):
self.__description = new_description
@property
def error(self):
return self.__error
@error.setter
def error(self, new_error):
self.__error = new_error
def __str__(self):
return "{}\n{}\n{}\n{}\n{}\n{}\n{}\n".format(
"--------------------------------------------------------------------------------------------------------",
"ms_type\t\t{}".format(self.__ms_type),
"url\t\t\t{}".format(self.__url),
"image_url\t{}".format(self.__image_url),
"title\t\t{}".format(self.__title),
"desc\t\t{}".format(self.__description),
"error\t\t{}".format(self.__error),
"--------------------------------------------------------------------------------------------------------"
)
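
# Usage sketch (illustrative values only):
#   info = CardViewInfo(ms_type=MediaSourceType.NONE,
#                       url="https://example.com",
#                       title="Example title",
#                       description="Example description")
#   info.image_url = "https://example.com/preview.png"
#   print(info)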
| 2.453125 | 2 |
src/main.py | SimonK1/Zen-Garden-Evolutionary-Algorithm | 0 | 12792782 | <filename>src/main.py<gh_stars>0
import random
import copy
import sys
test1 = [
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, -1, 00, 00, 00, 00, 00, 00],
[00, -1, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, -1, 00, 00, 00, 00, 00, 00, 00],
[00, 00, -1, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, -1, -1, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
]
test2 = [
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, -1, 00, 00, 00, 00, 00, 00],
[00, -1, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, -1, 00, 00, 00, 00, 00, 00, 00],
[00, 00, -1, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, -1, -1, -1, 00, 00, 00],
[00, 00, 00, 00, 00, 00, -1, 00, -1, 00, 00, 00],
[00, 00, 00, 00, 00, 00, -1, -1, -1, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
]
test3 = [
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, -1, -1, -1, -1, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, -1, 00, 00, 00, 00, 00, 00, 00],
[00, -1, -1, -1, -1, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
]
test4 = [
[00, 00, 00, 00, 00, 00, 00, -1, 00, -1, 00, 00],
[00, 00, 00, 00, 00, 00, 00, -1, -1, -1, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],
]
class GGarden:
def __init__(self):
self.generateRandomMap()
self.countrocks()
self.fitness()
def generateRandomMap(self):
self.m = random.randrange(2, 11)
self.n = random.randrange(2, 11)
self.map = []
for i in range(self.m):
row = []
for j in range(self.n):
if random.random() < 0.1:
row.append(-1)
else:
row.append(0)
self.map.append(row)
def countrocks(self):
self.rocks = 0
for i in self.map:
for j in i:
if j == -1:
self.rocks += 1
def fitness(self):
self.max_fitness = 0
for i in self.map:
for j in i:
if j == 0:
self.max_fitness += 1
class Garden:
def __init__(self, map):
self.fillMap(map)
self.countrocks()
self.fitness()
def fillMap(self, map):
self.m = len(map)
self.n = len(map[0])
self.map = copy.deepcopy(map)
def countrocks(self):
self.rocks = 0
for i in self.map:
for j in i:
if j == -1:
self.rocks += 1
def fitness(self):
self.max_fitness = 0
for i in self.map:
for j in i:
if j == 0:
self.max_fitness += 1
class Gene:
def __init__(self, garden):
row = garden.m
column = garden.n
number = random.randrange((row + column) + (row + column))
# Each direction has its own function for movement
if number < column: # Down
self.down(number)
elif column <= number < row + column: # Left
self.left(column, number)
elif row + column <= number < column + column + row: # up
self.up(row, column, number)
else: # Right
self.right(row, column, number)
self.rotate()
# Moving down
def down(self, number):
self.start = (0, number)
        self.goTo = '1'  # Representing down
# Moving left
def left(self, column, number):
self.start = (number - column, column - 1)
self.goTo = '2' # Representing left
# Moving up
def up(self, row, column, number):
self.start = (row - 1, column + column + row - number - 1)
self.goTo = '3' # Representing up
# Moving up
def right(self, row, column, number):
self.start = ((row + column) + (row + column) - number - 1, 0)
self.goTo = '4' # Representing right
def rotate(self):
self.rotation = [
int(a)
for a in bin(random.randrange(1024))
[2:].zfill(10)
]
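    # The 10 random bits above act as tie-breakers during raking: whenever two
    # unraked neighbouring tiles are available, the next bit picks the turn
    # direction (see Chromosome.algo).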
class Chromosome:
def __init__(self, garden, start=True):
self.garden = garden
self.fGarden = Garden(self.garden.map)
self.genesFill(garden, start)
def genesFill(self, garden, start):
self.genes = []
helper = garden.m + garden.n + garden.rocks
if start == True:
for i in range(helper):
self.genes.append(Gene(self.garden))
self.algo()
def fitnessFunc(self):
self.fitness = 0
itera = sum(self.fGarden.map, [])
for x in itera:
if x > 0:
self.fitness += 1
def algo(self):
g = self.fGarden
number = 0
# Start of genes iterations
for gene in self.genes:
position = list(gene.start)
goTo = gene.goTo
x = 0
# We check if we can enter the garden
if g.map[position[0]][position[1]] != 0:
continue
number += 1
while (1):
# Rake the tile
g.map[position[0]][position[1]] = number
                # We choose the next tile to move to
if goTo == '3':
position[0] -= 1
elif goTo == '1':
position[0] += 1
elif goTo == '2':
position[1] -= 1
elif goTo == '4':
position[1] += 1
                # We check that we have not stepped off the edge of the map
if position[0] not in range(g.m) or position[1] not in range(g.n):
break
                # If the next tile is not yet raked, keep moving in the same direction
if g.map[position[0]][position[1]] == 0:
continue
                # If we hit an obstacle, step back and change the movement direction
if goTo == '3':
position[0] += 1
elif goTo == '1':
position[0] -= 1
elif goTo == '2':
position[1] += 1
elif goTo == '4':
position[1] -= 1
# We choose new tile
if goTo == '3' or goTo == '1':
helper = ([position[0], position[1] - 1], [position[0], position[1] + 1])
else:
helper = ([position[0] - 1, position[1]], [position[0] + 1, position[1]])
# We check surrounding tiles
nv = []
for p in helper:
try:
nv.append(g.map[p[0]][p[1]])
except IndexError:
nv.append('X')
                # If exactly one neighbouring tile is unraked
if nv.count(0) == 1:
position = helper[nv.index(0)]
                # If both neighbouring tiles are unraked, the gene's rotation bits pick one
elif nv.count(0) == 2:
position = helper[gene.rotation[x]]
x += 1
if x == len(gene.rotation):
x = 0
                # If every neighbouring tile is already raked (or off the map)
else:
if 'X' not in nv:
self.fitnessFunc()
return
break
if goTo == '3' or goTo == '1':
if helper.index(position) == 0:
goTo = '2'
else:
goTo = '4'
else:
if helper.index(position) == 0:
goTo = '3'
else:
goTo = '1'
self.fitnessFunc()
# Mutation of chromosomes
def mutate(self, newChr):
for i in range(len(newChr.genes)):
# New chromosome
number = random.random()
if number < 0.1:
newChr.genes[i] = Gene(self.garden)
# New rotations
elif number < 0.2:
newChr.genes[i].rotate()
# Crossing of new chromosomes
def crossing(self, other):
# Create new chromosome with empty genes
newChr = Chromosome(self.garden, False)
# Crossing process
mutateNum = random.random()
crossNum = random.random()
pivotPoint = random.randrange(len(self.genes))
        if crossNum < 0.425:
            # Crossing type 1 - single point: first part from this chromosome, the rest from the other
            newChr.genes = self.genes[:pivotPoint] + other.genes[pivotPoint:]
        elif crossNum < 0.85:
            # Crossing type 2 - uniform: each gene chosen at random from either parent
            newChr.genes = []
            for i in range(len(self.genes)):
                newChr.genes.append(random.choice((self.genes[i], other.genes[i])))
        else:
            # Crossing type 3 - no crossing: copy one parent's genes unchanged
            newChr.genes = random.choice((self.genes, other.genes))
# Mutations
if mutateNum < 0.5:
self.mutate(newChr)
newChr.algo()
return newChr
def solveMap(map):
    # Initialisation of variables
generation = []
# Garden creation
garden = Garden(map)
# Creation of starting set of chromosones
for i in range(50):
generation.append(Chromosome(garden))
# Generations creation
for i in range(1000):
# Saving the best chromosome
bestChr = max(generation, key=lambda x: x.fitness)
nextGeneration = [bestChr]
        # Stop early if a complete solution has been found
if bestChr.fitness == garden.max_fitness:
break
        # Create more chromosomes to fill the generation
for j in range(49):
# Choose random chromosomes from current generation
chromosome1, chromosome2 = sorted(random.sample(generation, 4), key=lambda x: x.fitness)[2:4]
            # Create a new chromosome via crossover and mutation
nextGeneration.append(chromosome1.crossing(chromosome2))
generation = nextGeneration
number = i
# Formatted Print
finalprint(garden, bestChr, number)
def generateMap():
    # Initialisation of variables
generation = []
# Garden creation
garden = GGarden()
# Creation of starting set of chromosones
for i in range(50):
generation.append(Chromosome(garden))
# Generations creation
for i in range(800):
# Saving the best chromosome
bestChr = max(generation, key=lambda x: x.fitness)
nextGeneration = [bestChr]
        # Stop early if a complete solution has been found
if bestChr.fitness == garden.max_fitness:
break
        # Create more chromosomes to fill the generation
for j in range(49):
# Choose random chromosomes from current generation
chromosome1, chromosome2 = sorted(random.sample(generation, 4), key=lambda x: x.fitness)[2:4]
            # Create a new chromosome via crossover and mutation
nextGeneration.append(chromosome1.crossing(chromosome2))
generation = nextGeneration
number = i
# Formatted Print
finalprint(garden, bestChr, number)
def finalprint(garden, best, number):
print()
print()
    # Printing all necessary generation information
print('Generations:%4d Max-Fitness:%4d Best-Fitness:%4d' % (number + 1, garden.max_fitness, best.fitness))
print('Tiles Left: %d' % (garden.max_fitness - best.fitness))
print("_________________________________________________________")
print("Initial Garden")
    # Transforming the garden into a clean gridded output
helper = ""
for x in garden.map:
for y in x:
if y == -1:
helper += ' K '
else:
helper += '%2d ' % y
helper += '\n'
print(helper)
print("_________________________________________________________")
print("Final Result")
    # Transforming the solved garden into a clean gridded output
helper = ""
for x in best.fGarden.map:
for y in x:
if y == -1:
helper += ' K '
else:
helper += '%2d ' % y
helper += '\n'
print(helper)
print("_________________________________________________________")
sys.exit()
print("----------------->Welcome to Zen Garden<-----------------")
print("")
print("Choose to load a map from a file or generate a random one")
print("_________________________________________________________")
print("Write: File - Load from file")
print("Write: Generate - Generate random map")
print("Write: Test - Test mode")
print("_________________________________________________________")
inp = input()
# File Function - loading from File
if inp == "File" or inp == "file":
print("Counting...")
file = []
# File opening
f = open("garden.txt", "r")
pocet = 0
# Transforming chars into 2D array of integers
while(1):
row = []
riadok = f.readline()
if riadok == '':
break
pocet += 1
riadok = riadok.split()
for i in riadok:
if i == '00':
row.append(0)
if i == '-1':
row.append(-1)
file.append(row)
solveMap(file)
elif inp == "Generate" or inp == "generate": # Generate Function - generate Random garden
print("Counting...")
generateMap()
elif inp == "Test" or inp == "test": # Test Function - Choose from available tests
print("_________________________________________________________")
print("Choose the number of test from 1 - 4")
print("Test1 - Model test")
print("Test2 - Unsolvable test")
print("Test3 - Test with staying in the garden")
print("Test4 - Test with one tile exit")
print("_________________________________________________________")
inp = input()
if inp == '1':
print("Counting...")
solveMap(test1)
elif inp == '2':
print("Counting...")
solveMap(test2)
elif inp == '3':
print("Counting...")
solveMap(test3)
elif inp == '4':
print("Counting...")
solveMap(test4)
else:
print("You entered wrong command")
sys.exit()
else:
print("You entered wrong command")
sys.exit()
| 2.078125 | 2 |
birthday/cron.py | joehalloran/birthday_project | 0 | 12792783 | <reponame>joehalloran/birthday_project
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START imports]
import datetime
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.api import mail
import webapp2
import utils
from settings import appSettings
from models import Owner, Birthday
class Summary(webapp2.RequestHandler):
def get(self):
now = datetime.date.today()
currentMonthDay = "%02d" % (now.month) + "%02d" % (now.day)
query = Birthday.query(projection=[Birthday.owner.identity], distinct=True)
allUsers = query.fetch();
birthdays = []
for user in allUsers:
q1 = Birthday.query(
user.owner.identity == Birthday.owner.identity
).order(
Birthday.monthday
)
q2 = q1.filter(
Birthday.monthday >= currentMonthDay
)
q3 = q1.filter(
Birthday.monthday < currentMonthDay
)
thisYearBDays = q2.fetch()
nextYearBDays = q3.fetch()
birthdays = thisYearBDays + nextYearBDays
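            # Remaining birthdays in the current calendar year come first,
            # followed by those that wrap around into next year.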
            body = "Upcoming birthdays:...."
for birthday in birthdays:
toEmail = birthday.owner.email
                body = body + birthday.firstName + " " + birthday.lastName + "<br />"
body = body + birthday.date.strftime("%B %d") + "<hr />"
mail.send_mail(sender=appSettings["sender_address"],
to=toEmail,
subject="Your upcoming birthdays",
body=body)
self.response.write("You have run birthdays cron job")
| 2.53125 | 3 |
picammodapipkg/camproject.py | nkosinathintuli/mypackage | 0 | 12792784 | """This module contains the functions for taking video recordings
and enabling/disabling light triggered automatic recording
"""
import anvil.server
import picamera
import takeImg
import adc
import smtplib
import motionDetect
motionState = False
server = smtplib.SMTP('smtp.gmail.com',587)
server.starttls()
server.login("<EMAIL>","cam25project")
online=False
online2=False
started=False
anvil.server.connect("BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB")
camera = picamera.PiCamera();
camera.resolution = (1024, 768);
message = " "
message2=" "
@anvil.server.callable
def takevideo():
"""Callable function to set/reset a flag to enable/disable
a video recording"""
global online
if online==False:
online=True
start_stop(online)
elif online==True:
online=False
start_stop(online)
@anvil.server.callable
def takelightvideo():
"""Callable function to set/reset a flag to enable/disable
automatic recording based on light intensity
"""
global message
global online2
if online2==False:
online2=True
message="light induced recording enabled"
elif online2==True:
online2=False
message="light induced recording disabled"
def start_stop(on1):
"""This is the function that does the actual video recording"""
global message
global server
if on1==True:
camera.start_recording('Desktop/footage.h264')
message="recording started"
elif on1==False:
camera.stop_recording()
message="footage captured"
mse="Subject: {}\n\n{}".format("NEW FOOTAGE CAPTURED",
"New video recording has been captured")
server.sendmail("<EMAIL>","<EMAIL>",mse)
server.quit()
def light_induced(ldr):
"""This starts/stops video recording if
ldr<=200 and ldr>200 respectively
"""
global started
if started==False:
if ldr<=200:
camera.start_recording('Desktop/lightfootage.h264')
started=True
elif started==True:
if ldr>200:
camera.stop_recording()
started=False
server = smtplib.SMTP('smtp.gmail.com',587)
server.starttls()
server.login("<EMAIL>","cam25project")
mse="Subject: {}\n\n{}".format("NEW LIGHT INDUCED FOOTAGE CAPTURED",
"New light triggered video captured")
server.sendmail("<EMAIL>","<EMAIL>",mse)
server.quit()
@anvil.server.callable
def display_message():
"""Callable function to display appropriate alert on button press"""
global message
return message
try:
while True:
if online2==True:
light_induced(adc.readadc(0))
print "light = ",adc.readadc(0)
except KeyboardInterrupt:
print("program terminated") # This is the output when the program is terminated
| 2.84375 | 3 |
gcf/gcf.py | pangjie/gcf | 18 | 12792785 | <reponame>pangjie/gcf<filename>gcf/gcf.py
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import click
from . import gcf_mining as gm
from . import gcf_scrape as gs
@click.group(invoke_without_command=True, no_args_is_help=True)
@click.option('--dj', '-d', default='',
help=u'Search by dj, such as -d \'西蒙#四十二\' or \'西蒙\'.\
Use #, if you need a dj group.')
@click.option('--prog', '-p', default='',
help='Search by program, such as -p \'gadio\'.\
Use #, if you need multiple programs.')
@click.option('--title', '-tt', default='',
help=u'Search by program, such as -tt \'的\'.')
@click.option('--time', '-t', nargs=2, default=('19850611', '20850611'),
help='Search by release time, such as -t 20180101 20180302')
@click.option('--expt', '-e', default=False, is_flag=True,
help='Export data to a cvs file')
@click.option('--col', '-c', default='date#title#dj',
              help='Show data with specific columns, e.g. -c \'date#title#dj\'')
@click.option('--path', '-pt', default='',
help='Give the path to loading another cvs data.')
@click.option('--recent', '-r', default=9999,
help='Show the most recent gadios.')
@click.pass_context
def gcf(ctx, dj, prog, time, title, expt, col, path, recent):
"""\b
_____ _____ ______
/ ____| / ____| | ____|
| | __ | | | |__
| | |_ | | | | __|
| |__| | | |____ | |
\_____| \_____| |_|
G-Cores(g-cores.com) Fans --- A Gadio Info Tool
"""
if ctx.invoked_subcommand == 'update':
return
    col = ['radio_' + str(x) for x in col.split('#') if x]
dj_pd = gm.kw_mining(gm.df_pk(path),
u'radio_dj',
filter(None, dj.split('#')))
prog_pd = gm.kw_mining(dj_pd,
u'radio_program',
filter(None, prog.split('#')))
title_pd = gm.kw_mining(prog_pd,
u'radio_title',
filter(None, title.split('#')))
time_pd = gm.timing_mining(title_pd, time[0], time[1])
final_pd = gm.recent(time_pd, recent)
if expt:
final_pd.to_csv('./gcf.csv', encoding='utf-8', index=False)
if ctx.invoked_subcommand == 'career':
ctx.obj['final_pd'] = final_pd
ctx.obj['path'] = path
return
if ctx.invoked_subcommand == 'statistic':
ctx.obj['final_pd'] = final_pd
return
if ctx.invoked_subcommand is None:
gm.df_print(final_pd, col)
return
return
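# Hedged usage examples (illustration only; not part of the original module).
# They assume the package is installed with a `gcf` console entry point; the
# dj name below is a placeholder:
#
#   gcf -d 'Simon' -t 20180101 20180302 -c 'date#title'
#   gcf -p 'gadio' -e
#   gcf career -s Q -p
#   gcf update --deep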
@gcf.command()
@click.option('--deep', default=False, is_flag=True,
help='Reload the whole data.')
@click.pass_context
def update(ctx, deep):
"""Update data to the last gadio"""
gs.scrape_radio_list(deep)
return
@gcf.command()
@click.option('--size', '-s', default='Y',
type=click.Choice(['Y', 'Q', 'M']),
              help='Choose the statistic size: Y (year), Q (quarter), M (month).')
@click.option('--perc', '-p', default=False, is_flag=True,
help='Show data with the percentage of whole set.')
@click.option('--ratio', '-r', default=2, type=float,
              help='Graphic ratio. Smaller values produce a wider graphic.')
@click.pass_context
def career(ctx, size, perc, ratio):
"""Show results with the career mode"""
gm.care_mode(ctx.obj['final_pd'], ctx.obj['path'], size, perc, ratio)
return
@gcf.command()
@click.pass_context
def statistic(ctx):
"""Show results with statistic mode"""
gm.stat_mode(ctx.obj['final_pd'])
return
@gcf.command()
@click.pass_context
def drop(ctx):
"""Drop all radio information"""
gs.drop_raido()
return
def main():
gcf(obj={})
if __name__ == '__main__':
main()
| 2.25 | 2 |
src/tngsdk/traffic/rest.py | sonata-nfv/ng-sdk-traffic | 0 | 12792786 | <gh_stars>0
# Copyright (c) 2018 5GTANGO, QUOBIS SL.
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the 5GTANGO, QUOBIS SL.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the 5GTANGO project,
# funded by the European Commission under Grant number 761493 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.5gtango.eu).
import os
import logging
import simplejson as json
from flask import Flask, jsonify, request
from tngsdk.traffic import traffic
LOG = logging.getLogger(os.path.basename(__file__))
app = Flask(__name__)
# Generate traffic generation object
@app.route('/api/trafficgen/v1/trafficObject', methods=['POST'])
def generate_tgo():
try:
body = json.loads(request.data)
except ValueError:
response = jsonify("JSON format error in request parameters")
response.status_code = 400
return response
res = traffic.save_trafficObject(body)
if (res['status'] == 200):
response = jsonify({"resource_uuid": res['uuid']})
else:
response = jsonify(res['message'])
response.status_code = res['status']
return response
# Get list of traffic generation objects
@app.route('/api/trafficgen/v1/trafficObject', methods=['GET'])
def get_list():
res = traffic.list_trafficObjects()
response = jsonify(res['data'])
response.status_code = res['status']
return response
# Get traffic generation object
@app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['GET'])
def get_tgo(resource_uuid):
res = traffic.get_trafficObject(resource_uuid)
response = jsonify(res['data'])
response.status_code = res['status']
return response
# Delete traffic generation object
@app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>',
methods=['DELETE'])
def delete_tgo(resource_uuid):
res = traffic.delete_trafficObject(resource_uuid)
response = jsonify(res['data'])
response.status_code = res['status']
return response
# Create traffic flow from existing traffic generation object
@app.route('/api/trafficgen/v1/flows/<int:resource_uuid>', methods=['POST'])
def generate_flow(resource_uuid):
# TODO create a traffic flow from a traffic generation object
return "Creating traffic flow from existing traffic generation object \
with id " + str(resource_uuid)
# Get traffic flow status
@app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['GET'])
def get_status(flow_uuid):
# TODO get traffic flow status
return "Getting traffic flow status from id " + str(flow_uuid)
# Start/Stops existing traffic flow
@app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['PUT'])
def manage_flow(flow_uuid):
# TODO start or stop a traffic flow
return "Starting/Stopping traffic flow with id " + str(flow_uuid)
# Removes traffic flow
@app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['DELETE'])
def remove_flow(flow_uuid):
# TODO remove a traffic flow
return "Deleting traffic flow with id " + str(flow_uuid)
def serve(args):
app.run(host=args.service_address,
port=int(args.service_port),
debug=args.verbose)
return
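# Hedged usage sketch (illustration only; not part of the original module): once
# `serve()` is running, the endpoints above can be exercised with e.g.
#
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"name": "demo"}' http://<host>:<port>/api/trafficgen/v1/trafficObject
#   curl http://<host>:<port>/api/trafficgen/v1/trafficObject
#   curl -X DELETE http://<host>:<port>/api/trafficgen/v1/trafficObject/<resource_uuid>
#
# The JSON body above is a placeholder -- traffic.save_trafficObject defines the
# fields that are actually accepted.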
| 1.867188 | 2 |
0x02-python-import_modules/100-my_calculator.py | oluwaseun-ebenezer/holbertonschool-higher_level_programming | 0 | 12792787 | <reponame>oluwaseun-ebenezer/holbertonschool-higher_level_programming
#!/usr/bin/python3
# 100-my_calculator.py
if __name__ == "__main__":
"""Handle basic arithmetic operations."""
from calculator_1 import add, sub, mul, div
import sys
if len(sys.argv) - 1 != 3:
print("Usage: ./100-my_calculator.py <a> <operator> <b>")
sys.exit(1)
ops = {"+": add, "-": sub, "*": mul, "/": div}
if sys.argv[2] not in list(ops.keys()):
print("Unknown operator. Available operators: +, -, * and /")
sys.exit(1)
a = int(sys.argv[1])
b = int(sys.argv[3])
print("{} {} {} = {}".format(a, sys.argv[2], b, ops[sys.argv[2]](a, b)))
| 3.921875 | 4 |
src/Africa/migrations/0006_dish_images01.py | MCN10/Demo1 | 0 | 12792788 | # Generated by Django 3.0.8 on 2020-07-31 12:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Africa', '0005_auto_20200731_1203'),
]
operations = [
migrations.AddField(
model_name='dish',
name='images01',
field=models.ImageField(default=1, upload_to='main_product/'),
preserve_default=False,
),
]
| 1.523438 | 2 |
makahiki/apps/widgets/status/users/views.py | justinslee/Wai-Not-Makahiki | 1 | 12792789 | """handles request for user gchart widget."""
from apps.managers.challenge_mgr import challenge_mgr
from apps.widgets.status.models import DailyStatus
def supply(request, page_name):
"""supply view_objects for user status."""
_ = page_name
_ = request
#todays_users = Profile.objects.filter(last_visit_date=datetime.datetime.today())
rounds_info = challenge_mgr.get_all_round_info()
start = rounds_info["competition_start"]
daily_status = DailyStatus.objects.filter(short_date__gte=start).order_by('short_date')
prior_day_users = 0
for status in daily_status:
status.display_date = "%d/%d" % (status.short_date.month, status.short_date.day)
status.new_users = status.setup_users - prior_day_users
prior_day_users = status.setup_users
return {
"daily_status": daily_status,
}
| 2.453125 | 2 |
Modulo 01/exercicos/d003.py | euyag/python-cursoemvideo | 2 | 12792790 | print('===== DESAFIO 003 =====')
n1 = int(input('digite um valor: '))
n2 = int(input('digite um valor: '))
s = n1 + n2
print(f'The sum of {n1} and {n2} is {s}') | 3.796875 | 4 |
Pipes/sensor for counting the heating and quantity/sensor-master/examples/webthings/ds18b20-sensor.py | ReEn-Neom/ReEn.Neom-source-code- | 0 | 12792791 | <gh_stars>0
from webthing import Thing, Property, Value, SingleThing, WebThingServer
import logging
import tornado.ioloop
from sensor import DS18B20
def run_server():
ds18 = DS18B20('28-03199779f5a1')
celsius = Value(ds18.temperature().C)
thing = Thing(
'urn:dev:ops:temperature-sensor',
'DS18B20',
['TemperatureSensor'])
thing.add_property(
Property(
thing,
'celsius',
celsius,
metadata={
'@type': 'TemperatureProperty',
'title': 'Celsius',
'type': 'number',
'unit': '°C',
'readOnly': True }))
server = WebThingServer(SingleThing(thing), port=8888)
def update():
t = ds18.temperature()
celsius.notify_of_external_update(t.C)
timer = tornado.ioloop.PeriodicCallback(update, 3000)
timer.start()
try:
logging.info('starting the server')
server.start()
except KeyboardInterrupt:
logging.debug('stopping update task')
timer.stop()
logging.info('stopping the server')
server.stop()
logging.info('done')
if __name__ == '__main__':
logging.basicConfig(
level=10,
format="%(asctime)s %(filename)s:%(lineno)s %(levelname)s %(message)s"
)
run_server()
| 2.328125 | 2 |
src/main_test.py | AliManjotho/tf_hci_pose | 0 | 12792792 | import cv2
from src.utils.heatmap import getHeatmaps
from src.visualization.visualize import visualizeAllHeatmap, visualizeBackgroundHeatmap
keypoints = [ [[100, 100, 2], [105,105, 2]] ]
image = cv2.imread('images/person.jpg')
hmaps = getHeatmaps(image, keypoints, 7)
visualizeAllHeatmap(image, hmaps)
visualizeBackgroundHeatmap(image, hmaps)
cv2.waitKey(0)
| 2.578125 | 3 |
impact.py | jackscape/image-bot | 1 | 12792793 | '''
i actually didn't write this. credit to https://github.com/lipsumar/meme-caption
'''
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import sys
img = Image.open(sys.argv[1])
draw = ImageDraw.Draw(img)
def drawText(msg, pos):
fontSize = img.width//10
lines = []
font = ImageFont.truetype("impact.ttf", fontSize)
w, h = draw.textsize(msg, font)
imgWidthWithPadding = img.width * 0.99
# 1. how many lines for the msg to fit ?
lineCount = 1
if(w > imgWidthWithPadding):
lineCount = int(round((w / imgWidthWithPadding) + 1))
if lineCount > 2:
while 1:
fontSize -= 2
font = ImageFont.truetype("impact.ttf", fontSize)
w, h = draw.textsize(msg, font)
lineCount = int(round((w / imgWidthWithPadding) + 1))
print("try again with fontSize={} => {}".format(fontSize, lineCount))
if lineCount < 3 or fontSize < 10:
break
print("img.width: {}, text width: {}".format(img.width, w))
print("Text length: {}".format(len(msg)))
print("Lines: {}".format(lineCount))
# 2. divide text in X lines
lastCut = 0
isLast = False
for i in range(0,lineCount):
if lastCut == 0:
cut = (len(msg) / lineCount) * i
else:
cut = lastCut
if i < lineCount-1:
nextCut = (len(msg) / lineCount) * (i+1)
else:
nextCut = len(msg)
isLast = True
print("cut: {} -> {}".format(cut, nextCut))
# make sure we don't cut words in half
if nextCut == len(msg) or msg[int(nextCut)] == " ":
print("may cut")
else:
print("may not cut")
while msg[int(nextCut)] != " ":
nextCut += 1
print("new cut: {}".format(nextCut))
line = msg[int(cut):int(nextCut)].strip()
# is line still fitting ?
w, h = draw.textsize(line, font)
if not isLast and w > imgWidthWithPadding:
print("overshot")
nextCut -= 1
            while msg[int(nextCut)] != " ":
nextCut -= 1
print("new cut: {}".format(nextCut))
lastCut = nextCut
lines.append(msg[int(cut):int(nextCut)].strip())
print(lines)
# 3. print each line centered
lastY = -h
if pos == "bottom":
lastY = img.height - h * (lineCount+1) - 10
for i in range(0,lineCount):
w, h = draw.textsize(lines[i], font)
textX = img.width/2 - w/2
#if pos == "top":
# textY = h * i
#else:
# textY = img.height - h * i
textY = lastY + h
offset = fontSize//28
draw.text((textX-offset, textY-offset),lines[i],(0,0,0),font=font)
draw.text((textX+offset, textY-offset),lines[i],(0,0,0),font=font)
draw.text((textX+offset, textY+offset),lines[i],(0,0,0),font=font)
draw.text((textX-offset, textY+offset),lines[i],(0,0,0),font=font)
draw.text((textX, textY),lines[i],(255,255,255),font=font)
lastY = textY
return
drawText(sys.argv[2].upper(), "top")
drawText(sys.argv[3].upper(), "bottom")
img.save(sys.argv[4])
| 3 | 3 |
generic_iterative_stemmer/training/stemming/__init__.py | asaf-kali/generic-iterative-stemmer | 0 | 12792794 | from .stem_generator import StemDict, StemGenerator, reduce_stem_dict # noqa
from .stemming_trainer import StemmingTrainer, get_stats_path # noqa
from .corpus_stemmer import * # noqa
from .ft_stemming_trainer import FastTextStemmingTrainer # noqa
from .w2v_stemming_trainer import Word2VecStemmingTrainer # noqa
| 1.0625 | 1 |
lizard/core/rtl/pipeline_arbiter.py | cornell-brg/lizard | 50 | 12792795 | <filename>lizard/core/rtl/pipeline_arbiter.py
from pymtl import *
from lizard.util.rtl.interface import UseInterface
from lizard.util.rtl.method import MethodSpec
from lizard.util.rtl.case_mux import CaseMux, CaseMuxInterface
from lizard.util.rtl.arbiters import ArbiterInterface, PriorityArbiter
from lizard.util.rtl.pipeline_stage import PipelineStageInterface
def PipelineArbiterInterface(OutType):
return PipelineStageInterface(OutType, None)
class PipelineArbiter(Model):
def __init__(s, interface, clients):
UseInterface(s, interface)
reqs = []
for client in clients:
reqs.extend([
MethodSpec(
'{}_peek'.format(client),
args=None,
rets={
'msg': s.interface.MsgType,
},
call=False,
rdy=True,
),
MethodSpec(
'{}_take'.format(client),
args=None,
rets=None,
call=True,
rdy=False,
),
])
s.require(*reqs)
ninputs = len(clients)
s.index_peek_msg = [Wire(s.interface.MsgType) for _ in range(ninputs)]
s.index_peek_rdy = [Wire(1) for _ in range(ninputs)]
s.index_take_call = [Wire(1) for _ in range(ninputs)]
for i, client in enumerate(clients):
s.connect(s.index_peek_msg[i], getattr(s, '{}_peek'.format(client)).msg)
s.connect(s.index_peek_rdy[i], getattr(s, '{}_peek'.format(client)).rdy)
s.connect(getattr(s, '{}_take'.format(client)).call, s.index_take_call[i])
s.arb = PriorityArbiter(ArbiterInterface(ninputs))
s.mux = CaseMux(
CaseMuxInterface(s.interface.MsgType, Bits(ninputs), ninputs),
[1 << i for i in range(ninputs)])
@s.combinational
def compute_ready():
s.peek_rdy.v = (s.arb.grant_grant != 0)
for i in range(ninputs):
s.connect(s.arb.grant_reqs[i], s.index_peek_rdy[i])
# call an input if granted and we are being called
@s.combinational
def compute_call(i=i):
s.index_take_call[i].v = s.arb.grant_grant[i] & s.take_call
s.connect(s.mux.mux_in_[i], s.index_peek_msg[i])
s.connect(s.mux.mux_default, 0)
s.connect(s.mux.mux_select, s.arb.grant_grant)
s.connect(s.peek_msg, s.mux.mux_out)
| 2.0625 | 2 |
biomedical_image_segmentation/model/model.py | luiskuhn/biomedical_image_segmentation | 1 | 12792796 | <gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
def create_model():
return Net()
def create_parallel_model():
return DataParallelPassthrough(Net())
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.fc1 = nn.Linear(9216, 128)
self.dropout2 = nn.Dropout2d(0.25)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = torch.flatten(self.dropout1(x), 1)
x = F.relu(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def log_weights(self, step, writer):
writer.add_histogram('weights/conv1/weight', self.conv1.weight.data, step)
writer.add_histogram('weights/conv1/bias', self.conv1.bias.data, step)
writer.add_histogram('weights/conv2/weight', self.conv2.weight.data, step)
writer.add_histogram('weights/conv2/bias', self.conv2.bias.data, step)
writer.add_histogram('weights/fc1/weight', self.fc1.weight.data, step)
writer.add_histogram('weights/fc1/bias', self.fc1.bias.data, step)
writer.add_histogram('weights/fc2/weight', self.fc2.weight.data, step)
writer.add_histogram('weights/fc2/bias', self.fc2.bias.data, step)
class DataParallelPassthrough(torch.nn.DataParallel):
"""
This class solves https://github.com/pytorch/pytorch/issues/16885
Basically, to allow the access of a model wrapped under DataParallel one needs to always
access the underlying attributes with .module (e.g. model.module.someattr)
"""
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
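# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Shows why DataParallelPassthrough exists: attributes of the wrapped Net, such as
# `conv1` or `log_weights`, stay reachable without the usual `.module` indirection.
# The 1x28x28 input shape is an assumption (it matches the 9216 units expected by
# fc1, i.e. MNIST-sized images); nothing below is original training code.
if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = create_parallel_model().to(device)
    x = torch.randn(2, 1, 28, 28, device=device)   # dummy batch of two grayscale images
    log_probs = model(x)                            # forward pass works as usual
    print(log_probs.shape)                          # torch.Size([2, 10])
    print(model.conv1)                              # passthrough: no `.module.` prefix needed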
| 2.703125 | 3 |
ipec/cnn/layers.py | wwwbbb8510/ippso | 9 | 12792797 | import numpy as np
from ipec.ip.core import parse_subnet_str
from ipec.ip.core import IPStructure
from ipec.ip.core import Interface
from ipec.ip.encoder import Encoder
from ipec.ip.decoder import Decoder
from ipec.ip.core import max_decimal_value_of_binary
# convolutional layer fields
CONV_FIELDS = {
'filter_size': 5,
'num_of_feature_maps': 7,
'stride_size': 4,
'mean': 9,
'std_dev': 9
}
# convolutional layer subnet
CONV_SUBNET = '0.0.0.0.0/6'
# pooling layer fields
POOLING_FIELDS = {
'kernel_size': 5,
'stride_size': 4,
'type': 1
}
# pooling layer subnet
POOLING_SUBNET = '4.32.0.0.0/30'
# fully-connected layer fields
FULLYCONNECTED_FIELDS = {
'num_of_neurons': 11,
'mean': 9,
'std_dev': 9
}
# fully-connected layer subnet
FULLYCONNECTED_SUBNET = '4.0.0.0.0/11'
# disabled layer fields
DISABLED_FIELDS = {
'disabled': 10,
}
# disabled layer subnet
DISABLED_SUBNET = '4.32.0.4.0/30'
def initialise_cnn_layers_3_bytes():
"""
initialise cnn layers with 3 bytes IP
:return:
"""
# convolutional layer fields
conv_fields = {
'filter_size': 3, #8
'num_of_feature_maps': 7, #128
'stride_size': 2, #4
'mean': 4, #(0~15-7)/8
'std_dev': 4 # 0~16/16
#total bits: 20
}
# convolutional layer subnet
conv_subnet = '0.0.0/4'
# pooling layer fields
pooling_fields = {
'kernel_size': 2,
'stride_size': 2,
'type': 1,
'placeholder': 14
# total bits: 19
}
# pooling layer subnet
pooling_subnet = '16.0.0/5'
# fully-connected layer fields
fullyconnected_fields = {
'num_of_neurons': 11,
'mean': 4,
'std_dev': 4
# total bits: 19
}
# fully-connected layer subnet
fullyconnected_subnet = '24.0.0/5'
# disabled layer fields
disabled_fields = {
'disabled': 19,
}
# disabled layer subnet
disabled_subnet = '32.0.0/5'
return {
'conv': ConvLayer(conv_subnet,conv_fields),
'pooling': PoolingLayer(pooling_subnet, pooling_fields),
'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields),
'disabled': DisabledLayer(disabled_subnet, disabled_fields)
}
def initialise_cnn_layers_with_xavier_weights():
"""
initialise cnn layers with xavier weight initialisation
:return:
"""
# convolutional layer fields
conv_fields = {
'filter_size': 3, #8
'num_of_feature_maps': 7, #128
'stride_size': 2, #4
#total bits: 12
}
# convolutional layer subnet
conv_subnet = '0.0/4'
# pooling layer fields
pooling_fields = {
'kernel_size': 2,
'stride_size': 2,
'type': 1,
'placeholder': 6
# total bits: 11
}
# pooling layer subnet
pooling_subnet = '16.0/5'
# fully-connected layer fields
fullyconnected_fields = {
'num_of_neurons': 11,
# total bits: 11
}
# fully-connected layer subnet
fullyconnected_subnet = '24.0/5'
# disabled layer fields
disabled_fields = {
'disabled': 11,
}
# disabled layer subnet
disabled_subnet = '32.0/5'
return {
'conv': ConvLayer(conv_subnet,conv_fields),
'pooling': PoolingLayer(pooling_subnet, pooling_fields),
'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields),
'disabled': DisabledLayer(disabled_subnet, disabled_fields)
}
class BaseCNNLayer:
"""
BaseCNNLayer class
"""
def __init__(self, str_subnet, fields):
"""
constructor
:param str_subnet: subnet string, e.g. 127.0.0.1/24
:type str_subnet: string
:param fields: a dict of (field_name, num_of_bits) pair
:type fields: dict
"""
self.str_subnet = str_subnet
self.fields = fields
self.subnet = parse_subnet_str(str_subnet)
self.ip_structure = IPStructure(fields)
self.encoder = Encoder(self.ip_structure, self.subnet)
self.decoder = Decoder()
def encode_2_interface(self, field_values):
"""
encode filed values to an IP interface
:param field_values: field values
:type field_values: a dict of (field_name, field_value) pairs
:return: the layer interface
:rtype: Interface
"""
interface = self.encoder.encode_2_interface(field_values)
return interface
def decode_2_field_values(self, interface):
"""
decode an IP interface to field values
:param interface: an IP interface
:type interface: Interface
:return: a dict of (field_name, field_value) pairs
:rtype: dict
"""
field_values = self.decoder.decode_2_field_values(interface)
return field_values
def generate_random_interface(self):
"""
generate an IP interface with random settings
:rtype: Interface
:return: an IP interface
"""
field_values = {}
for field_name in self.fields:
num_of_bits = self.fields[field_name]
max_value = max_decimal_value_of_binary(num_of_bits)
rand_value = np.random.randint(0, max_value+1)
field_values[field_name] = rand_value
return self.encode_2_interface(field_values)
def check_interface_in_type(self, interface):
"""
check whether the interface belongs to this type
:param interface: an IP interface
:type interface: Interface
:return: boolean
:rtype: bool
"""
return self.subnet.check_ip_in_subnet(interface.ip)
class ConvLayer(BaseCNNLayer):
"""
ConvLayer class
"""
def __init__(self, str_subnet=None, fields=None):
"""
constructor
:param str_subnet: subnet string, e.g. 127.0.0.1/24
:type str_subnet: string
:param fields: a dict of (field_name, num_of_bits) pair
:type fields: dict
"""
if str_subnet is None:
str_subnet = CONV_SUBNET
if fields is None:
fields = CONV_FIELDS
super(ConvLayer, self).__init__(str_subnet, fields)
class PoolingLayer(BaseCNNLayer):
"""
PoolingLayer class
"""
def __init__(self, str_subnet=None, fields=None):
"""
constructor
:param str_subnet: subnet string, e.g. 127.0.0.1/24
:type str_subnet: string
:param fields: a dict of (field_name, num_of_bits) pair
:type fields: dict
"""
if str_subnet is None:
str_subnet = POOLING_SUBNET
if fields is None:
fields = POOLING_FIELDS
super(PoolingLayer, self).__init__(str_subnet, fields)
class FullyConnectedLayer(BaseCNNLayer):
"""
FullyConnectedLayer class
"""
def __init__(self, str_subnet=None, fields=None):
"""
constructor
:param str_subnet: subnet string, e.g. 127.0.0.1/24
:type str_subnet: string
:param fields: a dict of (field_name, num_of_bits) pair
:type fields: dict
"""
if str_subnet is None:
str_subnet = FULLYCONNECTED_SUBNET
if fields is None:
fields = FULLYCONNECTED_FIELDS
super(FullyConnectedLayer, self).__init__(str_subnet, fields)
class DisabledLayer(BaseCNNLayer):
"""
DisabledLayer class
"""
def __init__(self, str_subnet=None, fields=None):
"""
constructor
:param str_subnet: subnet string, e.g. 127.0.0.1/24
:type str_subnet: string
:param fields: a dict of (field_name, num_of_bits) pair
:type fields: dict
"""
if str_subnet is None:
str_subnet = DISABLED_SUBNET
if fields is None:
fields = DISABLED_FIELDS
super(DisabledLayer, self).__init__(str_subnet, fields)
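# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Exercises the encode/decode round trip described in the BaseCNNLayer docstrings:
# random field values -> IP interface -> field values again, using the default
# CONV_SUBNET/CONV_FIELDS layout defined above. Only methods defined in this file
# are used; the printed values depend on the random draw.
if __name__ == "__main__":
    conv = ConvLayer()
    iface = conv.generate_random_interface()
    print("interface in conv subnet:", conv.check_interface_in_type(iface))
    print("decoded field values:", conv.decode_2_field_values(iface))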
| 2.28125 | 2 |
src/ground/grain-server.backup/mcc/iridium/devutil/mo_receiver.py | shostakovichs-spacecraft-factory/cansat-2018-2019 | 5 | 12792798 | <reponame>shostakovichs-spacecraft-factory/cansat-2018-2019
import typing
import logging
import sys
import argparse
from socketserver import BaseRequestHandler
from ..network.mo_server import MOServiceServer
from ..messages.mobile_originated import MOMessage
_log = logging.getLogger(__name__)
class ReqHandler(BaseRequestHandler):
def handle(self):
msg: MOMessage = self.request
hdr = msg.header
if hdr:
_log.info(
"hdr: \n"
f"\t mtmsn: {hdr.mtmsn}\n"
f"\t momsn: {hdr.momsn}\n"
f"\t session status: {hdr.session_status}\n"
f"\t imei: {hdr.imei}\n"
f"\t cdr: {hdr.cdr}\n"
f"\t time of session: {hdr.time_of_session}\n"
)
else:
_log.warning("no header present")
loc = msg.loc_info
if loc:
_log.info(
"loc info:\n"
f"\t lon: {loc.lon}\n"
f"\t lat: {loc.lat}\n"
f"\t CEP r: {loc.CEP_radius}\n"
)
else:
_log.warning("no location info present")
pay = msg.payload
if pay:
_log.info(f"payload: {pay.raw_payload}")
else:
_log.warning("no payload present")
# noinspection PyBroadException
def main(iface: str, port: int, blog_stream: typing.BinaryIO = None):
server = MOServiceServer(
server_address=(iface, port,),
request_handler_cls=ReqHandler,
bind_and_activate=True,
send_ack=True,
blog_stream=blog_stream,
)
rc = 0
try:
_log.info("starting server")
server.serve_forever()
except KeyboardInterrupt:
_log.info("server stopped by ctrl+c")
except Exception:
rc = 1
_log.exception("an error occured")
finally:
server.shutdown()
return rc
def main_exec():
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
parser = argparse.ArgumentParser("devutil SBD receiver", add_help=True)
parser.add_argument("--iface", action="store", dest="iface", nargs="?", type=str, default="0.0.0.0")
parser.add_argument("--port", action="store", dest="port", nargs="?", type=int, required=True)
parser.add_argument("--blog-file", action="store", dest="blogfile", nargs="?", type=argparse.FileType('wb'),
default=None)
args = parser.parse_args(sys.argv[1:])
    # If we are asked to write data to stdout, that object is a text stream rather
    # than a binary one, so grab the underlying binary stream (its `buffer` attribute).
stream = None
if args.blogfile is not None:
_log.info("using blog stream as %s", args.blogfile)
stream = getattr(args.blogfile, "buffer", args.blogfile)
arg_iface = args.iface
arg_port = args.port
return main(arg_iface, arg_port, stream)
if __name__ == "__main__":
exit(main_exec())
| 2.140625 | 2 |
xgds_planner2/defaultSettings.py | xgds/xgds_planner2 | 1 | 12792799 | <filename>xgds_planner2/defaultSettings.py
#__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
from geocamUtil.SettingsUtil import getOrCreateArray
"""
This app may define some new parameters that can be modified in the
Django settings module. Let's say one such parameter is FOO. The
default value for FOO is defined in this file, like this:
FOO = 'my default value'
If the admin for the site doesn't like the default value, they can
override it in the site-level settings module, like this:
FOO = 'a better value'
Other modules can access the value of FOO like this:
from django.conf import settings
print settings.FOO
Don't try to get the value of FOO from django.conf.settings. That
settings object will not know about the default value!
###
# DJANGO-PIPELINE ADDENDUM:
For this module to work, the site-level config file (siteSettings.py), must
merge the XGDS_PLANNER_PIPELINE_JS and XGDS_PLANNER_PIPELINE_CSS settings
into global PIPELINE_{JS|CSS} settings dicts.
If no other django-pipeline includes are defined,
the relevant siteSettings.py section might look like this:
PIPELINE_JS = {}
PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS)
PIPELINE_CSS = {}
PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS)
#
###
"""
import os
from geocamUtil.SettingsUtil import getOrCreateDict
XGDS_PLANNER_OFFLINE = False # Don't load google earth if this is true
XGDS_PLANNER_MAP_ROTATION_HANDLES = True
XGDS_PLANNER_DIRECTIONAL_STATIONS = True
# 'external/js/jquery/jquery.migrate.min.js',
XGDS_PLANNER_PIPELINE_JS = {
'planner_app': {
'source_filenames': ('jquery/dist/jquery.min.js',
'jquery-migrate-official/src/migrate.js',
'jquery-ui-dist/jquery-ui.min.js',
'handlebars/dist/handlebars.min.js',
'backbone/backbone.js',
'backbone.wreqr/lib/backbone.wreqr.min.js',
'backbone.babysitter/lib/backbone.babysitter.min.js',
'backbone-relational/backbone-relational.js',
'backbone-forms/distribution/backbone-forms.min.js',
'backbone.marionette/lib/backbone.marionette.min.js',
'string-format/lib/string-format.js',
'usng/usng.js',
'proj4/dist/proj4.js',
'xgds_map_server/js/util/handlebars-helpers.js',
'xgds_map_server/js/util/geo.js',
'xgds_map_server/js/util/forms.js',
'xgds_planner2/js/plannerApp.js',
'xgds_planner2/js/plannerModels.js',
'xgds_planner2/js/olPlannerStyles.js',
'xgds_planner2/js/plannerLinksViews.js',
'xgds_planner2/js/plannerToolsViews.js',
'xgds_planner2/js/plannerScheduleViews.js',
'xgds_planner2/js/plannerViews.js',
'xgds_planner2/js/map_viewer/olMapViews.js',
'xgds_planner2/js/olStationViews.js',
'xgds_planner2/js/olSegmentViews.js',
'xgds_planner2/js/olPlanViews.js',
'xgds_planner2/js/simulatorDriver.js'
),
'output_filename': 'js/compiled_planner_app.js'
},
# must create 'simulator' entry in top-level siteSettings.py
#TODO update, qunit is installed with bower
'xgds_planner2_testing': {
'source_filenames': (
'external/js/qunit-1.12.0.js',
'xgds_planner2/js/tests.js',
),
'output_filename': 'js/planner_tests.js'
}
}
XGDS_PLANNER_PIPELINE_CSS = {
'planner_app': {
'source_filenames': (
'jquery-ui-dist/jquery-ui.min.css',
# for some reason compressing this in the css does not work so it's separate in the planner_app
# 'backbone-forms/distribution/templates/old.css',
'xgds_planner2/css/planner.css',
#'xgds_planner2/css/forms_adjust.css',
),
'output_filename': 'css/planner_app.css',
'template_name': 'xgds_planner2/pipelineCSS.css',
},
'xgds_planner2_testing': {
'source_filenames': (
'qunit/qunit/qunit.css',
),
'output_filename': 'css/planner_tests.css',
},
}
PIPELINE = getOrCreateDict('PIPELINE')
PIPELINE['CSS'] = XGDS_PLANNER_PIPELINE_CSS
PIPELINE['JAVASCRIPT'] = getOrCreateDict('PIPELINE.JAVASCRIPT')
# if we are using the planner we want to add uploadJson into the custom map for right now. Really it should not be jammed in that file.
PIPELINE['JAVASCRIPT']['custom_map'] = {'source_filenames': ('xgds_planner2/js/uploadJson.js',
'xgds_map_server/js/map_viewer/olShowMapCoords.js',
'xgds_map_server/js/map_viewer/olInitialLayers.js'
),
'output_filename': 'js/custom_map.js'
}
# Override this compilation of javascript files for your planner and simulator
PIPELINE['JAVASCRIPT']['simulator'] = {'source_filenames': ('xgds_planner2/js/planner/genericVehicleSimulator.js', # This trailing comma is critical because this makes it a tuple
),
'output_filename': 'js/simulator.js',
}
_thisDir = os.path.dirname(__file__)
# Set to true to make the bearing distance be in crs units
XGDS_PLANNER_CRS_UNITS_DEFAULT = False
# list of (formatCode, extension, exporterClass). This is the entire list of everything that
# xgds_planner provides; remove anything you won't be using.
XGDS_PLANNER_PLAN_EXPORTERS = (
('xpjson', '.json', 'xgds_planner2.planExporter.XpjsonPlanExporter'),
('bearing_distance', '.bdj', 'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'),
('bearing_distance', '.cbdj', 'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'),
('crsjson', '.crsjson', 'xgds_planner2.planExporter.CrsJsonPlanExporter'),
('kml', '.kml', 'xgds_planner2.kmlPlanExporter.KmlPlanExporter'),
('stats', '-stats.json', 'xgds_planner2.statsPlanExporter.StatsPlanExporter'),
# ('pml', '.pml', 'xgds_planner2.pmlPlanExporter.PmlPlanExporter'),
)
# list of (formatCode, extension, importerClass)
XGDS_PLANNER_PLAN_IMPORTERS = (
('kml', '.kml', 'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'),
('csv', '.csv', 'xgds_planner2.csvPlanImporter.CSVPlanImporter'),
('json', '.json', 'xgds_planner2.planImporter.XPJsonPlanImporter'),
)
# kml root from xgds_map_server
XGDS_PLANNER_LAYER_FEED_URL = "/xgds_map_server/treejson/"
XGDS_PLANNER_LINE_WIDTH_PIXELS = 3
XGDS_PLANNER_PLAN_MODEL = "xgds_planner2.Plan"
XGDS_PLANNER_PLAN_MONIKER = "Plan"
XGDS_PLANNER_PLAN_EXECUTION_MODEL = "xgds_planner2.PlanExecution"
XGDS_PLANNER_STATION_MONIKER = "Station"
XGDS_PLANNER_STATION_MONIKER_PLURAL = XGDS_PLANNER_STATION_MONIKER + 's'
XGDS_PLANNER_SEGMENT_MONIKER = "Segment"
XGDS_PLANNER_SEGMENT_MONIKER_PLURAL = XGDS_PLANNER_SEGMENT_MONIKER + 's'
XGDS_PLANNER_COMMAND_MONIKER = "Command"
XGDS_PLANNER_COMMAND_MONIKER_PLURAL = XGDS_PLANNER_COMMAND_MONIKER + 's'
#TODO to have a default site frame in the creation form, set this to the site id from your WITHIN plan library.
XGDS_PLANNER_DEFAULT_SITE = ('IRG', 'Ames')
# Method to add stuff to context for plan editor, override and register your own method if you need it.
# It must add a json dictionary called extras
XGDS_PLANNER_EDITOR_CONTEXT_METHOD = 'xgds_planner2.views.addToEditorContext'
# Method to add stuff to planExecution if you are not doing the basic planExecution.
# This gets invoked from schedulePlans call in views.py
XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD = None
# OVERRIDE this in your sitesettings to have a custom plan create, note that since it's in site settings you can't have a reverse lookup.
XGDS_PLANNER_CREATE_URL = "/xgds_planner2/plan/create"
# Schema used to be set in the settings, now they are set in the PlanSchema database table.
# XGDS_PLANNER_SCHEMAS = [
# ]
# XGDS_PLANNER_SCHEMAS: A list of XPJSON schemas available in the
# planner. Notes:
#
# * @schemaSource and @librarySource are paths relative to the PROJ_ROOT
# base directory for the site. They point to the XPJSON PlanSchema and
# PlanLibrary source files. One of the steps within 'manage.py prep'
# is 'prepapps'. During that step, those files are processed by
# compileXpjson.py and the simplified/canonical versions are written
# to the build/static/xgds_planner2 directory. The client-side JS
# reads the simplified versions from there.
#
# * @simulatorUrl is relative to STATIC_URL. It should point to a JavaScript
# file that defines the simulator model for the schema. The model is loaded
# as part of the client-side planner JS.
#
# * @simulator is the JavaScript name of the simulator module defined by
# the file at @simulatorUrl.
#
XGDS_PLANNER_SCHEMAS = {
# "test": {
# "schemaSource": "apps/xgds_planner2/testing/examplePlanSchema.json",
# "librarySource": "apps/xgds_planner2/testing/examplePlanLibrary.json",
# "simulatorUrl": "xgds_planner2/testing/exampleSimulator.js",
# "simulator": "xgds_planner2.ExampleSimulator",
# },
"GenericVehicle": {
"schemaSource": "apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json",
"librarySource": "apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json",
"simulatorUrl": "xgds_planner2/js/planner/genericVehicleSimulator.js",
"simulator": "genericVehicle.Simulator", # the namespace within the simulator js
}
}
# XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS javascript files to be included by the mapviews.js
# to support custom command rendering
XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS = ()
# XGDS_PLANNER_COMMAND_RENDERERS - A dict of Command type to javascript file to be used in the mapviews.js
# to render a command in a custom way.
# see xgds_kn for example
XGDS_PLANNER_COMMAND_RENDERERS = {}
# If this is defined (true) then include the scheduling & flight management features in display
# IMPORTANT YOU MUST INCLUDE THIS IN SITE SETTINGS
# TEMPLATE_CONTEXT_PROCESSORS = (global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
# ...
# 'geocamUtil.context_processors.settings'
XGDS_PLANNER_SCHEDULE_INCLUDED = None
# Test skipping variables. Set to true if code somewhere else overrides
# some functionality in the planner.
XGDS_PLANNER_TEST_SKIP_INDEX = False
XGDS_PLANNER_TEST_SKIP_EDIT = False
XGDS_PLANNER_TEST_SKIP_DOC = False
XGDS_PLANNER_TEST_SKIP_PLAN_REST = False
XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT = False
XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE = False
XGDS_PLANNER_TEST_SKIP_CREATE_PLAN = False
XGDS_PLANNER_HANDLEBARS_DIRS = [os.path.join('xgds_planner2', 'templates', 'handlebars'),
os.path.join('xgds_map_server', 'templates', 'handlebars', 'search')]
XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH = 'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars'
# XGDS_PLANNER_LINKS_LOADED_CALLBACK: The fully qualified name of an
# extra JavaScript callback to call after the links tab is loaded.
XGDS_PLANNER_LINKS_LOADED_CALLBACK = 'null'
# This is used to hold a map of site frames, so we can convert lat/long to the closest site frame.
# It is initialized by calling views.getSiteFrames().
XGDS_PLANNER_SITE_FRAMES = []
XGDS_MAP_SERVER_JS_MAP = getOrCreateDict('XGDS_MAP_SERVER_JS_MAP')
XGDS_MAP_SERVER_JS_MAP['Plan'] = {'ol': 'xgds_planner2/js/olPlanMap.js',
'model': XGDS_PLANNER_PLAN_MODEL,
'hiddenColumns': ['stations', 'type', 'id']}
XGDS_DATA_MASKED_FIELDS = getOrCreateDict('XGDS_DATA_MASKED_FIELDS')
XGDS_DATA_MASKED_FIELDS['xgds_planner2'] = {'Plan': ['uuid',
'dateModified',
'jsonPlan',
'deleted',
'readOnly',
'numStations',
'numSegments',
'numCommands',
'stats'
]
}
# If you have callbacks to be connected to the planner, register them as follows
# XGDS_PLANNER_CALLBACK = [(MODIFY,'my.planner.modify.callback', PYTHON),
# (SAVE,'my.planner.save.callback', JAVASCRIPT)]
# they will be executed in order either when the plan is modified or when the plan is saved.
# If it is a Python method, it will happen on the back end after modification or save.
# If it is a javascript method, it will happen on the front end after modification or save.
# If it is an 'exec' method, it will happen on the back end after modification or save.
MODIFY = 'Modify'
SAVE = 'Save'
DELETE = 'Delete'
JAVASCRIPT = 'JavaScript'
PYTHON = 'Python'
EXEC = 'Exec'
XGDS_PLANNER_CALLBACK = []
# If you will be plotting values in the flot plot chart, register functions here.
# You must also then include the javascript library that has that function implemented.
# Dictionary should be: legible name: namespace of library
XGDS_PLANNER_PLOTS = {}
# Uncomment the below to see plannerSamplePlot.js, and include it in planner_app_base
# XGDS_PLANNER_PLOTS['Sample'] = 'sample_plot'
# Turn on to enable plan validation support and UI
XGDS_PLANNER_VALIDATION = False | 2.015625 | 2 |
_lever_utils/foo/helpers/mail/mail_helper.py | 0lever/utils | 1 | 12792800 | <filename>_lever_utils/foo/helpers/mail/mail_helper.py
# -*- coding:utf-8 -*-
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
import os
import poplib
from email.parser import Parser
from email.header import decode_header
from email.utils import parseaddr
from datetime import datetime
class Mail(object):
def __init__(self, server, port, username, password, sender):
self._server = server
self._port = port
self._username = username
self._password = password
self._sender = sender
def send(self, subject, to, cc=[], text=None, html=None, files=[]):
try:
            # Build the MIMEMultipart message object.
            # The 'mixed' subtype is used for emails that carry attachments.
msg = MIMEMultipart('mixed')
msg['Subject'] = subject
msg['From'] = self._sender
msg['To'] = ";".join(to)
msg['Cc'] = ";".join(cc)
mime_text = MIMEText(html, 'html', 'utf-8') if html is not None else MIMEText(text, 'plain', 'utf-8')
msg.attach(mime_text)
for file in files:
if isinstance(file, str):
file = file.decode("utf-8")
basename = os.path.basename(file)
                # Build the attachment part.
sendfile = open(file, 'rb').read()
text_att = MIMEText(sendfile, 'base64', 'utf-8')
text_att["Content-Type"] = 'application/octet-stream'
text_att["Content-Disposition"] = 'attachment; filename=%s' % basename.encode("gb2312")
msg.attach(text_att)
            # Send the email.
smtp = smtplib.SMTP_SSL(self._server, self._port)
smtp.set_debuglevel(0)
smtp.ehlo()
smtp.login(self._username, self._password)
err = smtp.sendmail(self._sender, to+cc, msg.as_string())
smtp.close()
if not err:
send_result = True, None
else:
send_result = False, err
except Exception, e:
send_result = False, e
return send_result
class MailServer(object):
SF = "%Y-%m-%d %H:%M:%S"
pop3_server = None
args_pop_server = None
args_user = None
args_password = None
def __init__(self, pop_server, user, password):
self.args_pop_server = pop_server
self.args_user = user
self.args_password = password
self._restart()
def quit(self):
if self.pop3_server is not None:
self.pop3_server.quit()
self.pop3_server = None
def _restart(self):
self.quit()
tmp_pop3_server = poplib.POP3(self.args_pop_server)
tmp_pop3_server.user(self.args_user)
tmp_pop3_server.pass_(self.args_password)
self.pop3_server = tmp_pop3_server
def get(self, *args):
self._restart()
res = {}
for arg in args:
if arg == 'stat':
res[arg] = self.pop3_server.stat()[0]
elif arg == 'list':
res[arg] = self.pop3_server.list()
elif arg == 'latest':
mails = self.pop3_server.list()[1]
resp, lines, octets = self.pop3_server.retr(len(mails))
msg = Parser().parsestr(b'\r\n'.join(lines))
res[arg] = self._parse_message(msg)
elif type(arg) == int:
mails = self.pop3_server.list()[1]
if arg > len(mails):
res[arg] = None
continue
resp, lines, octets = self.pop3_server.retr(arg)
msg = Parser().parsestr(b'\r\n'.join(lines))
res[arg] = self._parse_message(msg)
else:
res[arg] = None
return res
def _parse_message(self, msg):
result = {}
# Subject
subject_tmp = msg.get('Subject', '')
value, charset = decode_header(subject_tmp)[0]
if charset:
value = value.decode(charset)
result['Subject'] = value
# 'From', 'To', 'Cc'
for header in ['From', 'To', 'Cc']:
result[header] = []
temp = msg.get(header, '')
temp_list = temp.split(',')
for i in temp_list:
if i == '':
continue
name, addr = parseaddr(i)
value, charset = decode_header(name)[0]
if charset:
value = value.decode(charset)
tmp_addr_info = dict(name=value, addr=addr)
result[header].append(tmp_addr_info)
try:
result['Date'] = datetime.strptime(msg.get('Date', ''), "%a, %d %b %Y %H:%M:%S +0800").strftime(self.SF)
except Exception,e:
result['Date'] = str(msg.get('Date', ''))
result['Files'] = []
result['Bodys'] = []
for par in msg.walk():
name = par.get_filename()
if name:
data = par.get_payload(decode=True)
result['Files'].append(dict(name=name, data=data))
else:
body = par.get_payload(decode=True)
if body is not None:
result['Bodys'].append(dict(body=body))
return result
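# --- Hedged usage sketch (illustration only; not part of the original module) ---
# Shows how Mail and MailServer are meant to be wired together. The host names,
# account, password and recipient below are placeholders, not real settings;
# substitute working credentials before running.
if __name__ == "__main__":
    mail = Mail(server="smtp.example.com", port=465,
                username="user@example.com", password="secret",
                sender="user@example.com")
    ok, err = mail.send(subject="Test message",
                        to=["receiver@example.com"],
                        text="Hello from mail_helper")
    print("sent: %s, error: %s" % (ok, err))
    inbox = MailServer(pop_server="pop.example.com",
                       user="user@example.com", password="secret")
    print(inbox.get("stat", "latest"))
    inbox.quit()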
| 2.484375 | 2 |
src/database_processor/db_common.py | Brian-Pho/RVST598_Speech_Emotion_Recognition | 2 | 12792801 | """
This file holds common functions across all database processing such as
calculating statistics.
"""
import numpy as np
from src import em_constants as emc
def is_outlier(wav, lower, upper):
"""
Checks if an audio sample is an outlier. Bounds are inclusive.
:param wav: The audio time series data points
:param lower: The lower bound
:param upper: The upper bound
:return: Boolean
"""
return False if lower <= len(wav) <= upper else True
def get_label(filename, delimiter, index, db_emo_map):
"""
Gets the k-hot encoded label from a sample's filename.
:param filename: The sample's filename
:param delimiter: The delimiter used in the filename
:param index: Where in the filename the label/emotion is located
:param db_emo_map: The database-specific emotion mapping
:return: The label k-hot encoded to this program's standard emotion map or
False if the label doesn't map to the standard emotions
"""
label = filename.split(delimiter)[index]
standard_emotion = db_emo_map[label]
emotion_id = [emc.EMOTION_MAP[standard_emotion]]
return k_hot_encode_label(emotion_id)
def repr_label(label):
"""
Represents a label in a filename-friendly format. Mostly used in the
"read_to_melspecgram()" function to write out labels in the filename.
Sample input:
[1. 0. 0. 0. 0. 0. 0.]
Sample output:
"1_0_0_0_0_0_0"
:param label: Numpy array representing the k-hot encoded label
:return: String representation of the label
"""
return "_".join(str(emo) for emo in label)
def k_hot_encode_label(label):
"""
K-hot encodes a label. Takes a list of emotion IDs and returns a list
encoding the most voted for emotion.
Sample input:
[0, 1, 2, 0, 6, 2]
Sample output:
[1, 0, 1, 0, 0, 0, 0]
:param label: List of labels to encode
:return: List of k-hot encoded labels or False if the label is unused
"""
# If there's only one label/vote, then use the quicker method of encoding
if len(label) == 1:
return _one_hot_encode_label(label)
# Convert the emotion numbers into an array where the index is the emotion
# and the value is the number of votes for that emotion
unique, counts = np.unique(label, return_counts=True)
k_hot_label = np.zeros(emc.NUM_EMOTIONS)
for emo_index, emo_count in zip(unique, counts):
k_hot_label[emo_index] = emo_count
# Only count the emotions with the highest amount of votes
k_hot_label = k_hot_label / np.max(k_hot_label)
k_hot_label = np.floor(k_hot_label).astype(int)
# If they're all zero, then this sample doesn't fit with the set of labels
# that we're considering so drop it
if not np.any(k_hot_label):
print("No usable label.")
return False
return k_hot_label
def _one_hot_encode_label(label):
"""
One hot encodes a label. Private function to quickly one-hot encode a label.
Sample input:
[4]
Sample output:
[0, 0, 0, 0, 1, 0, 0]
:param label: A list with one label (length is one)
:return: One-hot encoding of the label
"""
one_hot_label = np.zeros(emc.NUM_EMOTIONS, dtype=int)
one_hot_label[label[0]] = 1
return one_hot_label
def inverse_k_hot_encode_label(k_hot_label):
"""
Inverses a k-hot encoded label back into emotion ids.
Sample input:
[1, 0, 0, 0, 1, 0, 0]
Sample output:
[0, 4]
:param k_hot_label: A list of the k-hot encoded label
:return: A list of the emotion ids in the label
"""
return np.where(k_hot_label == 1)[0]
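# --- Hedged usage sketch (illustration only; not part of the original module) ---
# Walks through the voting example from the docstrings above: a list of emotion
# ids is k-hot encoded (every emotion tied for the most votes gets a 1) and then
# mapped back to emotion ids. Assumes emc.NUM_EMOTIONS is at least 7 so the
# sample ids are valid, and that this runs inside the project's package context.
if __name__ == "__main__":
    votes = [0, 1, 2, 0, 6, 2]                 # emotions 0 and 2 tie with two votes each
    k_hot = k_hot_encode_label(votes)          # e.g. array([1, 0, 1, 0, 0, 0, 0])
    print(k_hot)
    print(inverse_k_hot_encode_label(k_hot))   # e.g. array([0, 2])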
| 3.515625 | 4 |
attendance_app/urls.py | mattmorz/attendance_system | 0 | 12792802 | <gh_stars>0
from django.conf.urls import url
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('sign-in', views.signup, name='sign-in'),
path('home-page', views.loggedIn, name='home-page'),
path('log-out', views.logout_view, name='log-out'),
] | 1.617188 | 2 |
aas_core_codegen/csharp/__init__.py | gillistephan/aas-core-codegen | 5 | 12792803 | """Generate C# code based on the intermediate meta-model."""
| 1.109375 | 1 |
code/trlda/python/tests/batchlda_test.py | lucastheis/trlda | 23 | 12792804 | import unittest
from time import time
from pickle import load, dump
from tempfile import mkstemp
from random import choice, randint
from string import ascii_letters
from numpy import corrcoef, random, abs, max, asarray, round, zeros_like
from trlda.models import BatchLDA
from trlda.utils import sample_dirichlet
class Tests(unittest.TestCase):
def test_basics(self):
W = 102
D = 1010
K = 11
alpha = .27
eta = 3.1
model = BatchLDA(num_words=W, num_topics=K, alpha=alpha, eta=eta)
self.assertEqual(K, model.num_topics)
self.assertEqual(K, model.alpha.size)
self.assertEqual(W, model.num_words)
self.assertEqual(alpha, model.alpha.ravel()[randint(0, K - 1)])
self.assertEqual(eta, model.eta)
with self.assertRaises(RuntimeError):
model.alpha = random.rand(K + 1)
alpha = random.rand(K, 1)
model.alpha = alpha
self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())), 1e-20)
def test_empirical_bayes_alpha(self):
model = BatchLDA(
num_words=4,
num_topics=2,
alpha=[.2, .05],
eta=.2)
model.lambdas = [
[100, 100, 1e-16, 1e-16],
[1e-16, 1e-16, 100, 100]]
documents = model.sample(num_documents=100, length=20)
# set alpha to wrong values
model.alpha = [4., 4.]
model.update_parameters(documents,
max_epochs=10,
max_iter_inference=200,
update_lambda=False,
update_alpha=True,
emp_bayes_threshold=0.)
# make sure empirical Bayes went in the right direction
self.assertGreater(model.alpha[0], model.alpha[1])
self.assertLess(model.alpha[0], 4.)
self.assertLess(model.alpha[1], 4.)
def test_empirical_bayes_eta(self):
for eta, initial_eta in [(.045, .2), (.41, .2)]:
model = BatchLDA(
num_words=100,
num_topics=10,
alpha=[.1, .1],
eta=initial_eta)
# this will sample a beta with the given eta
model.lambdas = zeros_like(model.lambdas) + eta
documents = model.sample(500, 10)
model.update_parameters(documents,
max_epochs=10,
update_eta=True,
emp_bayes_threshold=0.)
# optimization should at least walk in the right direction and don't explode
self.assertLess(abs(model.eta - eta), abs(model.eta - initial_eta))
def test_pickle(self):
model0 = BatchLDA(
num_words=300,
num_topics=50,
alpha=random.rand(),
eta=random.rand())
tmp_file = mkstemp()[1]
# save model
        with open(tmp_file, 'wb') as handle:
dump({'model': model0}, handle)
# load model
        with open(tmp_file, 'rb') as handle:
model1 = load(handle)['model']
# make sure parameters haven't changed
self.assertEqual(model0.num_words, model1.num_words)
self.assertEqual(model0.num_topics, model1.num_topics)
self.assertLess(max(abs(model0.lambdas - model1.lambdas)), 1e-20)
self.assertLess(max(abs(model0.alpha - model1.alpha)), 1e-20)
self.assertLess(abs(model0.eta - model1.eta), 1e-20)
if __name__ == '__main__':
unittest.main()
| 2.28125 | 2 |
custom/icds_reports/tests/agg_tests/reports/test_poshan_progress_dashboard_data.py | tobiasmcnulty/commcare-hq | 1 | 12792805 | from django.test import TestCase
from custom.icds_reports.reports.poshan_progress_dashboard_data import get_poshan_progress_dashboard_data
class TestPPDData(TestCase):
def test_get_ppr_data_comparative_month(self):
self.maxDiff = None
data = get_poshan_progress_dashboard_data(
'icds-cas',
2017,
5,
2,
'month',
'comparative',
{
'aggregation_level': 2,
'state_id': 'st1',
},
False
)
expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'd1',
'value': '142.40%'}],
'Worst performers': [{'place': 'd1',
'value': '142.40%'}],
'indicator': 'AWC Open'},
{'Best performers': [{'place': 'd1', 'value': '1.62%'}],
'Worst performers': [{'place': 'd1',
'value': '1.62%'}],
'indicator': 'Home Visits'}]],
'Service Delivery': [
[{'Best performers': [{'place': 'd1', 'value': '1.45%'}],
'Worst performers': [{'place': 'd1', 'value': '1.45%'}],
'indicator': 'Pre-school Education'},
{'Best performers': [{'place': 'd1', 'value': '66.74%'}],
'Worst performers': [{'place': 'd1',
'value': '66.74%'}],
'indicator': 'Weighing efficiency'}],
[{'Best performers': [{'place': 'd1', 'value': '1.47%'}],
'Worst performers': [{'place': 'd1', 'value': '1.47%'}],
'indicator': 'Height Measurement Efficiency'},
{'Best performers': [{'place': 'd1', 'value': '72.97%'}],
'Worst performers': [{'place': 'd1',
'value': '72.97%'}],
'indicator': 'Counselling'}],
[{'Best performers': [{'place': 'd1', 'value': '28.67%'}],
'Worst performers': [{'place': 'd1',
'value': '28.67%'}],
'indicator': 'Take Home Ration'},
{'Best performers': [{'place': 'd1', 'value': '0.83%'}],
'Worst performers': [{'place': 'd1', 'value': '0.83%'}],
'indicator': 'Supplementary Nutrition'}]]}
self.assertDictEqual(expected, data)
def test_get_ppr_data_comparative_quarter(self):
self.maxDiff = None
data = get_poshan_progress_dashboard_data(
'icds-cas',
2017,
None,
2,
'quarter',
'comparative',
{
'aggregation_level': 1,
},
False
)
expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'st1',
'value': '64.80%'},
{'place': 'st2',
'value': '47.76%'},
{'place': 'st7',
'value': '0.00%'}],
'Worst performers': [{'place': 'st7',
'value': '0.00%'},
{'place': 'st2',
'value': '47.76%'},
{'place': 'st1',
'value': '64.80%'}],
'indicator': 'AWC Open'},
{'Best performers': [{'place': 'st1', 'value': '0.66%'},
{'place': 'st2', 'value': '0.00%'},
{'place': 'st7',
'value': '0.00%'}],
'Worst performers': [{'place': 'st7',
'value': '0.00%'},
{'place': 'st2',
'value': '0.00%'},
{'place': 'st1',
'value': '0.66%'}],
'indicator': 'Home Visits'}]],
'Service Delivery': [[{'Best performers': [{'place': 'st2', 'value': '8.41%'},
{'place': 'st1', 'value': '2.52%'},
{'place': 'st7', 'value': '0.00%'}],
'Worst performers': [{'place': 'st7', 'value': '0.00%'},
{'place': 'st1', 'value': '2.52%'},
{'place': 'st2',
'value': '8.41%'}],
'indicator': 'Pre-school Education'},
{'Best performers': [{'place': 'st2', 'value': '70.40%'},
{'place': 'st1', 'value': '67.39%'},
{'place': 'st7', 'value': '0.00%'}],
'Worst performers': [{'place': 'st7', 'value': '0.00%'},
{'place': 'st1', 'value': '67.39%'},
{'place': 'st2',
'value': '70.40%'}],
'indicator': 'Weighing efficiency'}],
[{'Best performers': [{'place': 'st2', 'value': '2.89%'},
{'place': 'st1', 'value': '1.44%'},
{'place': 'st7', 'value': '0.00%'}],
'Worst performers': [{'place': 'st7', 'value': '0.00%'},
{'place': 'st1', 'value': '1.44%'},
{'place': 'st2',
'value': '2.89%'}],
'indicator': 'Height Measurement Efficiency'},
{'Best performers': [{'place': 'st1', 'value': '60.32%'},
{'place': 'st2', 'value': '57.97%'},
{'place': 'st7', 'value': '0.00%'}],
'Worst performers': [{'place': 'st7', 'value': '0.00%'},
{'place': 'st2', 'value': '57.97%'},
{'place': 'st1',
'value': '60.32%'}],
'indicator': 'Counselling'}],
[{'Best performers': [{'place': 'st2', 'value': '34.75%'},
{'place': 'st1', 'value': '14.60%'},
{'place': 'st7', 'value': '0.00%'}],
'Worst performers': [{'place': 'st7', 'value': '0.00%'},
{'place': 'st1', 'value': '14.60%'},
{'place': 'st2',
'value': '34.75%'}],
'indicator': 'Take Home Ration'},
{'Best performers': [{'place': 'st2', 'value': '1.10%'},
{'place': 'st1', 'value': '0.95%'},
{'place': 'st7', 'value': '0.00%'}],
'Worst performers': [{'place': 'st7', 'value': '0.00%'},
{'place': 'st1', 'value': '0.95%'},
{'place': 'st2',
'value': '1.10%'}],
'indicator': 'Supplementary Nutrition'}]]}
self.assertDictEqual(expected, data)
def test_get_ppr_data_aggregated_month(self):
self.maxDiff = None
data = get_poshan_progress_dashboard_data(
'icds-cas',
2017,
5,
2,
'month',
'aggregated',
{
'aggregation_level': 1,
},
False
)
expected = {'ICDS CAS Coverage': {'% Number of Days AWC Were opened': '118.18%',
'% of Home Visits': '0.79%',
'Number of AWCs Launched': 22,
'Number of Blocks Covered': 5,
'Number of Districts Covered': 4,
'Number of States Covered': 3},
'Service Delivery': {
'% of children between 3-6 years provided PSE for atleast 21+ days': '6.66%',
'% of children between 3-6 years provided SNP for atleast 21+ days': '1.51%',
'% of children between 6 months -3 years, P&LW provided THR for atleast 21+ days': '43.65%',
'% of trimester three women counselled on immediate and EBF': '72.15%',
'Height Measurement Efficiency': '3.24%',
'Weighing efficiency': '70.27%'}}
self.assertDictEqual(expected, data)
def test_get_ppr_data_aggregated_quarter(self):
self.maxDiff = None
data = get_poshan_progress_dashboard_data(
'icds-cas',
2017,
None,
2,
'quarter',
'aggregated',
{
'aggregation_level': 1,
},
False
)
expected = {'ICDS CAS Coverage': {'% Number of Days AWC Were opened': '53.27%',
'% of Home Visits': '0.32%',
'Number of AWCs Launched': 22,
'Number of Blocks Covered': 5,
'Number of Districts Covered': 4,
'Number of States Covered': 3},
'Service Delivery': {
'% of children between 3-6 years provided PSE for atleast 21+ days': '5.54%',
'% of children between 3-6 years provided SNP for atleast 21+ days': '1.08%',
'% of children between 6 months -3 years, P&LW provided THR for atleast 21+ days': '25.32%',
'% of trimester three women counselled on immediate and EBF': '59.09%',
'Height Measurement Efficiency': '2.24%',
'Weighing efficiency': '68.81%'}}
self.assertDictEqual(expected, data)
| 2.046875 | 2 |
examples/embeddingInFLTK.py | chiluf/visvis.dev | 0 | 12792806 | #!/usr/bin/env python
"""
This example illustrates embedding a visvis figure in an FLTK application.
"""
import fltk
import visvis as vv
# Create a visvis app instance, which wraps an fltk application object.
# This needs to be done *before* instantiating the main window.
app = vv.use('fltk')
class MainWindow(fltk.Fl_Window):
def __init__(self):
fltk.Fl_Window.__init__(self, 560, 420, "Embedding in FLTK")
# Make a panel with a button
but = fltk.Fl_Button(10,10,70,30, 'Click me')
but.callback(self._Plot)
# Make figure to draw stuff in
self.fig = vv.backends.backend_fltk.Figure(100,10,560-110,420-20, "")
# Make box for resizing
box = fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,"")
self.resizable(box)
box.hide()
# Finish
self.end()
self.show()
self.fig._widget.show()
def _Plot(self, event):
# Make sure our figure is the active one
# If only one figure, this is not necessary.
#vv.figure(self.fig.nr)
# Clear it
vv.clf()
# Plot
vv.plot([1,2,3,1,6])
vv.legend(['this is a line'])
# Two ways to create the application and start the main loop
if True:
# The visvis way. Will run in interactive mode when used in IEP or IPython.
app.Create()
m = MainWindow()
app.Run()
else:
# The native way.
m = MainWindow()
fltk.Fl.run()
| 2.953125 | 3 |
mathutils/xform.py | saridut/FloriPy | 0 | 12792807 | #!/usr/bin/env python
import math
import numpy as np
from . import linalg as la
from . import eulang
#Euler angle sequence: XYZ (world). First rotation about X, second rotation
#about Y, and the third rotation about Z axis of the world(i.e. fixed) frame.
#This is the same as the sequence used in Blender.
#In contrast, the XYZ sequence is understood in the Aerospace community as:
#First rotation about Z-axis, second rotation about Y-axis, and the third
#rotation about X-axis of the body frame.
#Axis_angle------------------------------------------------------------
def fix_axis_angle(axis, angle, normalize=True):
if normalize:
norm = np.linalg.norm(axis)
if not math.isclose(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14):
axis /= norm
angle = math.fmod(angle, 2*math.pi)
if angle < 0.0:
angle = -angle
axis = -axis
if angle > math.pi:
angle = 2*math.pi - angle
axis = -axis
return (axis, angle)
def get_rand_axis_angle():
'''
Generates a random pair of axis-angle. The axis is a random vector from
the surface of a unit sphere. Algorithm from Allen & Tildesley p. 349.
'''
axis = np.zeros((3,))
#Generate angle: A uniform random number from [0.0, 2*pi)
angle = 2.0*math.pi*np.random.random()
while True:
#Generate two uniform random numbers from [-1, 1)
zeta1 = 2.0*np.random.random() - 1.0
zeta2 = 2.0*np.random.random() - 1.0
zetasq = zeta1**2 + zeta2**2
if zetasq <= 1.0:
break
rt = np.sqrt(1.0-zetasq)
axis[0] = 2.0*zeta1*rt
axis[1] = 2.0*zeta2*rt
axis[2] = 1.0 - 2.0*zetasq
return fix_axis_angle(axis, angle)
def axis_angle_to_quat(axis, angle):
w = math.cos(angle/2)
v = math.sin(angle/2)*axis
q = np.array([w, v[0], v[1], v[2]])
return normalize_quat(q)
def axis_angle_to_euler(axis, angle, seq='XYZ', world=True):
rotmat = get_rotmat_axis_angle(axis, angle)
euler = factorize_rotmat(rotmat, seq=seq, world=world)
return euler
def axis_angle_to_dcm(axis, angle):
dcm = get_shiftmat_axis_angle(axis, angle, forward=True)
return dcm
def any_to_axis_angle(orientation):
ori_repr = orientation['repr']
if ori_repr == 'quat':
quat = np.array(orientation['quat'])
axis, angle = quat_to_axis_angle(quat)
elif ori_repr == 'euler':
euler = np.array(orientation['euler'])
seq = orientation['seq']
world = orientation['world']
axis, angle = euler_to_axis_angle(euler, seq=seq, world=world)
elif ori_repr == 'axis_angle':
axis = np.array(orientation['axis'])
angle = orientation['angle']
elif ori_repr == 'dcm':
axis, angle = dcm_to_axis_angle(orientation['dcm'])
else:
raise ValueError(
'Unrecognized orientation repr {0}'.format(ori_repr))
return axis, angle
def rotate_vector_axis_angle(v, axis, angle):
'''
Rotates vectors about axis by angle.
'''
rotmat = get_rotmat_axis_angle(axis, angle)
return np.dot(v, rotmat.T)
def get_rotmat_axis_angle(axis, angle):
R = np.zeros((3,3))
sin = np.sin(angle)
cos = np.cos(angle)
icos = 1.0 - cos
R[0,0] = axis[0]*axis[0]*icos + cos
R[0,1] = axis[0]*axis[1]*icos - axis[2]*sin
R[0,2] = axis[0]*axis[2]*icos + axis[1]*sin
R[1,0] = axis[0]*axis[1]*icos + axis[2]*sin
R[1,1] = axis[1]*axis[1]*icos + cos
R[1,2] = axis[1]*axis[2]*icos - axis[0]*sin
R[2,0] = axis[2]*axis[0]*icos - axis[1]*sin
R[2,1] = axis[1]*axis[2]*icos + axis[0]*sin
R[2,2] = axis[2]*axis[2]*icos + cos
return R
def extract_axis_angle_from_rotmat(rotmat):
trace = np.trace(rotmat)
angle = math.acos((trace-1)/2)
if angle > 0:
if angle < math.pi:
u0 = rotmat[2,1] - rotmat[1,2]
u1 = rotmat[0,2] - rotmat[2,0]
u2 = rotmat[1,0] - rotmat[0,1]
else:
#Find the largest entry in the diagonal of rotmat
k = np.argmax(np.diag(rotmat))
if k == 0:
u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2
s = 1.0/(2*u0)
u1 = s*rotmat[0,1]
u2 = s*rotmat[0,2]
elif k == 1:
u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2
s = 1.0/(2*u1)
u0 = s*rotmat[0,1]
u2 = s*rotmat[1,2]
elif k == 2:
u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2
s = 1.0/(2*u2)
u0 = s*rotmat[0,2]
u1 = s*rotmat[1,2]
else:
u0 = 1.0
u1 = 0.0
u2 = 0.0
return fix_axis_angle(np.array([u0, u1, u2]), angle, normalize=True)
def shift_vector_axis_angle(v, axis, angle, forward=False):
shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward)
return np.dot(v, shiftmat.T)
def shift_tensor2_axis_angle(a, axis, angle, forward=False):
shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward)
return np.einsum('ip,jq,pq', shiftmat, shiftmat, a)
def shift_tensor3_axis_angle(a, axis, angle, forward=False):
shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward)
return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)
def get_shiftmat_axis_angle(axis, angle, forward=False):
shiftmat = get_rotmat_axis_angle(-axis, angle)
if not forward:
shiftmat = shiftmat.T
return shiftmat
#Direction cosine matrix-----------------------------------------------
def dcm_from_axes(A, B):
'''
Returns the direction cosine matrix of axes(i.e. frame) B w.r.t.
axes(i.e. frame) A.
Parameters
----------
A : (3,3) ndarray
The rows of A represent the orthonormal basis vectors of frame A.
B : (3,3) ndarray
The rows of B represent the orthonormal basis vectors of frame B.
Returns
-------
(3,3) ndarray
The dcm of frame B w.r.t. frame A.
'''
return np.dot(B, A.T)
def dcm_to_quat(dcm):
mat = get_rotmat_dcm(dcm)
axis, angle = extract_axis_angle_from_rotmat(mat)
return axis_angle_to_quat(axis, angle)
def dcm_to_euler(dcm, seq='XYZ', world=True):
mat = get_rotmat_dcm(dcm)
euler = factorize_rotmat(mat, seq=seq, world=world)
return euler
def dcm_to_axis_angle(dcm):
mat = get_rotmat_dcm(dcm)
axis, angle = extract_axis_angle_from_rotmat(mat)
return (axis, angle)
def any_to_dcm(orientation):
ori_repr = orientation['repr']
if ori_repr == 'quat':
quat = np.array(orientation['quat'])
dcm = quat_to_dcm(quat)
elif ori_repr == 'euler':
euler = np.array(orientation['euler'])
seq = orientation['seq']
world = orientation['world']
dcm = euler_to_dcm(euler, seq=seq, world=world)
elif ori_repr == 'axis_angle':
axis = np.array(orientation['axis'])
angle = orientation['angle']
dcm = axis_angle_to_dcm(axis, angle)
elif ori_repr == 'dcm':
        dcm = np.array(orientation['dcm'])
else:
raise ValueError(
'Unrecognized orientation repr {0}'.format(ori_repr))
return dcm
def rotate_vector_dcm(v, dcm):
rotmat = get_rotmat_dcm(dcm)
return np.dot(v, rotmat.T)
def get_rotmat_dcm(dcm):
return dcm.T
def shift_vector_dcm(v, dcm, forward=False):
shiftmat = get_shiftmat_dcm(dcm, forward=forward)
return np.dot(v, shiftmat.T)
def shift_tensor2_dcm(a, dcm, forward=False):
shiftmat = get_shiftmat_dcm(dcm, forward=forward)
return np.einsum('ip,jq,pq', shiftmat, shiftmat, a)
def shift_tensor3_dcm(a, dcm, forward=False):
shiftmat = get_shiftmat_dcm(dcm, forward=forward)
return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)
def get_shiftmat_dcm(dcm, forward=False):
shiftmat = dcm
if not forward:
shiftmat = shiftmat.T
return shiftmat
#Euler angle-----------------------------------------------------------
def factorize_rotmat(rotmat, seq='XYZ', world=True):
return eulang.factor_rotmat(rotmat, seq=seq, world=world)
def euler_to_euler(euler, seq, world, to_seq, to_world):
rotmat = get_rotmat_euler(euler, seq=seq, world=world)
return factorize_rotmat(rotmat, seq=to_seq, world=to_world)
def euler_to_quat(euler, seq='XYZ', world=True):
axis, angle = euler_to_axis_angle(euler, seq=seq, world=world)
return axis_angle_to_quat(axis, angle)
def euler_to_dcm(euler, seq='XYZ', world=True):
dcm = get_shiftmat_euler(euler, seq=seq, world=world, forward=True)
return dcm
def euler_to_axis_angle(euler, seq='XYZ', world=True):
rotmat = get_rotmat_euler(euler, seq=seq, world=world)
axis, angle = extract_axis_angle_from_rotmat(rotmat)
return (axis, angle)
def any_to_euler(orientation, to_seq, to_world):
ori_repr = orientation['repr']
if ori_repr == 'quat':
quat = np.array(orientation['quat'])
euler = quat_to_euler(quat, seq=to_seq, world=to_world)
elif ori_repr == 'euler':
euler = np.array(orientation['euler'])
seq = orientation['seq']
world = orientation['world']
euler = euler_to_euler(euler, seq, world, to_seq, to_world)
elif ori_repr == 'axis_angle':
axis = np.array(orientation['axis'])
angle = orientation['angle']
euler = axis_angle_to_euler(axis, angle, seq=to_seq, world=to_world)
elif ori_repr == 'dcm':
euler = dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world)
else:
raise ValueError(
'Unrecognized orientation repr {0}'.format(ori_repr))
return euler
def rotate_vector_euler(v, euler, seq='XYZ', world=True):
'''
Rotates vectors about axis by angle.
'''
rotmat = get_rotmat_euler(euler, seq=seq, world=world)
return np.dot(v, rotmat.T)
def get_rotmat_euler(euler, seq='XYZ', world=True):
return eulang.rotmat_euler(euler, seq=seq, world=world)
def shift_vector_euler(v, euler, seq='XYZ', world=True, forward=False):
shiftmat = get_shiftmat_euler(euler, seq=seq, world=world, forward=forward)
return np.dot(v, shiftmat.T)
def shift_tensor2_euler(a, euler, forward=False):
shiftmat = get_shiftmat_euler(euler, forward=forward)
return np.einsum('ip,jq,pq', shiftmat, shiftmat, a)
def shift_tensor3_euler(a, euler, forward=False):
shiftmat = get_shiftmat_euler(euler, forward=forward)
return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)
def get_shiftmat_euler(euler, seq='XYZ', world=True, forward=False):
rotmat = get_rotmat_euler(euler, seq=seq, world=world)
if forward:
shiftmat = rotmat.T
else:
shiftmat = rotmat
return shiftmat
#Quaternion-----------------------------------------------------------
def get_rand_quat():
q = np.random.random((4,))
return normalize_quat(q)
def get_identity_quat():
return np.array([1.0, 0.0, 0.0, 0.0])
def get_rand_quat():
axis, angle = get_rand_axis_angle()
return axis_angle_to_quat(axis, angle)
def get_perturbed_quat(q):
raise NotImplementedError
def quat_to_axis_angle(q):
angle = 2*math.acos(q[0])
sin = math.sqrt(1.0-q[0]**2)
if angle > 0.0:
if angle < math.pi:
axis = q[1:4]/sin
else:
rotmat = get_rotmat_quat(q)
axis, angle = extract_axis_angle_from_rotmat(rotmat)
else:
axis = np.array([1.0, 0.0, 0.0])
return fix_axis_angle(axis, angle, normalize=True)
def quat_to_euler(q, seq='XYZ', world=True):
rotmat = get_rotmat_quat(q)
return factorize_rotmat(rotmat, seq=seq, world=world)
def quat_to_dcm(q):
return get_shiftmat_quat(q, forward=True)
def any_to_quat(orientation):
ori_repr = orientation['repr']
if ori_repr == 'quat':
quat = np.array(orientation['quat'])
elif ori_repr == 'euler':
euler = np.array(orientation['euler'])
seq = orientation['seq']
world = orientation['world']
quat = euler_to_quat(euler, seq=seq, world=world)
elif ori_repr == 'axis_angle':
axis = np.array(orientation['axis'])
angle = orientation['angle']
quat = axis_angle_to_quat(axis, angle)
elif ori_repr == 'dcm':
quat = dcm_to_quat(orientation['dcm'])
else:
raise ValueError(
'Unrecognized orientation repr {0}'.format(ori_repr))
return quat
def rotate_vector_quat(v, q):
rotmat = get_rotmat_quat(q)
return np.dot(v, rotmat.T)
def get_rotmat_quat(q):
rotmat = np.empty((3,3))
q0sq = q[0]**2
q1sq = q[1]**2
q2sq = q[2]**2
q3sq = q[3]**2
q0q1 = q[0]*q[1]
q0q2 = q[0]*q[2]
q0q3 = q[0]*q[3]
q1q2 = q[1]*q[2]
q1q3 = q[1]*q[3]
q2q3 = q[2]*q[3]
rotmat[0,0] = 2*(q0sq + q1sq) - 1.0
rotmat[0,1] = 2*(q1q2 - q0q3)
rotmat[0,2] = 2*(q1q3 + q0q2)
rotmat[1,0] = 2*(q1q2 + q0q3)
rotmat[1,1] = 2*(q0sq + q2sq) - 1.0
rotmat[1,2] = 2*(q2q3 - q0q1)
rotmat[2,0] = 2*(q1q3 - q0q2)
rotmat[2,1] = 2*(q2q3 + q0q1)
rotmat[2,2] = 2*(q0sq + q3sq) - 1.0
return rotmat
def shift_vector_quat(v, q, forward=False):
shiftmat = get_shiftmat_quat(q, forward=forward)
return np.dot(v, shiftmat.T)
def shift_tensor2_quat(a, quat, forward=False):
shiftmat = get_shiftmat_quat(quat, forward=forward)
return np.einsum('ip,jq,pq', shiftmat, shiftmat, a)
def shift_tensor3_quat(a, quat, forward=False):
shiftmat = get_shiftmat_quat(quat, forward=forward)
return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)
def get_shiftmat_quat(q, forward=False):
if forward:
shiftmat = get_rotmat_quat(get_conjugated_quat(q))
else:
shiftmat = get_rotmat_quat(q)
return shiftmat
def conjugate_quat(q):
'''
Conjugates a quaternion in-place.
'''
q[1:4] = -q[1:4]
return q
def get_conjugated_quat(q):
'''
Conjugates a quaternion and returns a copy.
'''
p = np.copy(q)
p[1:4] = -p[1:4]
return p
def invert_quat(q):
'''
Inverts a quaternion in-place.
'''
return conjugate_quat(q)
def get_inverted_quat(q):
'''
Inverts a quaternion and returns it as a new instance.
'''
p = np.copy(q)
return conjugate_quat(p)
def normalize_quat(q):
'''
Normalizes a quaternion in-place.
'''
q /= np.linalg.norm(q)
return q
def get_normalized_quat(q):
'''
Normalizes a quaternion and returns it as a copy.
'''
p = np.copy(q)
return normalize_quat(p)
def quat_is_normalized(q):
norm = np.linalg.norm(q)
if math.isclose(norm, 1.0, rel_tol=1e-14):
return True
else:
return False
def get_quat_prod(p, q):
p0, p1, p2, p3 = tuple(p)
prod_mat = np.array([[p0, -p1, -p2, -p3],
[p1, p0, -p3, p2],
[p2, p3, p0, -p1],
[p3, -p2, p1, p0]])
pq = normalize_quat(np.dot(prod_mat, q))
return pq
def interpolate_quat(q1, q2, t):
theta = get_angle_between_quat(q1, q2)
q = (q1*math.sin((1.0-t)*theta)
+ q2*math.sin(t*theta))/math.sin(theta)
return normalize_quat(q)
def get_angle_between_quat(p, q):
'''
Returns the angle between two quaternions p and q.
'''
return math.acos(np.dot(p,q))
def quat_deriv_to_ang_vel(q, qdot):
mat = quat_deriv_to_ang_vel_mat(q)
return np.dot(mat, qdot)
def quat_deriv_to_ang_vel_mat(q):
q0, q1, q2, q3 = tuple(q)
return 2*np.array([[-q1, q0, -q3, q2],
[-q2, q3, q0, -q1],
[-q3, -q2, q1, q0]])
def ang_vel_to_quat_deriv(q, ang_vel):
mat = ang_vel_to_quat_deriv_mat(q)
qdot = np.dot(mat, ang_vel)
return qdot
def ang_vel_to_quat_deriv_mat(q):
q0, q1, q2, q3 = tuple(q)
return 0.5*np.array([[-q1, -q2, -q3],
[ q0, q3, -q2],
[-q3, q0, q1],
[ q2, -q1, q0]])
#Other functions------------------------------------------------------
def translate(v, delta):
'''
Translates vectors inplace by delta.
'''
n = v.shape[0]
for i in range(n):
v[i,:] += delta
return v
def align(v, old, new):
'''
old and new represent coordinate axes. They must be unit vectors.
'''
assert old.shape[0] == new.shape[0]
n = old.shape[0]
if n == 1:
angle = math.acos(np.dot(old, new))
axis = la.unitized(np.cross(old, new))
return rotate_vector_axis_angle(v, axis, angle)
elif n == 2:
z_old = la.unitized(np.cross(old[0,:], old[1,:]))
z_new = la.unitized(np.cross(new[0,:], new[1,:]))
axes_old = np.vstack((old, z_old))
axes_new = np.vstack((new, z_new))
dcm = dcm_from_axes(axes_old, axes_new)
return rotate_vector_dcm(v, dcm)
elif n == 3:
dcm = dcm_from_axes(old, new)
return rotate_vector_dcm(v, dcm)
def mat_is_dcm(mat):
return mat_is_rotmat(mat)
def mat_is_rotmat(mat):
det_is_one = math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12, rel_tol=1e-12)
is_orthogonal = np.allclose(np.dot(mat, mat.T), np.identity(3))
return is_orthogonal and det_is_one
| 3.5 | 4 |
python/archive/hashem.py | kfsone/tinker | 0 | 12792808 | #! /usr/bin/python3
import argparse
import hashlib
import mmap
import os
import posixpath
import stat
import sys
from collections import namedtuple
exclusions = ('lost+found', 'restore', 'backup', 'Newsfeed', '.pki', 'Jenkins')
joinpath = posixpath.join
FileDesc = namedtuple('FileDesc', ['full_path', 'item_type', 'size', 'mtime', 'checksum'])
def get_file_desc(full_path, want_checksum):
try:
stinf = os.stat(full_path)
except:
return None
if stat.S_ISREG(stinf.st_mode):
item_type, item_size = 'F', stinf.st_size
elif stat.S_ISDIR(stinf.st_mode):
item_type, item_size = 'D', 0
else:
return None
checksum = 0
if item_type == 'F' and item_size > 0 and want_checksum(full_path):
with open(full_path, 'rb') as fl:
mm = mmap.mmap(fl.fileno(), 0, access=mmap.ACCESS_READ)
checksum = hashlib.md5(mm).hexdigest().lower()
mm.close()
return FileDesc(full_path, item_type, item_size, stinf.st_mtime, checksum)
def get_file_stats(base_dir, exclusions, get_checksums=None):
if get_checksums is None:
want_checksum = lambda filename: False
elif not get_checksums:
want_checksum = lambda filename: True
else:
want_checksum = lambda filename: filename in get_checksums
for base, dirs, files in os.walk(base_dir):
if base == base_dir:
# Skip top-level cruft
dirs[:] = [d for d in dirs if d not in exclusions]
for filename in files:
fd = get_file_desc(posixpath.join(base, filename), want_checksum)
if fd:
yield fd
def ls_cmd(args):
if args.checksum:
get_checksums = []
else:
get_checksums = args.filepath or None
for filedesc in get_file_stats(args.base_dir, exclusions, get_checksums):
print("{},{},{},{},{}".format(filedesc.item_type, filedesc.size, filedesc.mtime, filedesc.checksum, filedesc.full_path))
class CompareError(Exception):
def __init__(self, action, reason):
super().__init__("Compare failed")
self.action = action
self.reason = reason
def cmp_cmd(args):
do_checksum, dont_checksum = lambda fn: True, lambda fn: False
    with open(args.csv, "r") as fl:
        for line in (l.strip() for l in fl):
            if line:
                # ls_cmd emits: item_type,size,mtime,checksum,full_path
                item_type, size, mtime, checksum, full_path = line.split(',', 4)
                checksum = 0 if checksum == '0' else checksum
                remote_fd = FileDesc(full_path, item_type, int(size), float(mtime), checksum)
                local_path = os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path))
                want_checksum = do_checksum if remote_fd.checksum else dont_checksum
                local_fd = get_file_desc(local_path, want_checksum)
try:
if not local_fd:
raise CompareError("download", "missing")
if local_fd.item_type != remote_fd.item_type:
if remote_fd.item_type == 'D':
raise CompareError("mkdir", "changed")
elif remote_fd.item_type == 'F':
raise CompareError("download", "changed")
if remote_fd.size != local_fd.size:
raise CompareError("download", "size")
if remote_fd.checksum != local_fd.checksum:
raise CompareError("download", "checksum")
if remote_fd.mtime != local_fd.mtime:
                        os.utime(local_fd.full_path, (remote_fd.mtime, remote_fd.mtime))
raise CompareError("#touched", "mtime")
except CompareError as e:
print("%s,%s,%s,%s" % (e.action, e.reason, remote_fd.mtime, remote_fd.full_path))
if __name__ == "__main__":
argp = argparse.ArgumentParser("hasher")
argp.add_argument("--base-dir", "-C", dest="base_dir", default="/svn", help="Base folder")
subp = argp.add_subparsers()
lscmd = subp.add_parser("ls")
lscmd.add_argument("--checksum", action="store_true", help="Force checksumming")
lscmd.add_argument("filepath", action="append", default=[], nargs='*', help="File paths to check")
lscmd.set_defaults(func=ls_cmd)
    cmpcmd = subp.add_parser("cmp")
    cmpcmd.add_argument("csv", type=str, help="CSV file to check against")
    cmpcmd.set_defaults(func=cmp_cmd)
args = argp.parse_args(sys.argv[1:])
if not hasattr(args, 'func'):
raise RuntimeError("No sub-command specified. See --help for assistance.")
args.func(args)
| 2.34375 | 2 |
Interview Preparation Kits/Interview Preparation Kit/Graphs/DFS: Connected Cell in a Grid/connected_cells.py | xuedong/hacker-rank | 1 | 12792809 | <filename>Interview Preparation Kits/Interview Preparation Kit/Graphs/DFS: Connected Cell in a Grid/connected_cells.py<gh_stars>1-10
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the maxRegion function below.
def in_graph(grid, i, j):
n = len(grid)
m = len(grid[0])
return i >= 0 and j >= 0 and i < n and j < m
def dfs(grid, visited, i, j):
visited.add((i, j))
ans = 1
neighbors = [(i-1, j-1), (i, j-1), (i-1, j), (i+1, j-1), (i-1, j+1), (i, j+1), (i+1, j), (i+1, j+1)]
for (x, y) in neighbors:
if in_graph(grid, x, y) and (x, y) not in visited and grid[x][y] == 1:
ans += dfs(grid, visited, x, y)
return ans
def max_region(grid):
visited = set()
n = len(grid)
m = len(grid[0])
max_value = 0
for i in range(n):
for j in range(m):
if grid[i][j] == 1 and (i, j) not in visited:
ans = dfs(grid, visited, i, j)
max_value = max(max_value, ans)
return max_value
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
m = int(input())
grid = []
for _ in range(n):
grid.append(list(map(int, input().rstrip().split())))
res = max_region(grid)
fptr.write(str(res) + '\n')
fptr.close()
| 3.71875 | 4 |
tests/test_indexing.py | cloudbutton/lithops-array | 0 | 12792810 | import sklearn.datasets as datasets
from numpywren.matrix import BigMatrix
from numpywren import matrix_utils, binops
from numpywren.matrix_init import shard_matrix
import pytest
import numpy as np
import pywren
import unittest
class IndexingTestClass(unittest.TestCase):
def test_single_shard_index_get(self):
X = np.random.randn(128, 128)
X_sharded = BigMatrix("test_0", shape=X.shape, shard_sizes=X.shape)
shard_matrix(X_sharded, X)
X_sharded_local = X_sharded.submatrix(0, 0).get_block()
assert(np.all(X_sharded_local == X))
def test_single_shard_index_put(self):
X = np.random.randn(128, 128)
X_sharded = BigMatrix("test_1", shape=X.shape, shard_sizes=X.shape)
X_sharded.submatrix(0, 0).put_block(X)
assert(np.all(X_sharded.numpy() == X))
def test_multiple_shard_index_get(self):
X = np.random.randn(128, 128)
shard_sizes = [64, 64]
X_sharded = BigMatrix("test_2", shape=X.shape, shard_sizes=shard_sizes)
shard_matrix(X_sharded, X)
assert(np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0)))
assert(np.all(X[64:128, 64:128] ==
X_sharded.submatrix(1, 1).get_block()))
assert(np.all(X[0:64, 64:128] ==
X_sharded.submatrix(0, 1).get_block()))
assert(np.all(X[64:128, 0:64] ==
X_sharded.submatrix(None, 0).get_block(1)))
def test_simple_slices(self):
X = np.random.randn(128, 128)
shard_sizes = [32, 32]
X_sharded = BigMatrix("test_3", shape=X.shape, shard_sizes=shard_sizes)
shard_matrix(X_sharded, X)
assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy()))
assert(np.all(X[64:128] == X_sharded.submatrix([2, None]).numpy()))
assert(np.all(X[:, 0:96] == X_sharded.submatrix(None, [0, 3]).numpy()))
assert(np.all(X[:, 96:128] == X_sharded.submatrix(
None, [3, None]).numpy()))
def test_step_slices(self):
X = np.random.randn(128, 128)
shard_sizes = [16, 16]
X_sharded = BigMatrix("test_4", shape=X.shape, shard_sizes=shard_sizes)
shard_matrix(X_sharded, X)
assert(np.all(X[::32] == X_sharded.submatrix(
[None, None, 2]).numpy()[::16]))
assert(np.all(X[16::32] == X_sharded.submatrix(
[1, None, 2]).numpy()[::16]))
assert(np.all(X[:, 0:96:64] == X_sharded.submatrix(
None, [0, 6, 4]).numpy()[:, ::16]))
assert(np.all(X[:, 96:128:64] == X_sharded.submatrix(
None, [6, 8, 4]).numpy()[:, ::16]))
def test_complex_slices(self):
X = np.random.randn(21, 67, 53)
shard_sizes = [21, 16, 11]
X_sharded = BigMatrix("test_5", shape=X.shape, shard_sizes=shard_sizes)
shard_matrix(X_sharded, X)
assert(np.all(X[:, :16, :11] == X_sharded.submatrix(0, 0, 0).numpy()))
assert(np.all(X[:, 64:67, 44:53] ==
X_sharded.submatrix(0, 4, 4).numpy()))
| 2.578125 | 3 |
tests/test_kooptimise.py | anilkumarpanda/kooptimize | 0 | 12792811 | <gh_stars>0
import pytest
from kooptimize.korules import get_rules_for_individual
def test_get_rule_for_individual():
ind = [1,0,1,0]
rule_dict = {0:"age <= 70",1:"ltv<=90",
3:"no_of_primary_accts <= 50",4:"credit_history_lenght >= 3"}
result_dict = get_rules_for_individual(ind)
expected_dict = {0:"age <= 70",3:"no_of_primary_accts <= 50"}
assert result_dict == expected_dict | 2.21875 | 2 |
src/aiofiles/__init__.py | q0w/aiofiles | 1,947 | 12792812 | <gh_stars>1000+
"""Utilities for asyncio-friendly file handling."""
from .threadpool import open
from . import tempfile
__all__ = ["open", "tempfile"]
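# Illustrative usage sketch (added comment, not part of the package): the
# canonical call pattern is the async context manager returned by `open`.
# The file name below is only an example.
#
#     import asyncio
#     import aiofiles
#
#     async def main():
#         async with aiofiles.open("notes.txt", mode="w") as f:
#             await f.write("hello")
#
#     asyncio.run(main())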
| 1.609375 | 2 |
data_structures/recursion/key_combinatons.py | severian5it/udacity_dsa | 0 | 12792813 | <filename>data_structures/recursion/key_combinatons.py
def get_characters(num):
if num == 2:
return "abc"
elif num == 3:
return "def"
elif num == 4:
return "ghi"
elif num == 5:
return "jkl"
elif num == 6:
return "mno"
elif num == 7:
return "pqrs"
elif num == 8:
return "tuv"
elif num == 9:
return "wxyz"
else:
return ""
# My solution: this was not correct and took me too long; it is better to consider all the cases up front.
def perm_key(output, input):
if not input:
return output, input
elif not output:
key, rest = input[:1], input[1:]
char = get_characters(int(key))
new_output = []
for c in char:
new_string = c
new_output.append(new_string)
final_output, final_input = perm_key(new_output, rest)
return final_output, final_input
else:
key, rest = input[:1], input[1:]
char = get_characters(int(key))
new_output = []
for str_member in output:
for c in char:
new_string = str_member + c
new_output.append(new_string)
perm_key(new_output, rest)
return new_output, rest
print('return --->', perm_key([], '23'))
def keypad(num):
# TODO: Write your keypad solution here!
out, _ = perm_key([], str(num))
return out
# Recursive solution from Udacity: much simpler, and it handles the digits elegantly.
def keypad(num):
# Base case
if num <= 1:
return [""]
# If `num` is single digit, get the LIST having one element - the associated string
elif 1 < num <= 9:
return list(get_characters(num))
# Otherwise `num` >= 10. Find the unit's (last) digits of `num`
last_digit = num % 10
'''Step 1'''
# Recursive call to the same function with “floor” of the `num//10`
small_output = keypad(num // 10) # returns a LIST of strings
'''Step 2'''
# Get the associated string for the `last_digit`
keypad_string = get_characters(last_digit) # returns a string
'''Permute the characters of result obtained from Step 1 and Step 2'''
output = list()
'''
The Idea:
Each character of keypad_string must be appended to the
end of each string available in the small_output
'''
for character in keypad_string:
for item in small_output:
new_item = item + character
output.append(new_item)
return output
def test_keypad(input, expected_output):
print(keypad(input))
if sorted(keypad(input)) == expected_output:
print("Yay. We got it right.")
else:
print("Oops! That was incorrect.")
# Example case
input = 23
expected_output = sorted(["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"])
test_keypad(input, expected_output)
# Example case
input = 32
expected_output = sorted(["da", "db", "dc", "ea", "eb", "ec", "fa", "fb", "fc"])
test_keypad(input, expected_output)
# Example case
input = 8
expected_output = sorted(["t", "u", "v"])
test_keypad(input, expected_output)
input = 354
expected_output = sorted(["djg", "ejg", "fjg", "dkg", "ekg", "fkg", "dlg", "elg", "flg", "djh", "ejh", "fjh", "dkh", "ekh", "fkh", "dlh", "elh", "flh", "dji", "eji", "fji", "dki", "eki", "fki", "dli", "eli", "fli"])
test_keypad(input, expected_output)
# Base case: list with empty string
input = 0
expected_output = [""]
test_keypad(input, expected_output) | 3.609375 | 4 |
Part_1/ch03_func/3_2_return.py | hyperpc/AutoStuffWithPython | 0 | 12792814 | <filename>Part_1/ch03_func/3_2_return.py
import random
def getAnswer(answerNumber):
result = ''
if answerNumber == 1:
result = 'It is certain'
elif answerNumber == 2:
result = 'It is decidedly so'
elif answerNumber == 3:
result = 'Yes'
elif answerNumber == 4:
result = 'Reply hazy try again'
elif answerNumber == 5:
result = 'Ask again later'
elif answerNumber == 6:
        result = 'Concentrate and ask again'
elif answerNumber == 7:
result = 'My reply is no'
elif answerNumber == 8:
result = 'Outlook not so good'
elif answerNumber == 9:
result = 'Very doubtful'
return result
r = random.randint(1,9)
fortune = getAnswer(r)
print(fortune)
print(getAnswer(random.randint(1,9))) | 3.859375 | 4 |
core/migrations/0024_auto_20200410_2228.py | scottstanie/fashbowl | 1 | 12792815 | # Generated by Django 2.2.10 on 2020-04-10 22:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0023_game_max_words'),
]
operations = [
migrations.RenameField(
model_name='game',
old_name='max_words',
new_name='words_per_player',
),
]
| 1.859375 | 2 |
catena/cmd/manage.py | HewlettPackard/catena | 4 | 12792816 | # (C) Copyright 2017 Hewlett Packard Enterprise Development LP.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import subprocess
import sys
import tempfile
from oslo_config import cfg
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log
from catena.common import config
from catena.common.utils import decrypt_private_rsakey
from catena.common.utils import decrypt_rsakey
from catena.db.sqlalchemy import api as db_api
from catena.db.sqlalchemy import models
CONF = cfg.CONF
LOG = log.getLogger(__name__)
def register_models():
context = enginefacade.writer.get_engine()
return models.register_models(context)
def unregister_models():
context = enginefacade.writer.get_engine()
return models.unregister_models(context)
def output_ssh_key():
context = db_api.get_context()
chain = db_api.get_chain(context, CONF.sub.chain_id)
if chain is None:
return LOG.error('This chain-id does not exist')
node = db_api.get_node(context, chain, CONF.sub.node_id)
if node is None:
return LOG.error('This node-id does not exist')
print(decrypt_rsakey(node.ssh_key))
def open_ssh_connection():
context = db_api.get_context()
chain = db_api.get_chain(context, CONF.sub.chain_id)
if chain is None:
return LOG.error('This chain-id does not exist')
node = db_api.get_node(context, chain, CONF.sub.node_id)
if node is None:
return LOG.error('This node-id does not exist')
home = os.path.expanduser("~/.ssh")
jumpbox_ip = chain.get_cloud_config()['jumpbox_ip']
with tempfile.NamedTemporaryFile(
dir=home) as temp_node_ssh, tempfile.NamedTemporaryFile(
dir=home) as temp_jumpbox_ssh:
decrypt_private_rsakey(node.ssh_key, temp_node_ssh)
decrypt_private_rsakey(
chain.get_cloud_config()['jumpbox_key'],
temp_jumpbox_ssh
)
args = [
'/bin/bash', '-c',
'ssh -i {} -o ProxyCommand="ssh -q -i {} -W %h:%p ubuntu@{}" -o '
'StrictHostKeyChecking=no ubuntu@{}'.format(
temp_node_ssh.name,
temp_jumpbox_ssh.name,
jumpbox_ip,
node.ip)
]
process = subprocess.Popen(args)
process.wait()
def register_sub_opts(subparser):
parser = subparser.add_parser('db_sync')
parser.set_defaults(action_fn=register_models)
parser.set_defaults(action='db_sync')
parser = subparser.add_parser('db_remove')
parser.set_defaults(action_fn=unregister_models)
parser.set_defaults(action='db_remove')
parser = subparser.add_parser('ssh_key')
parser.add_argument('chain_id')
parser.add_argument('node_id')
parser.set_defaults(action_fn=output_ssh_key)
parser.set_defaults(action='ssh_key')
parser = subparser.add_parser('ssh')
parser.add_argument('chain_id')
parser.add_argument('node_id')
parser.set_defaults(action_fn=open_ssh_connection)
parser.set_defaults(action='ssh')
SUB_OPTS = [
cfg.SubCommandOpt(
'sub',
dest='sub',
title='Sub Options',
handler=register_sub_opts)
]
def main():
"""Parse options and call the appropriate class/method."""
CONF.register_cli_opts(SUB_OPTS)
config.parse_args(sys.argv[1:])
config.setup_logging()
try:
if CONF.sub.action.startswith('db'):
return CONF.sub.action_fn()
if CONF.sub.action.startswith('ssh'):
return CONF.sub.action_fn()
except Exception as e:
sys.exit("ERROR: {0}".format(e))
| 1.820313 | 2 |
BancoEAN.py | VivianaMontacost/ProyectoBancoEAN | 0 | 12792817 | """
Juandabu
"""
persona= ['<NAME>', '<NAME>' ]
usuario= ['superman' , 'batman' ]
contraseña= [<PASSWORD> , <PASSWORD> ]
numero_de_cuenta= [ 3115996681 , 32221822 ]
tipo_de_cuenta= [ 'corriente', 'ahorros' ]
dinero= [ 1000.00 , 1500.00 ]
rta_pregunta_de_seguridad= ['perro' , 'murcielago' ]
estado_sesion= ['cerrada' , 'cerrada' ]
'''
print("Estado inicial en el banco")
for i in list(range(0,len(persona))):
print("\n")
print(" persona:",persona[i])
print(" usuario:",usuario[i])
print(" contraseña:",contraseña[i])
print(" numero_de_cuenta:",numero_de_cuenta[i])
print(" tipo_de_cuenta:",tipo_de_cuenta[i])
print(" dinero:",dinero[i])
print(" rta_pregunta_de_seguridad:",rta_pregunta_de_seguridad[i])
print(" estado_sesion:",estado_sesion[i])
print("\n")
'''
while(True):
sesion=False
while(sesion==False):
print("\n")
print("INICIO DE SESIÓN")
print("ingrese usuario:")
usuario_ingresado=input()
print("ingrese contraseña:")
contraseña_ingresada=input()
print(contraseña_ingresada)
if usuario_ingresado in usuario:
indice=usuario.index(usuario_ingresado)
if contraseña[indice] == int(contraseña_ingresada):
estado_sesion[indice] ='activa'
print("usuario logueado")
sesion=True
else:
print("contraseña invalida")
else:
print("usuario no existe")
sesion_terminada=False
while(sesion_terminada==False):
print("\n")
print("ELIJA ALGUNA TRANSACCION POR EL NUMERO:")
print("1: Consultar saldo")
print("2: Hacer retiro")
print("3: Transferir dinero")
print("4: Cerrar sesión")
print("5: Estado todas las cuentas del banco")
opcion=input()
opcion=int(opcion)
if opcion in [1,2,3,4,5]:
indice=usuario.index(usuario_ingresado)
if estado_sesion[indice] =='activa':
if opcion==1:
print("Saldo en cuenta",numero_de_cuenta[indice] ,":",dinero[indice] )
if opcion==2:
print("Digite el valor a retirar:")
valor_retiro=input()
valor_retiro=int(valor_retiro)
if valor_retiro > dinero[indice]:
print("Fondos insuficientes")
else:
dinero[indice]=dinero[indice]-valor_retiro
print("Valor retiro:",valor_retiro )
print("Saldo en cuenta:",dinero[indice] )
if opcion==3:
print("Digite el valor de la trasferencia:")
valor_trasferencia=input()
valor_trasferencia=int(valor_trasferencia)
print("Digite la cuenta de destino:")
numero_de_cuenta_final=input()
numero_de_cuenta_final=int(numero_de_cuenta_final)
if valor_trasferencia > dinero[indice]:
print("fondos insuficientes")
else:
if numero_de_cuenta_final in numero_de_cuenta:
indice_cta=numero_de_cuenta.index(numero_de_cuenta_final)
dinero[indice]=dinero[indice]-valor_trasferencia
dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia
print("trasferencia exitosa al numero de cuenta:",numero_de_cuenta_final )
print("valor trasferencia:",valor_trasferencia)
print("saldo en cuenta",numero_de_cuenta[indice] ,":",dinero[indice] )
else:
print("la cuenta de destino no existe")
if opcion==4:
if estado_sesion[indice] =='activa':
estado_sesion[indice] ='cerrada'
print("sesion cerrada")
else:
print("sesion ya está cerrada")
sesion_terminada=True
if opcion==5:
print("\n")
print("Estado cuentas banco")
for i in list(range(0,len(persona))):
print("\n")
print(" persona:",persona[i])
print(" usuario:",usuario[i])
print(" contraseña:",contraseña[i])
print(" numero_de_cuenta:",numero_de_cuenta[i])
print(" tipo_de_cuenta:",tipo_de_cuenta[i])
print(" dinero:",dinero[i])
print(" rta_pregunta_de_seguridad:",rta_pregunta_de_seguridad[i])
print(" estado_sesion:",estado_sesion[i])
print("\n")
else:
print("sesion no activa, tramite invalido")
else:
print("Opción invalida") | 4.09375 | 4 |
main.py | khanhcsc/tts-bot | 0 | 12792818 | import os
import sys
import discord
from bot import init_cfg, init_bot
from template import handle, Context
DEBUG = True
TOKEN = ""
def init():
if len(sys.argv) <= 1:
sys.exit("start template: python main.py <TOKEN>")
global TOKEN
TOKEN = sys.argv[1]
if __name__ == "__main__":
init()
# create config and line dir
if not os.path.exists("cfg"):
os.mkdir("cfg")
if not os.path.exists("line"):
os.mkdir("line")
if not os.path.exists("emoji"):
os.mkdir("emoji")
while True:
try:
cli = discord.Client()
tts_bot = init_bot()
online = {}
async def helper(msg: discord.Message):
if DEBUG and msg.channel.name != "test":
# in debug mode, only serve messages from test
return
if not DEBUG and msg.channel.name == "test":
# not in debug mode, skip messages from test
return
guild_id = str(msg.guild.id)
if guild_id not in online:
online[guild_id] = init_cfg(guild_id)
await handle(Context(tts_bot, cli, online[guild_id], msg))
@cli.event
async def on_message(msg: discord.Message):
await helper(msg)
@cli.event
async def on_message_edit(before: discord.Message, after: discord.Message):
await helper(after)
cli.run(TOKEN)
except Exception as e:
print(f"ERROR: {e}")
| 2.125 | 2 |
dragonflow/tests/unit/test_fc_app.py | qianyuqiao/dragonflow | 0 | 12792819 | <reponame>qianyuqiao/dragonflow
# Copyright (c) 2016 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from dragonflow.db.models import l2
from dragonflow.db.models import sfc
from dragonflow.tests.common import utils
from dragonflow.tests.unit import test_app_base
lswitch1 = l2.LogicalSwitch(
id='lswitch1',
topic='topic1',
version=10,
unique_key=22,
)
lport1 = l2.LogicalPort(
id='lport1',
topic='topic1',
version=10,
unique_key=22,
lswitch='lswitch1',
binding=test_app_base.local_binding,
)
lport2 = l2.LogicalPort(
id='lport2',
topic='topic1',
version=10,
unique_key=24,
lswitch='lswitch1',
binding=test_app_base.local_binding,
)
lport3 = l2.LogicalPort(
id='lport3',
topic='topic1',
version=10,
unique_key=29,
lswitch='lswitch1',
binding=test_app_base.local_binding,
)
fc1 = sfc.FlowClassifier(
id='fc1',
topic='topic1',
unique_key=22,
source_port='lport1',
)
fc2 = sfc.FlowClassifier(
id='fc2',
topic='topic1',
unique_key=12,
dest_port='lport2',
)
fc3 = sfc.FlowClassifier(
id='fc3',
topic='topic1',
unique_key=13,
source_port='lport3',
)
pc1 = sfc.PortChain(
id='pc1',
topic='topic1',
flow_classifiers=['fc1', 'fc2'],
)
pc1add = sfc.PortChain(
id='pc1',
topic='topic1',
flow_classifiers=['fc1', 'fc3', 'fc2'],
)
pc1remove = sfc.PortChain(
id='pc1',
topic='topic1',
flow_classifiers=['fc2'],
)
pc1replace = sfc.PortChain(
id='pc1',
topic='topic1',
flow_classifiers=['fc3', 'fc2'],
)
fc10 = sfc.FlowClassifier(
id='fc10',
topic='topic1',
unique_key=10,
source_port='lport1',
)
fc11 = sfc.FlowClassifier(
id='fc11',
topic='topic1',
unique_key=11,
source_port='lport2',
)
fc12 = sfc.FlowClassifier(
id='fc12',
topic='topic1',
unique_key=12,
dest_port='lport1',
)
fc13 = sfc.FlowClassifier(
id='fc13',
topic='topic1',
unique_key=13,
dest_port='lport2',
)
pc2 = sfc.PortChain(
id='pc2',
topic='topic1',
flow_classifiers=['fc10', 'fc11', 'fc12', 'fc14'],
)
l2_objs = (lswitch1, lport1, lport2, lport3)
class TestFcApp(test_app_base.DFAppTestBase):
apps_list = ['fc']
def setUp(self):
super(TestFcApp, self).setUp()
self.app = self.open_flow_app.dispatcher.apps['fc']
for attribute in ('_install_flow_classifier',
'_uninstall_flow_classifier',
'_install_classification_flows',
'_install_dispatch_flows',
'_uninstall_classification_flows',
'_uninstall_dispatch_flows'):
orig = getattr(self.app, attribute)
p = mock.patch.object(self.app, attribute, side_effect=orig)
self.addCleanup(p.stop)
p.start()
@utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs)
def test_pc_created(self):
pc1.emit_created()
self.app._install_flow_classifier.assert_has_calls(
[
mock.call(pc1.flow_classifiers[0]),
mock.call(pc1.flow_classifiers[1]),
],
)
self.assertEqual(2, self.app._install_flow_classifier.call_count)
self.app._uninstall_flow_classifier.assert_not_called()
@utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs)
def test_pc_deleted(self):
pc1.emit_deleted()
self.app._install_flow_classifier.assert_not_called()
self.app._uninstall_flow_classifier.assert_has_calls(
[
mock.call(pc1.flow_classifiers[0]),
mock.call(pc1.flow_classifiers[1]),
],
)
self.assertEqual(2, self.app._uninstall_flow_classifier.call_count)
@utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs)
def test_pc_updated_add_fc(self):
pc1add.emit_updated(pc1)
self.app._install_flow_classifier.assert_called_once_with(
pc1add.flow_classifiers[1])
self.app._uninstall_flow_classifier.assert_not_called()
@utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs)
def test_pc_updated_remove_fc(self):
pc1remove.emit_updated(pc1)
self.app._install_flow_classifier.assert_not_called()
self.app._uninstall_flow_classifier.assert_called_once_with(
pc1.flow_classifiers[0])
@utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs)
def test_pc_updated_replace_fc(self):
pc1replace.emit_updated(pc1)
self.app._install_flow_classifier.assert_called_once_with(
pc1replace.flow_classifiers[0])
self.app._uninstall_flow_classifier.assert_called_once_with(
pc1.flow_classifiers[0])
@utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs)
def test_install_flow_classifier(self):
pc2.emit_created()
# Installed only for dest-port and local source ports:
        self.app._install_classification_flows.assert_has_calls(
[
mock.call(pc2.flow_classifiers[0]),
mock.call(pc2.flow_classifiers[2]),
mock.call(pc2.flow_classifiers[3]),
],
)
self.assertEqual(3, self.app._install_classification_flows.call_count)
# Installed only for source-port and local dest ports:
self.app._install_dispatch_flows.assert_has_calls(
[
mock.call(pc2.flow_classifiers[0]),
mock.call(pc2.flow_classifiers[1]),
mock.call(pc2.flow_classifiers[2]),
],
)
self.assertEqual(3, self.app._install_dispatch_flows.call_count)
@utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs)
def test_uninstall_flow_classifier(self):
pc2.emit_deleted()
# Installed only for dest-port and local source ports:
        self.app._uninstall_classification_flows.assert_has_calls(
[
mock.call(pc2.flow_classifiers[0]),
mock.call(pc2.flow_classifiers[2]),
mock.call(pc2.flow_classifiers[3]),
],
)
self.assertEqual(
3, self.app._uninstall_classification_flows.call_count)
# Installed only for source-port and local dest ports:
self.app._uninstall_dispatch_flows.assert_has_calls(
[
mock.call(pc2.flow_classifiers[0]),
mock.call(pc2.flow_classifiers[1]),
mock.call(pc2.flow_classifiers[2]),
],
)
self.assertEqual(3, self.app._uninstall_dispatch_flows.call_count)
@utils.with_local_objects(fc1, fc2, pc1, *l2_objs)
def test_src_local_port_added(self):
lport1.emit_bind_local()
self.app._install_classification_flows.assert_called_once_with(fc1)
self.app._install_dispatch_flows.assert_not_called()
@utils.with_local_objects(fc1, fc2, pc1, *l2_objs)
def test_src_local_port_removed(self):
lport1.emit_unbind_local()
self.app._uninstall_classification_flows.assert_called_once_with(fc1)
self.app._uninstall_dispatch_flows.assert_not_called()
@utils.with_local_objects(fc1, fc2, pc1, *l2_objs)
def test_dest_local_port_added(self):
lport2.emit_bind_local()
self.app._install_classification_flows.assert_not_called()
self.app._install_dispatch_flows.assert_called_once_with(fc2)
@utils.with_local_objects(fc1, fc2, pc1, *l2_objs)
def test_dest_local_port_removed(self):
lport2.emit_unbind_local()
self.app._uninstall_classification_flows.assert_not_called()
self.app._uninstall_dispatch_flows.assert_called_once_with(fc2)
| 1.828125 | 2 |
draw_lines_from_files.py | inconvergent/axidraw-xy | 29 | 12792820 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from xy.device import Device
def main(args):
from modules.utils import get_paths_from_n_files as get
pattern = args.pattern
steps = args.steps
stride = args.stride
skip = args.skip
paths = get(pattern, skip, steps, stride, spatial_concat=True, spatial_concat_eps=1e-4)
with Device(scale=0.99, penup=0.4) as device:
device.do_paths(paths)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--pattern',
type=str,
required=True
)
parser.add_argument(
'--steps',
type=int,
default=100000
)
parser.add_argument(
'--stride',
type=int,
default=1
)
parser.add_argument(
'--skip',
type=int,
default=0
)
args = parser.parse_args()
main(args)
| 2.109375 | 2 |
Data_Structure/memoized.py | 8luebottle/DataStructure-N-Algorithm | 0 | 12792821 | from functiontools import wraps
from benchmark import benchmark
def memo(func):
cache = {}
@wraps(func)
def wrap(*args):
if args not in cache:
cache[args] = func(*args)
return cache[args]
return wrap
def fib(n):
if n < 2:
return 1
else:
return fib(n-1) + fib(n-2)
@memo
def fib2(n):
if n < 2:
return 1
else:
return fib2(n-1) + fib2(n-2)
def fib3(m,n):
if m[n] == 0:
m[n] = fib3(m, n-1) + fib3(m, n-2)
return m[n]
@benchmark
def test_fib(n):
print(fib(n))
@benchmark
def test_fib2(n):
print(fib2(n))
@benchmark
def test_fib3(n):
m = [0] * (n+1)
m[0], m[1] = 1, 1
print(fib3(m,n))
if __name__=="__main__":
n = 35
test_fib(n)
test_fib2(n)
test_fib3(n)
| 3.296875 | 3 |
gamedata.py | Positron11/prison-breakout-game | 0 | 12792822 | <reponame>Positron11/prison-breakout-game
# Inmates data
inmates = [
{"name": "billy", "strength": 55, "friendliness": 40, "influence": 20},
{"name": "bob", "strength": 70, "friendliness": 20, "influence": 50},
{"name": "joe", "strength": 23, "friendliness": 79, "influence": 30}
]
# Guards data
guards = [
{"name": "mcferrin", "friendliness": 13},
{"name": "douglas", "friendliness": 5},
{"name": "hamilton", "friendliness": 26}
]
# Prison room data
rooms = [
{"name": "cells"},
{"name": "cafeteria"},
{"name": "yard"},
{"name": "laundry"},
{"name": "library"},
{"name": "solitary"},
{"name": "office"},
]
# Prison schedule data
schedule = [
{"location": "cells", "duration": 1},
{"location": "cafeteria", "duration": 3},
{"location": "laundry", "duration": 2},
{"location": "yard", "duration": 5},
{"location": "cafeteria", "duration": 3},
{"location": "library", "duration": 6},
{"location": "cafeteria", "duration": 4},
{"location": "cells", "duration": 2},
] | 2.375 | 2 |
pyram/rot2/Do_serialize.py | Hoseung/pyRamAn | 1 | 12792823 | <gh_stars>1-10
import numpy as np
import galaxymodule # needed for result_sub_sample_**.pickle
import utils.match as mtc
import pickle
#from utils import hagn
import os
from rot2.analysis import *
from rot2 import serialize_results
#import tree.halomodule as hmo
#from rot2 import cell_chunk_module as ccm
import numpy.lib.recfunctions as recf
from utils import cosmology
from load.info import Info
def fill_main(mainarr, nnza_cell, tc):
# Set up a new array.
new_nouts = nnza_cell.nnza["nout"][:mainarr["nstep"].ptp()+1]
newarr = np.zeros(len(new_nouts), dtype=mainarr.dtype)
# It's easy to fill nouts and nsteps.
newarr["nout"]=new_nouts
newarr["nstep"]=nnza_cell.a2b(newarr["nout"], "nout", "nstep")
    interp_fields = list(mainarr.dtype.names)
for field in ["nout", "nstep"]:
interp_fields.remove(field)
lbt_org = tc.zred2gyr(nnza_cell.a2b(mainarr["nstep"],"nstep","zred"), z_now=0)
lbt_new = tc.zred2gyr(nnza_cell.a2b(newarr["nstep"],"nstep","zred"), z_now=0)
for field in ["id", "idx"]:
newarr[field][mtc.match_list_ind(newarr["nout"], mainarr["nout"])] = mainarr[field]
interp_fields.remove(field)
for field in interp_fields:
if mainarr[field].ndim == 2:
for i in range(3):
r_p = mainarr[field][:,i]
newarr[field][:,i] = np.interp(lbt_new, lbt_org, mainarr[field][:,i])
else:
r_p = mainarr[field]
newarr[field] = np.interp(lbt_new, lbt_org, r_p)
return newarr
# interpolate main galaxy results on finetree.
def interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True):
finetree=this_gal.maintree
mainarr = this_gal.main_arr
finearr = np.zeros(len(finetree),dtype=mainarr.dtype)
fields_interp = list(mainarr.dtype.names)
finearr["nstep"]=finetree["nstep"]
finearr["id"] = finetree["id"]
finearr["idx"] = finetree["idx"]
finearr["pos"] = finetree["pos"] # Pos and vel can be overwritten if a better measurement from galaxy proeprty exist.
finearr["vel"] = finetree["vel"] #
finearr["nout"]=nnza_all.a2b(finetree["nstep"],"nstep","nout")
for field in ["id", "idx", "pos", "vel", "nstep", "nout"]:
fields_interp.remove(field)
lbt = tc.zred2gyr(nnza_all.a2b(finetree["nstep"],"nstep","zred"), z_now=0)
lbt_cell = tc.zred2gyr(nnza_cell.a2b(mainarr["nstep"],"nstep","zred"), z_now=0)
for mar in mainarr:
finearr["pos"][finearr["nout"] == mar["nout"]] = mar["pos"]
finearr["vel"][finearr["nout"] == mar["nout"]] = mar["vel"]
for field in fields_interp:
# Begining of merger
if mainarr[field].ndim == 2:
for i in range(3):
if do_smooth:
r_p = smooth(mainarr[field][:,i],
window_len=5,
clip_tail_zeros=False)
finearr[field][:,i] = np.interp(lbt, lbt_cell, r_p)
else:
r_p = mainarr[field][:,i]
finearr[field][:,i] = np.interp(lbt, lbt_cell, mainarr[field][:,i])
else:
if do_smooth:
r_p = smooth(mainarr[field],
window_len=5,
clip_tail_zeros=False) # odd number results in +1 element in the smoothed array.
else:
r_p = mainarr[field]
finearr[field] = np.interp(lbt, lbt_cell, r_p)
return finearr
def serialize(allresults, all_final_idxs, nnza_all, nnza_cell,
istep_max = 50,
prg_dir="./",
out_base="./",
nstep_too_short_main = 100):
nouts = nnza_cell.nnza["nout"][:istep_max]
print("Considering nouts: ", nouts)
"""
For an unknown reason, some of galaxies are repeatedly analized, and found in the result_lambda pickle.
Those repeatition must be removed from the begining.
Until then, go through one more step to remove duplicates
"""
all_sample_idxs=pickle.load(open(prg_dir + "all_sample_idxs.pickle", "rb"))
serial_out_dir = out_base+"result_serial/"
if not os.path.isdir(serial_out_dir):
os.mkdir(serial_out_dir)
# Build serial results and dump.
# Chunks of results in each nout (lambda_results/{nout}/result_sub_sample_{nout})
Allallidxs=[]
for result_thisnout in allresults:
Allallidxs.append(np.array([agal.idx for agal in result_thisnout]))
Allallids=[]
for result_thisnout in allresults:
Allallids.append(np.array([agal.id for agal in result_thisnout]))
info = Info(nout=nouts[0])
tc = cosmology.Timeconvert(info, zred_now=0)
all_fid_ok=[]
all_fidx_ok=[]
for i, this_idx in enumerate(all_final_idxs):
fname = prg_dir + "{}_adp.pickle".format(this_idx)
if not os.path.isfile(fname):
# dump_prgs broken in the middle.
continue
#print(i, "IDX=",this_idx)
adp = pickle.load(open(fname, "rb"))
if min(adp[0][0]["nstep"][adp[0][0]["nstep"] > 0]) > nstep_too_short_main:
print("Too short main tree. SKIP")
continue
# Append age to maintree and mainresult.
lbt = tc.zred2gyr(nnza_all.a2b(adp[0][0]["nstep"],"nstep","zred"),z_now=0)
adp[0][0] = recf.append_fields(adp[0][0], "time", lbt)
max_step=len(allresults)
this_gal = serialize_results.Serial_result(adp)
cnt_merger=0
bad_main=False
for i, this_sats in enumerate(adp):
nout_results=[]
for sat in this_sats:
sat_results=[]
for ss in sat:
nout=nnza_all.step2out([ss["nstep"]]) # NOT nnza_cell.
#print(nout)
if nout in nouts:
#print(nout, ss["idx"])
istep_cell = np.where(nnza_cell.nnza["nout"] == nout)[0][0]
allresults_now=allresults[istep_cell]
allresults_now_idx=Allallidxs[istep_cell]
i_result = np.where(allresults_now_idx == ss["idx"])[0]
#print("len results", len(allresults_now), "i_result", i_result)
if len(i_result) > 0:
sat_results.append(allresults_now[i_result[0]])
sat_results[-1].nout=int(nout)
sat_results[-1].nstep=nnza_cell.nnza["nstep"][istep_cell]
#print(len(sat_results))
nout_results.append(sat_results)
# Merger properties
if i == 0:
this_gal.main_arr = serialize_results.galresult2rec(sat_results, is_main=True)
#print(len(this_gal.main_arr.nstep), this_gal.main_arr.nstep.ptp())
if len(this_gal.main_arr.nstep) <= this_gal.main_arr.nstep.ptp():
#bad_main=True
this_gal.main_arr = fill_main(this_gal.main_arr, nnza_cell, tc)
this_gal.finearr = interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True)
#print("BAD", bad_main)
elif len(sat_results) > 0 and sat_results[0].mstar > 0.0:
#print("merger2")
this_gal.add_merger(sat_results, sat)
cnt_merger+=1
#this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr,
# serialize_results.galresult2rec(sat_results)))
if bad_main:
print("Bad main. Break")
break
#print(i)
this_gal.data.append(nout_results)
pickle.dump(this_gal, open(serial_out_dir+"serial_result{}.pickle".format(this_idx), "wb"))
all_fidx_ok.append(this_idx)
#break
np.savetxt(serial_out_dir+"all_fidx_ok.txt", all_fidx_ok, fmt="%d")
return [pickle.load(open(serial_out_dir+"serial_result{}.pickle".format(this_idx), "rb")) for this_idx in all_fidx_ok]
| 1.914063 | 2 |
resticweb/interfaces/repository_list.py | XXL6/resticweb | 1 | 12792824 | import os
# from resticweb.dictionary.resticweb_constants import Repository as Rep
from resticweb.models.general import Repository, Snapshot, SnapshotObject, JobParameter, RepositoryType
from resticweb.tools.local_session import LocalSession
from resticweb.misc.credential_manager import credential_manager
# from .repository import ResticRepository
from .repository_formatted import ResticRepositoryFormatted
from resticweb.tools.repository_tools import sync_snapshots, sync_snapshot_objects, sync_single_snapshot
import json
import traceback
from datetime import datetime
from resticweb.dateutil import parser
import logging
logger = logging.getLogger('debugLogger')
# repository_add_to_db is used instead of the following method
# it's located under resticweb.tools.job_callbacks
def add_repository(info):
with LocalSession() as session:
repository = Repository(
name=info['name'],
description=info.get('description'),
repo_id=info.get('repo_id'),
address=info['address'],
parameters=info['parameters'],
data=info.get('data'),
credential_group_id=info.get('credential_group_id'),
repository_type_id=info['repository_type_id'],
concurrent_uses=info.get('concurrent_uses'),
timeout=info.get('timeout')
)
session.add(repository)
session.commit()
return repository.id
def update_repository(info, repo_id, sync_db=False, unsync_db=False):
with LocalSession() as session:
repository = session.query(Repository).filter_by(id=repo_id).first()
if repository.name != info['name']:
credential_manager.set_service_id(repository.credential_group_id, info['name'])
repository.name = info['name']
repository.description = info.get('description')
repository.address = info['address']
repository.cache_repo = info['cache_repo']
repository.concurrent_uses = info['concurrent_uses']
repository.timeout = info['timeout']
repository.parameters = json.dumps(info['parameters'])
session.commit()
from resticweb.tools.job_build import JobBuilder
if sync_db:
job_builder = JobBuilder(job_name=f"Sync repo {repository.name}", job_class='repository_sync', parameters=dict(repository=repository.id, sync_type='full'))
job_builder.run_job()
if unsync_db:
'''
for snapshot in repository.snapshots:
snapshot.snapshot_objects = []
session.commit()
'''
job_builder = JobBuilder(job_name=f'Clear db from repo {repository.name}', job_class='clear_snapshot_objects', parameters=dict(repo_id=repository.id))
job_builder.run_job()
return repo_id
def delete_repositories(ids):
credential_groups = []
with LocalSession() as session:
for id in ids:
repo_to_remove = session.query(Repository).filter_by(id=id).first()
# credential_manager.remove_credentials(repo_to_remove.credential_group_id)
credential_groups.append(repo_to_remove.credential_group_id)
job_parameters = session.query(JobParameter).filter_by(param_name='repository', param_value=id).all()
for parameter in job_parameters:
parameter.param_value = None
session.delete(repo_to_remove)
session.commit()
for id in credential_groups:
credential_manager.remove_credentials(id)
def get_repository_from_snap_id(snap_id):
with LocalSession() as session:
snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first()
repository = session.query(Repository).filter_by(id=snapshot.repository_id).first()
return repository
# gets basic info about the repository from the database. Also grabs the stats
# from the repository itself like the total size and number of files.
# if use_cache is set to False then the repo stats are grabbed from repo itself
# which might take a bit of time
def get_info(id, repository_interface=None, use_cache=False, repo_status=True):
info_dict = {}
misc_data = None
if repo_status:
if not repository_interface:
repository_interface = get_formatted_repository_interface_from_id(id)
repo_status = repository_interface.is_offline()
if not use_cache:
if not repo_status:
misc_data = repository_interface.get_stats()
with LocalSession() as session:
repository = session.query(Repository).filter_by(id=id).first()
repository_type = session.query(RepositoryType).filter_by(id=repository.repository_type_id).first()
if misc_data:
repository.data = json.dumps(misc_data)
session.commit()
else:
try:
misc_data = json.loads(repository.data)
except TypeError:
misc_data = dict(data=repository.data)
misc_data['status'] = repo_status
info_dict = dict(
id=repository.id,
name=repository.name,
description=repository.description,
repo_id=repository.repo_id,
address=repository.address,
repository_data=repository.data,
concurrent_uses=repository.concurrent_uses,
timeout=repository.timeout,
data=misc_data,
cache_repo=repository.cache_repo,
repository_type=repository_type.name
)
return info_dict
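# A minimal, hypothetical usage sketch (not part of the original module): the
# repo id below is made up. It only illustrates the cached vs. live behaviour
# described in the comment block above get_info().
def _demo_get_info(repo_id=1):
    cached = get_info(repo_id, use_cache=True)    # stats read back from the local DB
    live = get_info(repo_id, use_cache=False)     # stats queried from the repository itself
    return cached.get('name'), live.get('data', {}).get('status')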
# returns a list of snapshots and places them into the database from the
# repository if use_cache is set to False. Returns list of snapshots from
# the database if use_cache is set to True
def get_snapshots(id, use_cache=False):
repository_interface = get_formatted_repository_interface_from_id(id)
snapshots = []
if not use_cache and repository_interface.is_online():
snapshots = repository_interface.get_snapshots()
return snapshots if snapshots else {}
else:
with LocalSession() as session:
snapshots = session.query(Snapshot).filter_by(repository_id=id).all()
return snapshots
def get_snapshot(repo_id, snapshot_id, use_cache=False):
repository_interface = get_formatted_repository_interface_from_id(repo_id)
if not use_cache and repository_interface.is_online():
snapshot = repository_interface.get_snapshots(snapshot_id)[0]
return snapshot if snapshot else {}
else:
with LocalSession() as session:
snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first()
return snapshot
def insert_snapshots(items, repo_id):
with LocalSession() as session:
for item in items:
item['snap_id'] = item.pop('id')
item['snap_short_id'] = item.pop('short_id')
item['snap_time'] = item.pop('time')
if item['snap_time']:
main_time = item['snap_time'][:-7]
extra = item['snap_time'][-6:]
main_time = main_time + extra
# item['snap_time'] = datetime.strptime(main_time, "%Y-%m-%dT%H:%M:%S.%f%z")
item['snap_time'] = parser.parse(main_time)
new_snapshot = Snapshot(
snap_id=item.get('snap_id'),
snap_short_id=item.get('snap_short_id'),
snap_time=item.get('snap_time'),
hostname=item.get('hostname'),
username=item.get('username'),
tree=item.get('tree'),
repository_id=repo_id,
paths=json.dumps(item.get('paths')),
tags=json.dumps(item.get('tags'))
)
session.add(new_snapshot)
session.commit()
def delete_snapshot(repo_id, snapshot_id):
with LocalSession() as session:
snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first()
session.delete(snapshot)
session.commit()
def get_snapshot_objects(snap_id, use_cache=False):
with LocalSession() as session:
snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first()
repository = session.query(Repository).filter_by(id=snapshot.repository_id).first()
repository_interface = get_formatted_repository_interface_from_id(snapshot.repository_id)
if not use_cache and repository_interface.is_online():
# if the repo is online, we can purge the snapshots from db as we will
# just re-add them fresh from the actual repo
object_list = repository_interface.get_snapshot_ls(snap_id)
# if repository.cache_repo:
# sync_snapshot_objects(repository.id, snap_id, repository_interface=repository_interface)
return object_list
else:
with LocalSession() as session:
snapshot_object_list = session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all()
snapshot_dict_list = [snapshot_object.to_dict() for snapshot_object in snapshot_object_list]
return snapshot_dict_list
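# Hypothetical sketch only (not part of the original module): listing one
# snapshot's contents, falling back to the cached rows when the repository
# cannot be reached. The snapshot id and the 'path' key are assumptions.
def _demo_snapshot_listing(snap_id='0123abcd'):
    entries = get_snapshot_objects(snap_id, use_cache=False) or []
    return [entry.get('path') for entry in entries]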
def delete_snapshot_objects(snap_id):
pass
def insert_snapshot_objects(items, snap_id):
with LocalSession() as session:
for item in items:
if item.get('mtime'):
try:
item['mtime'] = parser.parse(item['mtime'])
except ValueError:
item['mtime'] = None
item['modified_time'] = item.pop("mtime")
if item.get('atime'):
try:
item['atime'] = parser.parse(item['atime'])
except ValueError:
item['atime'] = None
item['accessed_time'] = item.pop("atime")
if item.get('ctime'):
try:
item['ctime'] = parser.parse(item['ctime'])
except ValueError:
item['ctime'] = None
item['created_time'] = item.pop("ctime")
new_item = SnapshotObject(
name=item.get('name'),
type=item.get('type'),
path=item.get('path'),
uid=item.get('uid'),
gid=item.get('gid'),
size=item.get('size'),
mode=item.get('mode'),
struct_type=item.get('struct_type'),
modified_time=item.get('modified_time'),
accessed_time=item.get('accessed_time'),
created_time=item.get('created_time'),
snapshot_id=snap_id
)
session.add(new_item)
session.commit()
def get_engine_repositories():
repository_list = []
with LocalSession() as session:
repositories = session.query(Repository).filter_by()
for repository in repositories:
repository_list.append((repository.id, repository.name))
return repository_list
def get_snapshot_info(id):
with LocalSession() as session:
snapshot = session.query(Snapshot).filter_by(snap_id=id).first()
if snapshot.paths:
try:
snapshot.paths = json.loads(snapshot.paths)
except ValueError:
pass
if snapshot.tags:
try:
snapshot.tags = json.loads(snapshot.tags)
except ValueError:
pass
return snapshot
def get_repository_status(id):
repository_interface = get_formatted_repository_interface_from_id(id)
status = repository_interface.is_online()
if status is None:
return "Couldn't get status"
else:
if status:
return "Online"
else:
return "Offline"
def get_repository_name(id):
with LocalSession() as session:
repository = session.query(Repository).filter_by(id=id).first()
if repository:
return repository.name
else:
return None
def get_repository_address(id):
with LocalSession() as session:
repository = session.query(Repository).filter_by(id=id).first()
if repository:
return repository.address
else:
return None
def get_repository_password(id):
with LocalSession() as session:
repository = session.query(Repository).filter_by(id=id).first()
if repository:
return credential_manager.get_credential(repository.credential_group_id, "repo_password")
else:
return None
def get_formatted_repository_interface_from_id(id):
try:
with LocalSession() as session:
repository = session.query(Repository).filter_by(id=id).first()
if repository:
credential_list = credential_manager.get_group_credentials(repository.credential_group_id)
if credential_list:
repo_password = credential_list.pop('repo_password')
                    repository_interface = ResticRepositoryFormatted(repository.address, repo_password, credential_list if len(credential_list) > 0 else None, id)
                    return repository_interface
except Exception as e:
logger.error(e)
logger.error("trace:" + traceback.format_exc())
return None
| 1.8125 | 2 |
expect.py | chendong2016/chendong2016.github.io | 0 | 12792825 | #!/usr/bin/python
import pexpect
import os
import sys
def git_expect(repodir, u, p):
os.chdir(repodir)
os.system('git pull')
os.system('git add .')
os.system('git commit -m update')
foo = pexpect.spawn('git push')
foo.expect('.*Username.*:')
foo.sendline(u)
foo.expect('.*ssword:*')
foo.sendline(p)
print foo.read()
def main(argv):
git_expect(argv[1], argv[2], argv[3])
if __name__ == '__main__':
main(sys.argv)
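# Example invocation (comment only; the path and credentials are placeholders):
#   python expect.py /path/to/repo my_git_user my_password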
| 2.046875 | 2 |
latent_with_splitseqs/base/classifier_base.py | fgitmichael/SelfSupevisedSkillDiscovery | 0 | 12792826 | import abc
import torch
from code_slac.network.base import BaseNetwork
from latent_with_splitseqs.config.fun.get_obs_dims_used_df import get_obs_dims_used_df
class SplitSeqClassifierBase(BaseNetwork, metaclass=abc.ABCMeta):
def __init__(self,
obs_dim,
seq_len,
obs_dims_used=None,
obs_dims_used_except=None,
):
super(SplitSeqClassifierBase, self).__init__()
self.used_dims = get_obs_dims_used_df(
obs_dim=obs_dim,
obs_dims_used=obs_dims_used,
obs_dims_used_except=obs_dims_used_except,
)
self._seq_len = seq_len
@property
def seq_len(self):
return self._seq_len
def _check_inputs(self, obs_seq, skill):
batch_dim = 0
seq_dim = 1
data_dim = -1
if skill is not None:
assert skill.size(batch_dim) == obs_seq.size(batch_dim)
assert skill.size(data_dim) == self.skill_dim
assert len(skill.shape) == 2
assert len(obs_seq.shape) == 3
def forward(self,
obs_seq,
skill=None
):
batch_dim = 0
seq_dim = 1
data_dim = -1
self._check_inputs(
obs_seq=obs_seq,
skill=skill
)
obs_seq = obs_seq[..., self.used_dims]
if self.training:
return self.train_forwardpass(
obs_seq=obs_seq,
skill=skill,
)
else:
with torch.no_grad():
return self.eval_forwardpass(
obs_seq=obs_seq,
skill=skill,
)
@abc.abstractmethod
def train_forwardpass(
self,
obs_seq,
skill,
):
raise NotImplementedError
@abc.abstractmethod
def eval_forwardpass(
self,
obs_seq,
skill,
**kwargs,
):
raise NotImplementedError
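# A minimal, hypothetical concrete subclass (not part of the original code base),
# sketched only to show what the abstract interface expects: both forward passes
# return some score over the observation sequence, and `skill_dim` has to be
# supplied by the subclass because `_check_inputs` refers to `self.skill_dim`.
class _DummySplitSeqClassifier(SplitSeqClassifierBase):

    def __init__(self, obs_dim, seq_len, skill_dim, **kwargs):
        super().__init__(obs_dim=obs_dim, seq_len=seq_len, **kwargs)
        self.skill_dim = skill_dim

    def train_forwardpass(self, obs_seq, skill):
        # stand-in score: mean over the sequence and feature dimensions
        return obs_seq.mean(dim=(1, 2))

    def eval_forwardpass(self, obs_seq, skill, **kwargs):
        return obs_seq.mean(dim=(1, 2))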
| 2.078125 | 2 |
simplecommit/__init__.py | lightningchen34/simplecommit | 0 | 12792827 |
from __future__ import absolute_import
from .commit import *
name = "simplecommit"
if __name__=="__main__":
run()
| 1.453125 | 1 |
benchtmpl/workflow/parameter/base.py | scailfin/benchmark-templates | 0 | 12792828 | # This file is part of the Reproducible Open Benchmarks for Data Analysis
# Platform (ROB).
#
# Copyright (C) 2019 NYU.
#
# ROB is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Base class for workflow template parameters. Each parameter has a set of
properties that are used to (i) identify the parameter, (ii) define a nested
parameter structure, and (iii) render UI forms to collect parameter values.
"""
from benchtmpl.error import InvalidParameterError
import benchtmpl.workflow.parameter.declaration as pd
"""Special value for as-property that indicates user input for target path of
uploaded files.
"""
AS_INPUT = '$input'
class ParameterBase(object):
"""Base class for template parameter and parameter argument values. The
base class maintains the unique parameter identifier and the information
about the data type.
"""
def __init__(self, identifier, data_type):
"""Initialize the unique identifier and data type. Raises value error
if the given data type identifier is not valid.
Parameters
----------
identifier: string
Unique parameter identifier
data_type: string
Identifier for parameter data type
Raises
------
benchtmpl.error.InvalidParameterError
"""
if not data_type in pd.DATA_TYPES:
raise InvalidParameterError('invalid data type \'{}\''.format(data_type))
self.identifier = identifier
self.data_type = data_type
def is_bool(self):
"""Test if data type for the parameter declaration is DT_BOOL.
Returns
-------
bool
"""
return self.data_type == pd.DT_BOOL
def is_file(self):
"""Test if data type for the parameter declaration is DT_FILE.
Returns
-------
bool
"""
return self.data_type == pd.DT_FILE
def is_float(self):
"""Test if data type for the parameter declaration is DT_DECIMAL.
Returns
-------
bool
"""
return self.data_type == pd.DT_DECIMAL
def is_int(self):
"""Test if data type for the parameter declaration is DT_INTEGER.
Returns
-------
bool
"""
return self.data_type == pd.DT_INTEGER
def is_list(self):
"""Test if data type for the parameter declaration is DT_LIST.
Returns
-------
bool
"""
return self.data_type == pd.DT_LIST
def is_record(self):
"""Test if data type for the parameter declaration is DT_RECORD.
Returns
-------
bool
"""
return self.data_type == pd.DT_RECORD
def is_string(self):
"""Test if data type for the parameter declaration is DT_STRING.
Returns
-------
bool
"""
return self.data_type == pd.DT_STRING
class TemplateParameter(ParameterBase):
"""The template parameter is a simple wrapper around a dictionary that
contains a parameter declaration. The wrapper provides easy access to the
different components of the parameter declaration.
"""
def __init__(self, obj, children=None):
"""Initialize the different attributes of a template parameter
declaration from a given dictionary.
Parameters
----------
obj: dict
Dictionary containing the template parameter declaration properties
children: list(benchtmpl.workflow.parameter.base.TemplateParameter), optional
Optional list of parameter children for parameter lists or records
"""
super(TemplateParameter, self).__init__(
identifier=obj[pd.LABEL_ID],
data_type = obj[pd.LABEL_DATATYPE]
)
self.obj = obj
self.name = obj[pd.LABEL_NAME]
self.description = obj[pd.LABEL_DESCRIPTION]
self.index = obj[pd.LABEL_INDEX]
self.default_value = obj[pd.LABEL_DEFAULT] if pd.LABEL_DEFAULT in obj else None
self.is_required = obj[pd.LABEL_REQUIRED]
self.values = obj[pd.LABEL_VALUES] if pd.LABEL_VALUES in obj else None
self.parent = obj[pd.LABEL_PARENT] if pd.LABEL_PARENT in obj else None
self.as_constant = obj[pd.LABEL_AS] if pd.LABEL_AS in obj else None
self.children = children
def add_child(self, para):
"""Short-cut to add an element to the list of children of the parameter.
Parameters
----------
para: benchtmpl.workflow.parameter.base.TemplateParameter
Template parameter instance for child parameter
"""
self.children.append(para)
self.children.sort(key=lambda p: (p.index, p.identifier))
def as_input(self):
"""Flag indicating whether the value for the as constant property is
the special value that indicates that the property value is provided
by the user.
"""
return self.as_constant == AS_INPUT
def get_constant(self):
"""Get the value of the as_constant property.
Returns
-------
string
"""
return self.as_constant
def has_children(self):
"""Test if a parameter has children. Only returns True if the list of
children is not None and not empty.
Returns
-------
bool
"""
if not self.children is None:
return len(self.children) > 0
return False
def has_constant(self):
"""True if the as_constant property is not None.
Returns
-------
bool
"""
return not self.as_constant is None
def prompt(self):
"""Get default input prompt for the parameter declaration. The prompt
contains an indication of the data type, the parameter name and the
default value (if defined).
Returns
-------
string
"""
val = str(self.name)
# Add text that indicates the parameter type
if self.is_bool():
val += ' (bool)'
elif self.is_file():
val += ' (file)'
elif self.is_float():
val += ' (decimal)'
elif self.is_int():
val += ' (integer)'
elif self.is_string():
val += ' (string)'
if not self.default_value is None:
if self.is_bool() or self.is_float() or self.is_int():
val += ' [default ' + str(self.default_value) + ']'
else:
val += ' [default \'' + str(self.default_value) + '\']'
return val + ': '
def to_dict(self):
"""Get the dictionary serialization for the parameter declaration.
Returns
-------
dict
"""
return self.obj
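# A minimal, hypothetical usage sketch (not part of the original module). The
# declaration dictionary is keyed by the LABEL_* constants of the declaration
# module, so the sketch builds it from those constants rather than from literal
# strings; the parameter itself ('threshold') is made up.
def _demo_parameter():
    obj = {
        pd.LABEL_ID: 'threshold',
        pd.LABEL_DATATYPE: pd.DT_DECIMAL,
        pd.LABEL_NAME: 'Threshold',
        pd.LABEL_DESCRIPTION: 'Cut-off value used by the workflow',
        pd.LABEL_INDEX: 0,
        pd.LABEL_REQUIRED: True,
        pd.LABEL_DEFAULT: 0.5
    }
    para = TemplateParameter(obj)
    # prompt() renders as "Threshold (decimal) [default 0.5]: "
    return para.prompt()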
| 2.859375 | 3 |
main.py | maphouse/create-subtitles | 0 | 12792829 |
#requires oscar.mp4 video file in same folder
#transcript text file "oscar4.txt"
#might have to be in a folder name called oscar
#change certain character variables
import imageio
imageio.plugins.ffmpeg.download()
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
import time
from time import strftime,localtime
from postprocess_and_fuse_subs import compileSubs
import pickle
import os
#adjust sleeping time as needed - ES
#adjust switches as needed
sleepingTime = 400
#___SWITCHES(defaults)___#
#ES: cut transcript into snippets based on the transcript's timestamps (must be set to True for other processes to run)
snipTranscript = True
#ES: cut video into snippets based on the transcript's timestamps (must be set to True for other processes to run)
snipVideos = True
#ES: upload video snippets
uploadVideos = True
#ES: if the video/caption upload process was terminated unexpectedly before and you want to continue where you left off (uploadVideos must still be set to True):
resumeUploads = False
#ES: upload snippet transcripts (.txt)
uploadTranscripts = True
#ES: download snippet subtitle files (.vtt)
downloadCaptions = True
#ES: delete uploaded video snippets from your Youtube account once subtitle processing is complete
deleteVideos = False
#ES: upload the full video and compiled transcript to your Youtube account once complete
uploadFull = False
#ES: combine vtt snippets that were downloaded from Youtube into a total subtitle file.
combineSubtitles = True
#ES: the following switches control how subtitles are altered when concatenating snippets (i.e. when combineSubtitles = True)
#ES A feature created by RG that has yet to be explored...
placeBasedTimestamping = False
#ES: resample subtitles to prevent cut-up phrases, lone-word subtitles, and improve the subtitle structure overall (can lead to short, choppy, fast subtitles that are hard to read)
resampleSubtitles = False
#ES: IF you enabled 'resampleSubtitles' (above), you have the option to make subtitle entries full sentences (not recommended, since some timestamp/subtitle units can end up being excessively large)
fullSentenceSubtitles = False
#ES: IF you enabled 'resampleSubtitles' (above), you have the option to remove subtitle entries which may be a single word (and put them in an adjacent subtitle (verify))
removeLoneWords = False
#____________#
#ES: USER INTERVIEW SECTION
def verify_y_n(a):
while True:
a = a.lower().strip()
if a == 'y' or a == 'n':
return a
else:
a = raw_input("Please answer 'y' or 'n': ")
continue
def verify_y_n_none(a):
while True:
a = a.lower().strip()
if a == 'y' or a == 'n' or a == '':
return a
else:
a = raw_input("Please answer 'y' or 'n', or leave the answer blank by hitting 'Enter': ")
continue
print "\n\n"
print "This application creates subtitles for a video for which you have an associated transcript. Make sure you have gone over README.md before proceeding."
time.sleep(1)
print "You may terminate the application at any point by pressing Ctrl+C (Cmd+C on Mac)."
time.sleep(1)
print "\n"
print "This tool:\n- snips your transcript (.txt) into text snippets based on its timestamps,\n- snips the associated video accordingly into video snippets,\n- uploads these video snippets to Youtube as private videos only visible to your account,\n- uploads the text snippets to Youtube as transcript files for these video snippets,\n- allows Youtube to sync the video and text snippets\n- downloads the text snippets as subtitle files (.vtt),\n- stitches these subtitle files together into a single subtitle file for your video.\n\nYou may switch these processes 'on' or 'off' depending on which steps you would like to run. If this is your first time running the tool, simply leave the following answers blank. For more advanced users or users who have already used this tool, please select which processes you would like to run: \n\n"
time.sleep(2)
answer = raw_input("\n1/7 Will you be cutting your video into video snippets (y) ")
answer = verify_y_n_none(answer)
if answer == 'y':
snipVideos = True
elif answer == 'n':
snipVideos = False
elif answer == '':
snipVideos = True
answer = raw_input("\n2/7 Will you be uploading video snippets to Youtube for syncing? (y) ")
answer = verify_y_n_none(answer)
if answer == 'y':
uploadVideos = True
snipVideos = True
elif answer == 'n':
uploadVideos = False
elif answer == '':
uploadVideos = True
answer = raw_input("\n3/7 Will you be resuming video uploads from a previously-initiated process? (n) ")
answer = verify_y_n_none(answer)
if answer == 'y':
resumeUploads = True
elif answer == 'n':
resumeUploads = False
elif answer == '':
resumeUploads = False
answer = raw_input("\n4/7 Will you be uploading text snippets for syncing with your video snippets? (y) ")
answer = verify_y_n_none(answer)
if answer == 'y':
uploadTranscripts = True
elif answer == 'n':
uploadTranscripts = False
elif answer == '':
uploadTranscripts = True
answer = raw_input("\n5/7 Will you be downloading the generated subtitle snippets from Youtube? (y) ")
answer = verify_y_n_none(answer)
if answer == 'y':
downloadCaptions = True
elif answer == 'n':
downloadCaptions = False
elif answer == '':
downloadCaptions = True
answer = raw_input("\n6/7 Would you like your uploaded video snippets to be deleted from Youtube once subtitles have been successfully generated? (n) ")
answer = verify_y_n_none(answer)
if answer == 'y':
deleteVideos = True
elif answer == 'n':
deleteVideos = False
elif answer == '':
deleteVideos = False
answer = raw_input("\n7/7 Will you be combining the downloaded subtitle snippets into a single subtitle file for your video? (y) ")
answer = verify_y_n_none(answer)
if answer == 'y':
combineSubtitles = True
elif answer == 'n':
combineSubtitles = False
elif answer == '':
combineSubtitles = True
if combineSubtitles == True:
answer = raw_input("\n7.1 Would you like to reorganize subtitles according to punctuation? (Experimental; can lead to short, choppy, fast subtitles that are hard to read) (n) ")
answer = verify_y_n_none(answer)
if answer == 'y':
resampleSubtitles = True
elif answer == 'n':
resampleSubtitles = False
elif answer == '':
resampleSubtitles = False
if resampleSubtitles == True:
answer = raw_input("\n7.1.1 Would you like to reorganize subtitles to prioritize keeping full sentences intact? (Experimental; this feature is not recommended since subtitle units tend to become excessively long) (n) ")
answer = verify_y_n_none(answer)
if answer == 'y':
fullSentenceSubtitles = True
elif answer == 'n':
fullSentenceSubtitles = False
elif answer == '':
fullSentenceSubtitles = False
answer = raw_input("\n7.1.2 Would you like to reorganize subtitles to remove lone words? (Experimental) (n) ")
answer = verify_y_n_none(answer)
if answer == 'y':
removeLoneWords = True
elif answer == 'n':
removeLoneWords = False
elif answer == '':
removeLoneWords = False
answer = raw_input("\n7.2 Would you like to reorganize subtitles according to the presence of place names? (Experimental) (n) ")
answer = verify_y_n_none(answer)
if answer == 'y':
placeBasedTimestamping = True
elif answer == 'n':
placeBasedTimestamping = False
elif answer == '':
placeBasedTimestamping = False
print "\n"
folderName = raw_input("Enter the name of the folder containing your transcript and/or video and/or subtitle files\n(this folder must be located inside the 'files' folder): ")
try:
verifyExistence = os.stat(folderName).st_size
except Exception as e:
print e
print "The folder named '" + folderName + "' does not exist in the current directory. Please see README.md for instructions."
print "exiting application..."
time.sleep(2)
exit()
print "\n"
if snipVideos == True or uploadTranscripts == True or resumeUploads == True or downloadCaptions == True or deleteVideos == True:
combine_only = False
fileName = raw_input("Enter the file name of your transcript (excluding the \".txt\" extention): ")
try:
verifyExistence = os.stat(folderName + '/' + fileName + '.txt').st_size
except Exception as e:
print e
print "The file named '" + fileName + ".txt' does not exist in the folder '" + folderName + "'. Please see README.md for instructions."
print "exiting application..."
time.sleep(2)
exit()
print "\n"
originalVideo = raw_input("Enter the file name of your video (this time including the file's extention): ")
try:
verifyExistence = os.stat(folderName + '/' + originalVideo).st_size
except Exception as e:
print e
print "The file named '" + originalVideo + "' does not exist in the folder '" + folderName + "'. Please see README.md for instructions."
print "exiting application..."
time.sleep(2)
exit()
print "\n"
videoSize = os.stat(folderName + '/' + originalVideo).st_size/1000000
answer = raw_input("If this is your first time running this tool on the files you have indicated, you will temporarily require " + str(videoSize) + " Mb available space on your hard drive to run this program. Continue? (y/n) ")
answer = verify_y_n(answer)
if answer == "n":
print "Please make sure you have the available space on your hard drive, and then restart the program."
print "exiting application..."
time.sleep(2)
exit()
print "\n"
elif combineSubtitles == True:
#in this case, the user has chosen to only combine subtitles. the switch combine_only allows some different functionality down the road
combine_only = True
fileName = raw_input("In order to accurately combine subtitle files, you will need to create a list of timestamps demarcating the length of each video to which your subtitle files are associated. These values will be used as offsets for accurately combining your subtitle files.\nEach timestamp should be written as follows [HH:MM:SS.00], followed by a newline.\n\nPlease enter the file name of your timestamp list (excluding the \".txt\" extention): ")
else:
print "You have not chosen any options for running this application. Exiting..."
exit()
while True:
language = raw_input("Enter the language code of your video and transcript or the intended language code of your subtitles (e.g. en, fr, es, etc.):\n(You can refer to the second column in http://www.loc.gov/standards/iso639-2/php/code_list.php for the appropriate two-letter 'ISO 639-1' language code.)\n")
if language != '':
verifyLanguage = raw_input("\nYou have entered '" + language + "' as the language code for your transcript and video files. Youtube will use this code for processing your files. Continue? (y/n) ")
        if verifyLanguage.lower().strip() in ('', 'y'):
break
#if combineSubtitles == True:
print "\n\n"
print "\n6.3 If your transcript has speaker names (e.g. the interviewer or interviewee's names) that precede their discourse (e.g. \"Emmanuel: Hi, I'd like to ask you a few questions...\"), please input them. If this does not apply to your transcript, simply leave the following two answers blank by pressing the 'Enter' key."
time.sleep(1)
interviewer = raw_input("\n6.3.1 Please input your interviewer's name as it appears in the transcript: ")
interviewee = raw_input("\n6.3.2 Please input your interviewee's name as it appears in the transcript: ")
print "\n"
#____________#
# let rodolphe know if there is a problem with playlist id, might need to create a playlist in youtube online and copy url id to script
#playlistID = "PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65"
#language = 'fr'
#change these variables according to what story you want to process - ES
#interviewer = "C.V."
#interviewee = "V.S."
#where the video and txt files are stored
#folderName = 'venant'
#fileName refers to the name of the input .txt file (excluding .txt)
#fileName = 'venant'
#originalVideo refers to the name of the video file including its ext
#originalVideo = "venant.mp4"
#interviewer = "E.H."
#interviewee = "E.M."
#fileName = 'Frederic'
#originalVideo = "Frederic.mov"
#interviewer = "M.M."
#interviewee = "B.K."
#fileName = 'Berthe'
#originalVideo = "DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv"
#interviewer = "S.G."
#interviewee = "O.G."
#folderName = 'oscar'
#fileName = 'oscar'
#originalVideo = "Oscar.mp4"
### START BOILERPLATE CODE
# Sample Python code for user authorization
import httplib2
import os
import sys
import httplib
import random
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret.
"""
to create a client secret file:
google apis dashboard --> create a new project
on the resulting dashboard, "enable apis and get credntials like keys"
search for youtube api
click "YouTube Data API v3" and ENABLE it
click "create credentials"
create and "OAUT client id"
"""
#CLIENT_SECRETS_FILE = "client_secret.json"
#api key is <KEY>
#client id is in client_id.json
CLIENT_SECRETS_FILE = "client_id.json"
# This OAuth 2.0 access scope allows for full read/write access to the
# authenticated user's account and requires requests to use an SSL connection.
YOUTUBE_READ_WRITE_SSL_SCOPE = "https://www.googleapis.com/auth/youtube.force-ssl"
API_SERVICE_NAME = "youtube"
API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = "WARNING: Please configure OAuth 2.0"
# Authorize the request and store authorization credentials.
def get_authenticated_service(args):
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE,
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage("youtube-api-snippets-oauth2.json")
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage, args)
# Trusted testers can download this discovery document from the developers page
# and it should be in the same directory with the code.
return build(API_SERVICE_NAME, API_VERSION,
http=credentials.authorize(httplib2.Http()))
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(request, resource, method):
response = None
error = None
retry = 0
while response is None:
try:
print "Uploading file..."
status, response = request.next_chunk()
if response is not None:
if method == 'insert' and 'id' in response:
print "Video id '%s' was successfully uploaded." % response['id']
videoid = response['id']
elif method != 'insert' or 'id' not in response:
print response
else:
exit("The upload failed with an unexpected response: %s" % response)
except HttpError, e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,e.content)
else:
raise
except RETRIABLE_EXCEPTIONS, e:
error = "A retriable error occurred: %s" % e
if error is not None:
print error
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print "Sleeping %f seconds and then retrying..." % sleep_seconds
time.sleep(sleep_seconds)
return response['id']
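# Backoff illustration (comment only): after the n-th retriable failure the code
# sleeps a random amount of up to 2**n seconds, i.e. up to 2 s, 4 s, 8 s, ...,
# 1024 s, and gives up once retry exceeds MAX_RETRIES (10).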
if uploadTranscripts == True or resumeUploads == True or downloadCaptions == True or deleteVideos == True:
args = argparser.parse_args()
service = get_authenticated_service(args)
def print_results(results):
print(results)
# Build a resource based on a list of properties given as key-value pairs.
# Leave properties with empty values out of the inserted resource.
def build_resource(properties):
resource = {}
for p in properties:
# Given a key like "snippet.title", split into "snippet" and "title", where
# "snippet" will be an object and "title" will be a property in that object.
prop_array = p.split('.')
ref = resource
for pa in range(0, len(prop_array)):
is_array = False
key = prop_array[pa]
# Convert a name like "snippet.tags[]" to snippet.tags, but handle
# the value as an array.
if key[-2:] == '[]':
key = key[0:len(key)-2:]
is_array = True
if pa == (len(prop_array) - 1):
# Leave properties without values out of inserted resource.
if properties[p]:
if is_array:
ref[key] = properties[p].split(',')
else:
ref[key] = properties[p]
elif key not in ref:
# For example, the property is "snippet.title", but the resource does
# not yet have a "snippet" object. Create the snippet object here.
# Setting "ref = ref[key]" means that in the next time through the
# "for pa in range ..." loop, we will be setting a property in the
# resource's "snippet" object.
ref[key] = {}
ref = ref[key]
else:
# For example, the property is "snippet.description", and the resource
# already has a "snippet" object.
ref = ref[key]
return resource
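# Illustration (comment only) of what build_resource produces for a made-up
# property map:
#   build_resource({'snippet.title': 'clip_1', 'snippet.tags[]': 'a,b'})
#   -> {'snippet': {'title': 'clip_1', 'tags': ['a', 'b']}}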
# Remove keyword arguments that are not set
def remove_empty_kwargs(**kwargs):
good_kwargs = {}
if kwargs is not None:
for key, value in kwargs.iteritems():
if value:
good_kwargs[key] = value
return good_kwargs
### END BOILERPLATE CODE
# Sample python code for videos.insert
def videos_insert(properties, media_file, **kwargs):
resource = build_resource(properties) # See full sample for function
kwargs = remove_empty_kwargs(**kwargs) # See full sample for function
request = service.videos().insert(
body=resource,
media_body=MediaFileUpload(media_file, chunksize=-1,
resumable=True),
**kwargs
)
vid = resumable_upload(request, 'video', 'insert') # See full sample for function
return vid
def hms_to_s(time):
time = unicode(time, "UTF-8")
time = time.split(" --> ")
t_0 = time[0].split(":")
t_1 = time[1].split(":")
    t0 = int(t_0[0])*3600 + int(t_0[1])*60 + float(t_0[2])
    t1 = int(t_1[0])*3600 + int(t_1[1])*60 + float(t_1[2])
return [t0,t1]
def s_to_hms(seconds):
m, sec = divmod(seconds, 60)
h, m = divmod(m, 60)
#print str(int(h)) + ":" + str(int(m)) + ":" + str(int(s))
return str(int(h)) + ":" + str(int(m)) + ":" + str(int(sec))
#ES: open anita/Anita.txt as myfile
try:
with open(folderName + "/" + fileName + ".txt", 'r') as myfile:
text = myfile.read().replace('\n', '')
#print "ES: replace \\n with ''"
with open(folderName + "/" + fileName + ".txt") as f:
text = f.readlines()
except IOError as e:
print "No text file found because you are not running the entire pipeline. Creating dummy file 'delete me.txt' to finish pipeline."
foo = open(folderName + "/" + "delete me.txt","w+")
foo.close()
with open(folderName + "/" + "delete me.txt", 'r') as myfile:
text = myfile.read().replace('\n', '')
with open(folderName + "/" + "delete me.txt") as f:
text = f.readlines()
pass
#print "ES: text is the following" + str(text)
#ES: strip whitespace
text = [x.strip() for x in text]
#split times (?)
splits = []
#list of cut-up texts
texts = [""]
t0 = 0
c = 0
#ES: several print commands were added for guidance. they can be removed.
#ES: a list of the transcript's timestamps
t_list = []
#ES: PREPARE INPUT TEXT FOR PROCESSING
if snipTranscript == True:
for t in text:
#add a \n to the end of each line (why?)
t += "\n"
#ES: if the beginning of the line is not a digit and is not a next-line char
#ES: removing punctuation from '[00:00:01.09]' since it is never qualified as a digit (False) and therefore the following condition is almost always met.
if not t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\n','').isdigit() and t != "\n":
#ES: add t to position c of texts
texts[c] += t#.encode('utf8')
#print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\n','').isdigit()
#ES: this will aggregate phrases (t) into one list item (a text) until a timestamp is reached
#ES: if t is a timestamp
#ES: removing punctuation from '[00:00:01.09]' since it is never qualified as a digit (False) and therefore the following condition is never met.
if t != "" and t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\n','').isdigit() and "[" in t:
#increase pos on texts by 1
c += 1
#ES: printing deets
#print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\n','').isdigit()
#print "c: " + str(c)
with open(folderName + "/" + fileName + "_" + str(c) + ".txt", 'w') as thefile:
#thefile = open(folderName + "/" + fileName + "_" + str(c) + ".txt", 'w')
try:
#ES: write the previous position of c in texts (a chunk of text prior to timestamp) to thefile
thefile.write("%s\n" % texts[c-1])
#time.sleep(.1)
texts.append("")
texts[c] = ""
#t = t.replace(" ", "")
#t = t
t = t.replace('[','').replace(']','').replace('\n','')
t = unicode(t, "UTF-8")
#split the timestamps at : (into 3)
t = t.split(":")
                    if len(t) < 2 or len(t) > 3:
print "\nOne of your timestamps (",':'.join(t) ,") isn't formatted correctly. Consult README.md for guidelines on proper timestamp formatting."
print "\nexiting application..."
time.sleep(2)
exit()
if len(t) == 2:
if combine_only == True:
t1 = int(t[0])*60 + int(t[1])
splits.append([t0,t0+t1])
t_list.append(t1)
t0 = t0 + t1
else:
t1 = int(t[0])*60 + int(t[1])
splits.append([t0,t1])
t_list.append(t1)
t0 = t1
elif len(t) == 3:
#if we are only combining subtitle files, and we are using a .txt file with a list of video lengths, then we need to make this into a list of cumulative times so that the rest of the pipeline can run
if combine_only == True:
t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2])
splits.append([t0,t0+t1])
#print int(t[0])*3600 + int(t[1])*60 + int(t[2])
t_list.append(t1)
t0 = t0 + t1
else:
t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2])
splits.append([t0,t1])
#print int(t[0])*3600 + int(t[1])*60 + int(t[2])
t_list.append(t1)
t0 = t1
except ValueError as e:
print e
print "\n One of your timestamps isn't formatted correctly. Consult README.md for guidelines on proper timestamp formatting."
print "\nVerifying if timestamps are in ascending order..."
sp1 = 0
num = 0
#print str(splits)
#print str(t_list)
for sp in splits:
if num > 0:
if sp[1] <= sp1[1]:
print "\nThere is a problem with one of your timestamps:"
print "Timestamp number #",str(num+2)," (equivalent to ",str(sp[1])," seconds) should be a larger number than the timestamp that comes before it (",str(sp1[1])," seconds), but it is smaller."
print "Please make sure your timestamps are in ascending order and that there are no mistakes (see README.md) and restart the program."
exit()
sp1 = sp
num+=1
print "\nThe document named '" + fileName + ".txt' was cut into " + str(len(splits)) + " text snippets based on it containing " + str(len(splits)) + " timestamps formatted like such '[HH:MM:SS.00]'."
else:
print "Please set the variable 'snipTranscript' to True so that the code can properly run."
exit()
#ES print texts[c]
#print "splits: " + str(splits)
#for i in splits:
# print s_to_hms(i[0]),"->",s_to_hms(i[1])
#time.sleep(60)
#print splits,splits[len(splits)-1][1]
#splits.append([splits[len(splits)-1][1],7200])
#print splits
#print "Wait"
#time.sleep(30)
c = 0
#print splits
videoids = []
#videoids = [u'jDAZHgL-nG4', u'cMNTnd8pApk', u's5hLO6T_BhY', u'gOAoCh5Mecc', u'p0PX5s6k5DU', u'hSmPkLqOt0M', u'2Ik7_biRs9g', u'G64A_hpNWfI', u'ZzVVEcGekv0', u'ZxKJhN3JFfI', u'TsDnqWmpvrw', u'Kvem1XnPHF0', u'VwqhkmbiLh0', u'V1sv1MYLdC0']
#videoids = [u'cj62vgUfnik', u'5k9WCcWCLiU', u'MexTd0EGfRc', u'hWY_30yHOec', u'GrMtKARI9kQ', u'YDHnQAE7U0w', u'yc4IXkGHuXs', u'ZauR51lBjQo', u'kisoEOTjmVI', u'V9XdpjtUU4Q', u'eOdKfhePfTs', u'AAQ9YuybUxM', u'3BaTzSSL4_c', u'OriOoB5yF0s', u'91qOFKithgE', u'WQJQkGEwG-Q', u'n4eW0T6Oek0', u'2dRf-EbKYHA', u'RUgi4NfoPEw', u'n40bGD_9eZI', u'OWWAQTGKyMI', u'8a2De6Gzfek', u'VQJgxR3iAoA', u'UEzrAMq6fGc', u'PXCHMF-Z7X4', u'SU_Rbp9V_Zo', u'VLhSxDh9gI0', u'80rY1RlbVQw', u'1yumt5fRBF4', u'u5qAHXhhJoo', u'G3gO6DW-wrM', u'qAU_8DNEqP8', u'fbGaOVHXkvY', u'_Knl1rP8Z9w', u'O6f8ZWjSgiw', u'uXY-00DuLjY', u'WpreZ_gbEyw']
#with open(folderName + "/" + 'videoids.pkl', 'wb') as f:
# pickle.dump(videoids, f)
if resumeUploads == True:
print "\nResuming video uploads...\n"
time.sleep(1)
try:
with open(folderName + "/" + 'videoids.pkl', 'rb') as f:videoids = pickle.load(f)
except Exception as e:
print e
print "\nThe program is unable to resume uploads because there are no uploads to resume or your 'videoids.pkl' file has gone missing. The program will restart by uploading all videos. You may need to remove any previously-uploaded videos if the videos you are uploading are identical. If so, do this manually on youtube.com and then restart the program."
uploadVideos = True
wait = False
def yes_or_no(question):
while "the answer is invalid":
reply = str(raw_input(question+' (y/n): ')).lower().strip()
        if reply == '' or reply.startswith('y'):
            return True
        if reply.startswith('n'):
            exit()
if uploadVideos == False and snipVideos == True:
#ES: the following is called when videos are being uploaded (uploadVideos = True) to warn the user as to how many videos will be uploaded.
question = "\nThere were " + str(len(splits)) + " timestamps detected in " + fileName + ". " + str(len(splits)) + " video snippets will created. Continue?"
print "\nIf all input was correct, the program will begin snipping"
yes_or_no(question)
print "\n1. Slicing into " + str(len(splits)) + " parts"
time.sleep(1)
for s in splits:
c += 1
if c > len(videoids):
ffmpeg_extract_subclip(folderName + "/" + originalVideo, s[0], s[1], targetname=folderName + "/" + fileName + "_" + str(c) +".mp4")
media_file = folderName + '/' + fileName + "_" + str(c) + ".mp4"
if not os.path.exists(media_file):
exit('Please specify a valid file location.')
print "\nSnipping completed. No further options were selected. Exiting..."
exit()
#ES: UPLOADS THE VIDEOS
if uploadVideos == True:
#ES: the following is called when videos are being uploaded (uploadVideos = True) to warn the user as to how many videos will be uploaded.
question = "\nThere were " + str(len(splits)) + " timestamps detected in " + fileName + ". " + str(len(splits)) + " video snippets will therefore be uploaded to YouTube for processing. YouTube allows a maximum of 100 video uploads per 24h using the current API credentials. Continue?"
print "\nIf all input was correct, the program will begin snipping and uploading content to Youtube for processing. This may take between 20 minutes and several hours, depending on the size of your video file (" + str(videoSize) + " Mb)."
yes_or_no(question)
print "\n1. Slicing into " + str(len(splits)) + " parts & uploading videos..."
time.sleep(1)
if len(videoids) > 0:
print "(However, it looks like ",len(videoids)," video snippets were already uploaded to Youtube. Now trying to resume uploading the remaining snippets...)"
time.sleep(1)
for s in splits:
c += 1
if c > len(videoids):
ffmpeg_extract_subclip(folderName + "/" + originalVideo, s[0], s[1], targetname=folderName + "/" + fileName + "_" + str(c) +".mp4")
media_file = folderName + '/' + fileName + "_" + str(c) + ".mp4"
if not os.path.exists(media_file):
exit('Please specify a valid file location.')
vid = videos_insert(
{'snippet.categoryId': '22',
'snippet.defaultLanguage': language,
'snippet.defaultAudioLanguage': language,
'snippet.description': 'Description of uploaded video.',
'snippet.tags[]': '',
'snippet.title': media_file,
'status.embeddable': '',
'status.license': '',
'status.privacyStatus': 'unlisted',
'status.publicStatsViewable': ''},
media_file,
part='snippet,status')
videoids.append(vid)
print videoids
#c += 1
wait = True
with open(folderName + "/" + 'videoids.pkl', 'wb') as f:
pickle.dump(videoids, f)
else:
if resumeUploads == True or deleteVideos == True or uploadTranscripts == True:
with open(folderName + "/" + 'videoids.pkl', 'rb') as f:
videoids = pickle.load(f)
print "\nThe video IDs are composed of the following: " + str(videoids)
#print videoids
if resumeUploads == True or deleteVideos == True or uploadTranscripts == True:
with open(folderName + "/" + 'videoids.pkl', 'wb') as f:
pickle.dump(videoids, f)
if wait == True:
print "\nWaiting for videos to be processed. It is",strftime("%H:%M:%S", localtime()),". Script will resume in " + str(sleepingTime/60) + " minutes..."
time.sleep(sleepingTime)
#search_response = service.search().list(
# q="Anita",
# part="id",
# type="video",
# fields="items/id"
#).execute()
#
#videos = []
#
#for search_result in search_response.get("items", []):
# videos.append("%s" % (search_result["id"]["videoId"]))
#
#print "Videos:\n", "\n".join(videos), "\n"
#ES: I don't think this function is ever called...
# Call the API's captions.insert method to upload a caption track in draft status.
def upload_caption(youtube, video_id, language, name, file):
insert_result = youtube.captions().insert(
part="snippet",
body=dict(
snippet=dict(
videoId=video_id,
language=language,
name=name,
isDraft=True
)
),
media_body=file
).execute()
id = insert_result["id"]
name = insert_result["snippet"]["name"]
language = insert_result["snippet"]["language"]
status = insert_result["snippet"]["status"]
#print "Uploaded caption track '%s(%s) in '%s' language, '%s' status." % (name,
# id, language, status)
c = 1
captionsids = []
wait = False
if uploadTranscripts == True:
#print splits,videoids
#uploads transcripts
print "\nUploading transcripts..."
for s in splits:
print c,s
media_file = folderName + '/' + fileName + "_" + str(c) + ".flv"
caption_file = folderName + '/' + fileName + "_" + str(c) + ".txt"
#print s,media_file,caption_file,videoids[c-1]
a = service.captions().insert(
part="snippet",
body=dict(
snippet=dict(
videoId=videoids[c-1],
language=language,
name=media_file,
isDraft=True,
sync=True
)
),
media_body=caption_file
).execute()
captionsids.append(a['id'])
c += 1
#print a
wait = True
with open(folderName + "/" + 'captionsids.pkl', 'wb') as f:
pickle.dump(captionsids, f)
print "Waiting for transcripts to be processed into captions. It is",strftime("%H:%M:%S", localtime()),". Script will resume in " + str(2 * sleepingTime / 60) + " minutes..."
time.sleep(2 * sleepingTime)
else:
if downloadCaptions == True:
with open(folderName + "/" + 'captionsids.pkl', 'rb') as f:
captionsids = pickle.load(f)
#if wait == True:
if downloadCaptions == True:
print "\nDownloading captions..."
c = 1
waitLonger = True
for s in splits:
print c,s,captionsids[c-1]
sub_txt = ""
# while waitLonger == True:
# try:
subtitle = service.captions().download(id=captionsids[c-1],tfmt='vtt').execute()
# waitLonger = False
# except:
# waitLonger = True
# print "Waiting for transcripts " + str(c) + " " + captionsids[c-1] + " to be processed into captions. It is",strftime("%H:%M:%S", localtime()),". Script will resume in " + str(2) + " minutes..."
# time.sleep(120)
sub_txt += subtitle
cc = ""
if c < 10:
cc = "0" + str(c)
else:
cc = str(c)
#print subtitle
print cc
with open(folderName + "/" + fileName + "_" + str(cc) + ".vtt", 'w') as thefile:
#thefile.write(sub_txt)
thefile.write(subtitle)
if cc == "31":
print subtitle
c += 1
time.sleep(3)
#deletes videos from youtube -ES
if deleteVideos == True:
print "\nDeleting videos...\n"
c = 1
for s in splits:
print c,videoids[c-1]
service.videos().delete(
id=videoids[c-1]
).execute()
c += 1
time.sleep(10)
if combineSubtitles == True:
#compiles them all
print "\nCombining subtitle snippets ..."
#ES: this is a feature that needs exploration so as to make sure that place names are never split between 2 timestamps, at the least.
#place-based time stamping can be set to True or False (make a variable for this)
compiledSubs = compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords)
time.sleep(10)
#thefile = open(folderName + "/" + fileName + ".srt", 'w')
#thefile.write(compiledSubs)
if uploadFull == True:
print "\nUploading full video..."
vid = videos_insert(
{'snippet.categoryId': '22',
'snippet.defaultLanguage': language,
'snippet.description': 'Description of uploaded video.',
'snippet.tags[]': '',
'snippet.title': fileName,
'status.embeddable': '',
'status.license': '',
'status.privacyStatus': 'unlisted',
'status.publicStatsViewable': ''},
folderName + "/" + originalVideo,
part='snippet,status')
# place video in custom playlist
def playlist_items_insert(properties, **kwargs):
resource = build_resource(properties) # See full sample for function
kwargs = remove_empty_kwargs(**kwargs) # See full sample for function
results = service.playlistItems().insert(
body=resource,
**kwargs
).execute()
print_results(results)
#'snippet.playlistId': playlistID,
playlist_items_insert(
{'snippet.resourceId.kind': 'youtube#video',
'snippet.resourceId.videoId': vid,
'snippet.position': ''},
part='snippet',
onBehalfOfContentOwner='')
print "Waiting for full video to be processed. It is",strftime("%H:%M:%S", localtime()),". Script will resume in " + str(sleepingTime/60) + " minutes..."
time.sleep(sleepingTime)
id = vid
print "\nUploading compiled subtitles..."
caption_file = folderName + '/' + fileName + ".srt"
service.captions().insert(
part="snippet",
body=dict(
snippet=dict(
videoId=id,
language=language,
name=originalVideo,
isDraft=True,
sync=False
)
),
media_body=caption_file
).execute()
print "\nFull video is soon available on your Youtube channel for you to check and adjust captions."
| 2.625 | 3 |
ComplementaryScripts/branch_work/Step_01_model_comparison.py | HaoLuoChalmers/Lactobacillus_reuteri_MM41A_GEM | 0 | 12792830 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by <NAME> at 2019-05-27
"""Step_01_model_comparison.py
:description : script
:param :
:returns:
:rtype:
"""
import os
import cobra
from matplotlib import pyplot as plt
from matplotlib_venn import venn2
import pandas as pd
import My_def
from My_def.model_report import *
if __name__ == '__main__':
os.chdir('../../ComplementaryData/Step_02_DraftModels/')
# %% <load data>
Lreu_ca = cobra.io.load_json_model('CarveMe/Lreu_ca.json')
Lreu_ca_gp = cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json')
Lreu_from_iNF517 = cobra.io.load_json_model('Template/Lreu_from_iNF517.json')
Lreu_from_iBT721 = cobra.io.load_json_model('Template/Lreu_from_iBT721.json')
Lreu_from_iML1515 = cobra.io.load_json_model('Template/Lreu_from_iML1515.json')
bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\t')
bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\t')
Lreu_ca_genes = [i.id for i in Lreu_ca.genes]
Lreu_ca_gp_genes = [i.id for i in Lreu_ca_gp.genes]
Lreu_ca_reas = [i.id for i in Lreu_ca.reactions]
Lreu_ca_gp_reas = [i.id for i in Lreu_ca_gp.reactions]
Lreu_ca_mets = [i.id for i in Lreu_ca.metabolites]
Lreu_ca_gp_mets = [i.id for i in Lreu_ca_gp.metabolites]
# %% <fig compare Lreu_ca and Lreu_ca_gp>
# Lreu_ca_gp have more
figure, axes = plt.subplots(1, 3)
axes[0].set_title("gene")
axes[1].set_title("rea")
axes[2].set_title("met")
fg1 = venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)],
('Normal','Gram positive' ), ax=axes[0])
# fg1.get_patch_by_id('10').set_color('Aquamarine')
    fg2 = venn2([set(Lreu_ca_reas), set(Lreu_ca_gp_reas)],
                ('Normal', 'Gram positive'), ax=axes[1])
fg3 = venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)],
('Normal','Gram positive'), ax=axes[2])
plt.show()
Lreu_from_iBT721_genes = [i.id for i in Lreu_from_iBT721.genes]
Lreu_from_iBT721_reas = [i.id for i in Lreu_from_iBT721.reactions]
Lreu_from_iBT721_mets = [i.id for i in Lreu_from_iBT721.metabolites]
Lreu_from_iNF517_genes = [i.id for i in Lreu_from_iNF517.genes]
Lreu_from_iNF517_reas = [i.id for i in Lreu_from_iNF517.reactions]
Lreu_from_iNF517_mets = [i.id for i in Lreu_from_iNF517.metabolites]
Lreu_from_iML1515_genes = [i.id for i in Lreu_from_iML1515.genes]
Lreu_from_iML1515_reas = [i.id for i in Lreu_from_iML1515.reactions]
Lreu_from_iML1515_mets = [i.id for i in Lreu_from_iML1515.metabolites]
# %% <fig compare templated based method models and Lreu_ca_gp>
# just a overview
figure_2, axes = plt.subplots(1, 3)
axes[0].set_title("gene")
axes[1].set_title("rea")
axes[2].set_title("met")
fg_1 = My_def.venn3_samesize([set(Lreu_from_iBT721_genes),
set(Lreu_from_iNF517_genes),
set(Lreu_from_iML1515_genes)],
('iBT721', 'iNF517','iML1515'), ax=axes[0])
fg_2 = My_def.venn3_samesize([set(Lreu_from_iBT721_reas),
set(Lreu_from_iNF517_reas),
set(Lreu_from_iML1515_reas)],
('iBT721', 'iNF517','iML1515'), ax=axes[1])
fg_3 = My_def.venn3_samesize([set(Lreu_from_iBT721_mets),
set(Lreu_from_iNF517_mets),
set(Lreu_from_iML1515_mets)],
('iBT721', 'iNF517','iML1515'), ax=axes[2])
plt.show()
| 2.109375 | 2 |
wikiml.py | tejaswiniallikanti/upgrad-mlcloud | 0 | 12792831 |
# coding: utf-8
import pandas as pd
from sklearn.linear_model import LogisticRegressionCV
df = pd.read_csv('data/wiki/articles.tsv', index_col='article_id')
df.head()
get_ipython().run_line_magic('pinfo', 'pd.get_dummies')
catCols = ['category', 'namespace']
catData = []
for c in catCols:
catData.append(pd.get_dummies(df[c], prefix=c, drop_first=True))
catData = pd.DataFrame(catData, axis=1)
catData = pd.concat(catData, axis=1)
catData.head()
df.head()
df['related_page'] = df['related_page'].astype(int)
numCols = ['redirect', 'related_page', 'lifetime', 'p_revert', 'M_delta', 'S_delta', 'M_size', 'S_size', 'M_comments', 'S_comments', 'n_count']
XDF = pd.concat([df[numCols], catData], axis=1)
XDF.head()
ix = XDF.index.values.copy()
import numpy as np
np.random.shuffle(ix)
X = XDF.loc[ix]
y = X.pop('n_count')
y.head()
XDF.head()
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(XDF.values, y)
lr.score(XDF.values, y)
from sklearn import linear_model as lm
r = lm.Ridge().fit(XDF.values, y)
get_ipython().run_line_magic('pinfo', 'r.score')
r.score(XDF.values, y)
r = lm.Ridge(alpha=0.5).fit(XDF.values, y)
r.score(XDF.values, y)
get_ipython().run_line_magic('pinfo', 'lm.RidgeCV')
get_ipython().run_line_magic('pinfo', 'lm.RidgeCV')
rcv = lm.RidgeCV(alphas=[0.001, 0.01, 0.1, 1, 10, 100, 1000], cv=5)
rcv.fit(XDF.values, y)
rcv
rcv.score(XDF.values, y)
get_ipython().set_next_input('lasso = lm.LassoCV');get_ipython().run_line_magic('pinfo', 'lm.LassoCV')
lasso = lm.LassoCV(n_jobs=-1, cv=5)
lasso.fit(XDF.values, y)
lasso.score(XDF.values, y)
lasso.coef_
rcv.coef_
ecv = lm.ElasticNetCV()
ecv.fit(XDF.values, y)
ecv.score(XDF.values, y)
from sklearn.feature_selection import RFECV
RFECV.head()
RFECV
lr = lm.LinearRegression()
rfecv = RFECV()
rfecv = RFECV(lr, cv=5, n_jobs=-1)
rfecv.fit(XDF.values, y)
rfecv.grid_scores_
rfecv.grid_scores_.max()
XDF.var(0)
get_ipython().run_line_magic('whos', '')
df.head()
X.shape
X.head()
X.columns
XDF.columns
XDF.groupby('redirect')['n_count'].mean()
get_ipython().run_line_magic('matplotlib', '')
import seaborn as sns
X.var(0)
XDF.groupby('related_page')['n_count'].mean()
XX = X[[c for c in X if not c.startswith('M_')]]
XX = XX[[c for c in XX if not c.startswith('S_')]]
XX.columns
XX.var(0).plot(kind='bar')
XX.drop(['lifetime'], axis=1).var(0).plot(kind='bar')
rfecv.fit(XX, y)
rfecv.grid_scores_.max()
rfecv.score(XX, y)
lasso
lasso.fit(XX, y)
lasso.score(XX, y)
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
XX.head()
get_ipython().run_line_magic('pinfo', 'SelectKBest')
XX.shape
for i in range(2, 20):
best = SelectKBest(f_regression, k=i)
XXX = best.fit_transform(XX.values)
lr = lm.LinearRegression().fit(XXX, y.values)
print(i, lr.score(XXX, y.values))
for i in range(2, 20):
best = SelectKBest(f_regression, k=i)
XXX = best.fit_transform(XX.values, y.values)
lr = lm.LinearRegression().fit(XXX, y.values)
print(i, lr.score(XXX, y.values))
ecv = lm.ElasticNetCV(l1_ratio=[0.1, 0.2, 0.5, 0.75, 0.8, 0.89, 0.9, 0.95, 0.99, 1])
ecv.fit(XX.values, y.values)
ecv.score(XX, y)
XDF.columns
for c in XDF:
if c.startswith('M_') or c.startswith('S_'):
print(c, XDF[c].var())
XDF['n_count'].var()
XX.var(0)
XXX = XDF[[c for c in XDF if c.startswith('M_')]]
XXX = XXX[[c for c in XXX if c.startswith('S_')]]
XXX.head()
XXX = XDF[[c for c in XDF if c.startswith('M_') or c.startswith('S_')]]
XXX.head()
XXX['lifetime'] = XDF['lifetime']
XXX.head()
y.shape
XXX.shape
ecv.n_jobs = -1
ecv.fit(XXX.values, y.values)
ecv.score(XXX.values, y.values)
| 2.640625 | 3 |
wirelesstraitor/display.py | DMaendlen/wirelesstraitor | 0 | 12792832 | <filename>wirelesstraitor/display.py<gh_stars>0
from wirelesstraitor.observer import Observer
class CommandLineLocationDisplay(Observer):
def __init__(self):
super(CommandLineLocationDisplay, self).__init__()
def update(self, *args, **kwargs):
"""Continuously print new mac-location pairs"""
argtuple = args[0]
mac = argtuple[0]
bssid = argtuple[1][0]
ssid = argtuple[1][1]
location = argtuple[1][2]
lat = location['lat']
lng = location['lng']
self.display_location(mac = mac, bssid = bssid, ssid = ssid, lat = lat, lng = lng)
def display_location(self, mac, bssid, ssid, lat, lng):
"""Take json-formatted location and print it together with mac"""
print("Device {mac} has seen {ssid} ({bssid}) at location {lat}, {lng}".format(
mac = mac,
ssid = ssid,
bssid = bssid,
lat = lat,
lng = lng))
| 2.859375 | 3 |
protreim/config/base.py | hinihatetsu/protreim | 0 | 12792833 | <gh_stars>0
from abc import ABC, abstractmethod
from typing import Dict, List, Any, TypeVar
import PySimpleGUI as sg
class ConfigBase(ABC):
""" Base class of config class.
All config class must inherit this class.
"""
def __init__(self) -> None:
""" Set class variables as instance variables. """
for field in self.fields:
setattr(self, field, getattr(self, field))
@property
def fields(self) -> List[str]:
""" Fields of configuration. """
return list(self.__annotations__.keys())
def update(self, *args: Any, **kwargs: Any) -> None:
""" Update instance variables with kwargs. """
for key, val in kwargs.items():
if key in self.__annotations__:
setattr(self, key, val)
def asdict(self) -> Dict[str, Any]:
""" Return dict of ConfigBase class. """
return {key:getattr(self, key) for key in self.fields}
@abstractmethod
def GUI(self, parent: str='') -> sg.Frame:
""" GUI.
Parameters
----------
parent : str
keyword to tell apart instances in the same window.
Returns
-------
PySimpleGUI.Frame
"""
pass
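# Illustrative sketch (not part of the original module): a minimal ConfigBase
# subclass showing how annotated class attributes become configuration fields.
# The field names, defaults, and frame title are assumptions for demonstration.
class _ExampleConfig(ConfigBase):
    width: int = 800
    height: int = 600
    def GUI(self, parent: str = '') -> sg.Frame:
        """ One input row per field, keyed by `parent` to keep windows distinct. """
        rows = [
            [sg.Text(field), sg.Input(str(getattr(self, field)), key=f'{parent}{field}')]
            for field in self.fields
        ]
        return sg.Frame('Example', rows)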
ConfigType = TypeVar('ConfigType', bound=ConfigBase)
def load(config: ConfigType, d: Dict[str, Any]) -> ConfigType:
fields = config.fields
for field in fields:
if field not in d:
continue
val = d[field]
if type(val) is dict:
kwargs = {
field: load(getattr(config, field), val,)
}
elif field in {'color', 'stroke_color'}:
kwargs = {
field: tuple(val[:3]) if type(val) is list else val
}
else:
kwargs = {
field: val
}
config.update(**kwargs)
return config | 3.0625 | 3 |
inference.py | aws-samples/serverless-word-embedding | 0 | 12792834 | from gensim import models
import json
import numpy as np
MODEL_VERSION = "glove-wiki-gigaword-300"
model = models.KeyedVectors.load_word2vec_format(MODEL_VERSION)
def get_word_vec(word_list):
"""
This method will get the vector of the given word
:param word_list: list of a single word string
:return: the vector list of this word
"""
result = {"status_code": "0000"}
if len(word_list) > 1:
result["status_code"] = "0001"
result["result_info"] = "Expect one wordString for getVec"
return result
word = word_list[0]
try:
vec = model.get_vector(word)
result["vec"] = str(np.array(vec).tolist())
except Exception as e:
result["status_code"] = "0001"
result["result_info"] = str(e)
return result
def get_sim_by_word(word_list):
"""
This method will return a list of the similar words by the given word
:param word_list: list of a single word string
:return: the sim words list of the given word
"""
result = {"status_code": "0000"}
if len(word_list) > 1:
result["status_code"] = "0001"
result["result_info"] = "Expect one wordString for getSim"
return result
word = word_list[0]
try:
sim_words = model.similar_by_word(word)
result["sim_words"] = sim_words
except Exception as e:
result["status_code"] = "0001"
result["result_info"] = str(e)
return result
def get_similarity_between(word_list):
"""
This method will get the similarity of two given words
:param word_list: list of two words A B for similarity calculation
:return: cosine similarity of the two given words
"""
result = {"status_code": "0000"}
if len(word_list) != 2:
result["status_code"] = "0001"
result["result_info"] = "Expect two wordString for getSimBetween"
return result
try:
word_a = word_list[0]
word_b = word_list[1]
similarity = model.similarity(word_a, word_b)
result["similarity"] = str(similarity)
except Exception as e:
result["status_code"] = "0001"
result["result_info"] = str(e)
return result
method_dispatcher = {
    "getVec": get_word_vec,
    "getSim": get_sim_by_word,
    "getSimBetween": get_similarity_between
}
def validate_event(event):
"""
This function will validate the event send from API gateway to Lambda and raise exception if exists
:param event:
:return:
"""
params = event["multiValueQueryStringParameters"]
if "method" not in params.keys() or "wordString" not in params.keys():
raise Exception('"method" and "wordString" are expected as the Query Params')
# flag = False
method = params.get("method")
if len(method) != 1:
# flag = False
raise Exception('Expect one value for method param')
method = method[0]
if method not in method_dispatcher.keys():
# flag = False
raise Exception('method must be in one of ' + str(list(method_dispatcher.keys())))
def lambda_handler(event, context):
result = {}
response = {
'statusCode': 200,
'body': ""
}
try:
validate_event(event)
except Exception as e:
result["status_code"] = "0001"
result["result_info"] = str(e)
result["request_info"] = event["multiValueQueryStringParameters"]
result["model_version"] = MODEL_VERSION
response["body"] = json.dumps(result)
return response
params = event["multiValueQueryStringParameters"]
method = params["method"][0]
word_list = params["wordString"]
result = method_dispatcher[method](word_list)
result["request_info"] = event["multiValueQueryStringParameters"]
result["model_version"] = MODEL_VERSION
response["body"] = json.dumps(result)
print(response)
return response
if __name__ == "__main__":
f = open('mock_event.json')
mock_event = json.load(f)
f.close()
print(lambda_handler(mock_event, context=""))
| 3.265625 | 3 |
day16/part2.py | stevotvr/adventofcode2016 | 3 | 12792835 | inp = '11101000110010100'
output = list(inp)
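# Dragon-curve expansion: append '0' plus the reversed, bit-flipped data until
# 35651584 bits are available, then truncate and collapse pairs into a checksum
# ('1' for matching pairs, '0' otherwise) until the checksum length is odd.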
while len(output) < 35651584:
output += ['0'] + ['0' if x == '1' else '1' for x in reversed(output)]
output = output[0:35651584]
while len(output) % 2 == 0:
output = ['1' if output[i] == output[i + 1] else '0' for i in range(0, len(output), 2)]
print(''.join(output))
input()
| 3.015625 | 3 |
vertica_python/vertica/messages/frontend_messages/bind.py | jakubjedelsky/vertica-python | 1 | 12792836 | <filename>vertica_python/vertica/messages/frontend_messages/bind.py
from __future__ import print_function, division, absolute_import
from struct import pack
from ..message import BulkFrontendMessage
class Bind(BulkFrontendMessage):
message_id = b'B'
def __init__(self, portal_name, prepared_statement_name, parameter_values):
BulkFrontendMessage.__init__(self)
self._portal_name = portal_name
self._prepared_statement_name = prepared_statement_name
self._parameter_values = parameter_values
def read_bytes(self):
bytes_ = pack('!{0}sx{1}sxHH'.format(
len(self._portal_name), len(self._prepared_statement_name)),
self._portal_name, self._prepared_statement_name, 0, len(self._parameter_values))
for val in self._parameter_values.values():
if val is None:
                bytes_ += pack('!i', -1)  # NULL parameter: length -1, no value bytes
else:
bytes_ += pack('!I{0}s'.format(len(val)), len(val), val)
        bytes_ += pack('!H', 0)
return bytes_
| 2.109375 | 2 |
code/python/dataprocessing.py | nordin11/DataProject | 0 | 12792837 | <gh_stars>0
# coding: utf-8
# In[1]:
from pandas_datareader import data
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import json
import io
# In[2]:
## FINANCIAL INDUSTRY
# Call the stocks I want to analyze
financeTickers = ['JPM','BAC','WFC','V','C']
# Define the data source
data_source = 'yahoo'
# Define the time-scale
start_date = '2000-01-01'
end_date = '2018-01-01'
# Get the data
panel_data = data.DataReader(financeTickers, data_source, start_date, end_date)
# Getting just the adjusted closing prices. This will return a Pandas DataFrame
# The index in this DataFrame is the major index of the panel_data
close = panel_data.loc['Close']
# Getting all weekdays between 01/01/2000 and 12/31/2016
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
# How do we align the existing prices in adj_close with our new set of dates?
# All we need to do is reindex close using all_weekdays as the new index
close = close.reindex(all_weekdays)
# Drop the dates where one of the companies wasn't public yet
close = close.dropna(axis=0, how='any')
# Define the table as a matrix
close = np.asmatrix(close)
# Print the FINANCE correlation matrix
print(np.corrcoef(close, rowvar=False))
# In[3]:
## TECH INDUSTRY
# Call the stocks I want to analyze
techTickers = ['GOOGL','MSFT','FB','T','VZ']
# Define the data source
data_source = 'yahoo'
# Define the time-scale
start_date = '2000-01-01'
end_date = '2018-01-01'
# Get the data
panel_data = data.DataReader(techTickers, data_source, start_date, end_date)
# Getting just the adjusted closing prices. This will return a Pandas DataFrame
# The index in this DataFrame is the major index of the panel_data
close = panel_data.loc['Close']
# Getting all weekdays between 01/01/2000 and 12/31/2016
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
# How do we align the existing prices in adj_close with our new set of dates?
# All we need to do is reindex close using all_weekdays as the new index
close = close.reindex(all_weekdays)
# Drop the dates where one of the companies wasn't public yet
close = close.dropna(axis=0, how='any')
# Define the table as a matrix
close = np.asmatrix(close)
# Print the TECH correlation matrix
print(np.corrcoef(close, rowvar=False))
# In[4]:
## SERVICES
# Call the stocks I want to analyze
servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA']
# Define the data source
data_source = 'yahoo'
# Define the time-scale
start_date = '2000-01-01'
end_date = '2018-01-01'
# Get the data
panel_data = data.DataReader(servicesTickers, data_source, start_date, end_date)
# Getting just the adjusted closing prices. This will return a Pandas DataFrame
# The index in this DataFrame is the major index of the panel_data.
close = panel_data.loc['Close']
# Getting all weekdays between 01/01/2000 and 12/31/2016
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
# How do we align the existing prices in adj_close with our new set of dates?
# All we need to do is reindex close using all_weekdays as the new index
close = close.reindex(all_weekdays)
# Drop the dates where one of the companies wasn't public yet
close = close.dropna(axis=0, how='any')
# Define the table as a matrix
close = np.asmatrix(close)
# Print the SERVICES correlation matrix
print(np.corrcoef(close, rowvar=False))
# In[5]:
## BASIC MATERIALS
# Call the stocks I want to analyze
basicTickers = ['XOM','RDS-B','PTR','CVX','BP']
# Define the data source
data_source = 'yahoo'
# Define the time-scale
start_date = '2000-01-01'
end_date = '2018-01-01'
# Get the data
panel_data = data.DataReader(basicTickers, data_source, start_date, end_date)
# Getting just the adjusted closing prices. This will return a Pandas DataFrame
# The index in this DataFrame is the major index of the panel_data.
close = panel_data.loc['Close']
# Getting all weekdays between 01/01/2000 and 12/31/2016
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
# How do we align the existing prices in adj_close with our new set of dates?
# All we need to do is reindex close using all_weekdays as the new index
close = close.reindex(all_weekdays)
# Drop the dates where one of the companies wasn't public yet
close = close.dropna(axis=0, how='any')
# Define the table as a matrix
close = np.asmatrix(close)
# Print the MATERIALS correlation matrix
print(np.corrcoef(close, rowvar=False))
# In[6]:
## CONSUMER GOODS
# Call the stocks I want to analyze
consumerTickers = ['AAPL','PG','BUD','KO','TM']
# Define the data source
data_source = 'yahoo'
# Define the time-scale
start_date = '2000-01-01'
end_date = '2018-01-01'
# Get the data
panel_data = data.DataReader(consumerTickers, data_source, start_date, end_date)
# Getting just the adjusted closing prices. This will return a Pandas DataFrame
# The index in this DataFrame is the major index of the panel_data.
close = panel_data.loc['Close']
# Getting all weekdays between 01/01/2000 and 12/31/2016
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
# How do we align the existing prices in adj_close with our new set of dates?
# All we need to do is reindex close using all_weekdays as the new index
close = close.reindex(all_weekdays)
# Drop the dates where one of the companies wasn't public yet
close = close.dropna(axis=0, how='any')
# Define the table as a matrix
close = np.asmatrix(close)
# Print the GOODS correlation matrix
print(np.corrcoef(close, rowvar=False))
# In[45]:
## ALL INDUSTRIES
# Call the stocks I want to analyze
financeTickers = ['JPM','BAC','WFC','V','C']
techTickers = ['GOOGL','MSFT','FB','T','VZ']
servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA']
basicTickers = ['XOM','RDS-B','PTR','CVX','BP']
consumerTickers = ['AAPL','PG','BUD','KO','TM']
# group all tickers together
AllTickers = ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM']
# Define the data source
data_source = 'yahoo'
# Define the time-scale
start_date = '2000-01-01'
end_date = '2018-01-01'
# Get the data
panel_data = data.DataReader(AllTickers, data_source, start_date, end_date)
# Getting just the adjusted closing prices. This will return a Pandas DataFrame
# The index in this DataFrame is the major index of the panel_data.
close = panel_data.loc['Close']
# Getting all weekdays between 01/01/2000 and 12/31/2016
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
# How do we align the existing prices in adj_close with our new set of dates?
# All we need to do is reindex close using all_weekdays as the new index
close = close.reindex(all_weekdays)
# Drop the dates where one of the companies wasn't public yet
close = close.dropna(axis=0, how='any')
# Define the table as a matrix
close = np.matrix(close)
# Define and print the correlation matrix in absolute values
close = np.corrcoef(close, rowvar=False)
a = np.zeros(len(AllTickers))
for i in range(len(AllTickers)):
a[i] = np.sum(abs(close[i]))
print(a - 1)
# In[8]:
## ALL INDUSTRIES
# Call the stocks I want to analyze
financeTickers = ['JPM','BAC','WFC','V','C']
techTickers = ['GOOGL','MSFT','FB','T','VZ']
servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA']
basicTickers = ['XOM','RDS-B','PTR','CVX','BP']
consumerTickers = ['AAPL','PG','BUD','KO','TM']
# group all tickers together
AllTickers = ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM']
# Define the data source
data_source = 'yahoo'
# Define the time-scale
start_date = '2000-01-01'
end_date = '2018-01-01'
# Get the data
panel_data = data.DataReader(AllTickers, data_source, start_date, end_date)
# Getting just the adjusted closing prices. This will return a Pandas DataFrame
# The index in this DataFrame is the major index of the panel_data.
close = panel_data.loc['Close']
# Getting all weekdays between 01/01/2000 and 12/31/2016
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
# How do we align the existing prices in adj_close with our new set of dates?
# All we need to do is reindex close using all_weekdays as the new index
close = close.reindex(all_weekdays)
# Drop the dates where one of the companies wasn't public yet
close = close.dropna(axis=0, how='any')
# Define the table as a matrix
close = np.matrix(close)
# Print the correlation matrix
c = np.corrcoef(close, rowvar=False)
# manipulate the data so that I can output the proper format for the network
nodes = []
# define the different industries by seperating it in different groups
for i in range(len(AllTickers)):
if i < len(financeTickers):
nodes.append({"id":AllTickers[i], "group": 1})
elif i < len(financeTickers) + len(techTickers):
nodes.append({"id":AllTickers[i], "group": 2})
elif i < len(financeTickers) + len(techTickers) + len(servicesTickers):
nodes.append({"id":AllTickers[i], "group": 3})
elif i < len(financeTickers) + len(techTickers) + len(servicesTickers) + len(basicTickers):
nodes.append({"id":AllTickers[i], "group": 4})
else:
nodes.append({"id":AllTickers[i], "group": 5})
links = []
# Go through the stocks and link each pair with its correlation coefficient from the matrix.
for i in range(len(AllTickers)):
for j in range(1,len(AllTickers) - i):
links.append({"source" : AllTickers[i],"target" : AllTickers[i + j],"value" : c[i,i+j]})
# bring together the two dictionaries into one big dict
json_data = {
"nodes": nodes,
"links": links
}
network = json.dumps(json_data)
# copied this print into a downloaded json file.
print(network)
# In[29]:
## ALL INDUSTRIES
# Call the stocks I want to analyze
financeTickers = ['JPM','BAC','WFC','V','C']
techTickers = ['GOOGL','MSFT','FB','T','VZ']
servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA']
basicTickers = ['XOM','RDS-B','PTR','CVX','BP']
consumerTickers = ['AAPL','PG','BUD','KO','TM']
# group all tickers together
AllTickers = ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM']
# Define the data source
data_source = 'yahoo'
# Define the time-scale
start_date = '2000-01-01'
end_date = '2018-01-01'
# Get the data
panel_data = data.DataReader(AllTickers, data_source, start_date, end_date)
# Getting just the adjusted closing prices. This will return a Pandas DataFrame
# The index in this DataFrame is the major index of the panel_data.
close = panel_data.loc['Close']
# Getting all weekdays between 01/01/2000 and 12/31/2016
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
# How do we align the existing prices in adj_close with our new set of dates?
# All we need to do is reindex close using all_weekdays as the new index
close = close.reindex(all_weekdays)
# Drop the dates where one of the companies wasn't public yet
close = close.dropna(axis=0, how='any')
# normalize the data by computing relative gain, starting at the first price = 1.0
close = close/close.iloc[0, :]
close.to_csv('price_relative_gain.csv', encoding='utf-8')
| 3.328125 | 3 |
ml/classifier.py | shilang1220/tfwrapper | 0 | 12792838 | <filename>ml/classifier.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 18-5-15 下午5:32
# @Author : <NAME>
# @File : classifier.py
# @Software: tfwrapper
from keras.layers import Dense
from keras.models import Sequential
from keras.datasets import mnist
(x_train,y_train),(x_test,y_test) = mnist.load_data()
print('Original train data shape', x_train.shape, y_train.shape)
x_train = x_train / 255
x_test = x_test /255
| 2.9375 | 3 |
bot/bot.py | dzd/slack-starterbot | 0 | 12792839 | from time import sleep
from re import search, finditer
from copy import copy
# from pprint import pprint
from slackclient import SlackClient
from bot.message import Message
class Bot:
def __init__(self, conf):
# instantiate Slack client
self.slack_client = SlackClient(conf.slack_bot_token)
# starterbot's user ID in Slack: value is assigned
# after the bot starts up
self.starterbot_id = None
# constants
self.RTM_READ_DELAY = 2 # 1 second delay between reading from RTM
self.MENTION_REGEX = "^<@(|[WU].+?)>(.*)"
self.LINK_URL = conf.link_url
self.MATCH_PATTERN = conf.match_pattern
# list of channel the bot is member of
self.g_member_channel = []
# context: in which thread the link was already provided
self.message_context = {}
def chat(self):
if self.slack_client.rtm_connect(with_team_state=False):
print("Starter Bot connected and running!")
self.get_list_of_channels()
self.bot_loop()
else:
print("Connection failed. Exception traceback printed above.")
def bot_loop(self):
# Read bot's user ID by calling Web API method `auth.test`
        self.starterbot_id = self.slack_client.api_call("auth.test")["user_id"]
while True:
bot_message = self.parse_events_in_channel(self.slack_client.rtm_read())
if bot_message.channel:
self.respond_in_thread(bot_message)
sleep(self.RTM_READ_DELAY)
def parse_direct_mention(self, message_text):
"""
Finds a direct mention (a mention that is at the beginning)
in message text and returns the user ID which was mentioned.
If there is no direct mention, returns None
"""
matches = search(self.MENTION_REGEX, message_text)
# the first group contains the username,
# the second group contains the remaining message
return (matches.group(1), matches.group(2).strip()) if matches else (None, None)
def get_list_of_channels(self):
""" print the list of available channels """
channels = self.slack_client.api_call(
"channels.list",
exclude_archived=1
)
self.g_member_channel = [channel for channel in channels['channels'] if channel['is_member']]
# print("available channels:")
# pprint(channels)
print("I am member of {} channels: {}"
.format(len(self.g_member_channel),
",".join([c['name'] for c in self.g_member_channel])))
def check_if_member(self, channel):
""" checking if the bot is member of a given channel """
return channel in [channel['id'] for channel in self.g_member_channel]
def parse_events_in_channel(self, events):
"""
Selecting events of type message with no subtype
which are posted in channel where the bot is
"""
# print("DEBUG: my channels: {}".format(g_member_channel))
for event in events:
# pprint(event)
# Parsing only messages in the channels where the bot is member
if event["type"] != "message" or "subtype" in event or \
not self.check_if_member(event["channel"]):
# print("not for me: type:{}".format(event))
continue
# analyse message to see if we can suggest some links
analysed_message = self.analyse_message(event['text'])
thread_ts = event['ts']
if 'thread_ts' in event.keys():
thread_ts = event['thread_ts']
if not analysed_message:
return Message(None, None, None)
analysed_message_no_repeat = self.dont_repeat_in_thread(analysed_message, thread_ts)
if not analysed_message_no_repeat:
return Message(None, None, None)
return Message(event["channel"], thread_ts,
analysed_message_no_repeat, self.LINK_URL)
return Message(None, None, None)
def analyse_message(self, message):
"""
find matching sub string in the message and
returns a list of formatted links
"""
pattern = self.MATCH_PATTERN
matchs = []
for i in finditer(pattern, message):
value = i.group(1)
if value not in matchs:
matchs.append(value)
if not len(matchs):
return
return matchs
def dont_repeat_in_thread(self, analysed_messages, thread_ts):
""" Remove message from analysed message if it was already sent in the same
message thread.
"""
# pprint(self.message_context)
no_repeat_messages = copy(analysed_messages)
for message in analysed_messages:
if thread_ts in self.message_context.keys():
if message in self.message_context[thread_ts]:
no_repeat_messages.remove(message)
return no_repeat_messages
def respond_in_thread(self, bot_message):
"""Sends the response back to the channel
        in a thread
"""
# Add message to the message context to avoid
# repeating same message in a thread
if bot_message.thread_ts not in self.message_context.keys():
self.message_context[bot_message.thread_ts] = []
self.message_context[bot_message.thread_ts].extend(bot_message.raw_message)
self.slack_client.api_call(
"chat.postMessage",
channel=bot_message.channel,
thread_ts=bot_message.thread_ts,
text=bot_message.formatted_message
)
| 2.65625 | 3 |
code/nbs/reco-tut-mlh-02-model-comparison.py | sparsh-ai/reco-tut-mlh | 0 | 12792840 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
project_name = "reco-tut-mlh"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
# In[2]:
if not os.path.exists(project_path):
get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
get_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
get_ipython().system(u'mkdir "{path}"')
get_ipython().magic(u'cd "{path}"')
import sys; sys.path.append(path)
get_ipython().system(u'git config --global user.email "<EMAIL>"')
get_ipython().system(u'git config --global user.name "reco-tut"')
get_ipython().system(u'git init')
get_ipython().system(u'git remote add origin https://"{mykeys.git_token}":[email protected]/"{account}"/"{project_name}".git')
get_ipython().system(u'git pull origin "{branch}"')
get_ipython().system(u'git checkout main')
else:
get_ipython().magic(u'cd "{project_path}"')
# In[34]:
get_ipython().system(u'git status')
# In[35]:
get_ipython().system(u'git add . && git commit -m \'commit\' && git push origin "{branch}"')
# In[7]:
import sys
sys.path.insert(0, './code')
# ---
# # Collaborative Filtering Comparison
#
# In this notebook we compare different recommendation systems starting with the state-of-the-art LightGCN and going back to the winning algorithm for 2009's Netflix Prize competition, SVD++.
#
# Models include in order are LightGCN, NGCF, SVAE, SVD++, and SVD. Each model has their own individual notebooks where we go more indepth, especially LightGCN and NGCF, where we implemented them from scratch in Tensorflow.
#
# The last cell compares the performance of the different models using ranking metrics:
#
#
# * Precision@k
# * Recall@k
# * Mean Average Precision (MAP)
# * Normalized Discounted Cumulative Gain (NDCG)
#
# where $k=10$
#
#
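# To make these metrics concrete, the next cell is a small self-contained sketch
# (for illustration only; the comparison below uses the project's own `metrics`
# module) of Precision@k and Recall@k for a single user's ranked recommendations.
# In[ ]:
def precision_recall_at_k(recommended, relevant, k=10):
    # Fraction of the top-k that is relevant, and fraction of relevant items recovered.
    top_k = recommended[:k]
    hits = len(set(top_k) & set(relevant))
    precision = hits / k
    recall = hits / len(relevant) if relevant else 0.0
    return precision, recall
# e.g. precision_recall_at_k(['m1', 'm7', 'm3'], ['m3', 'm9'], k=3) -> (0.333..., 0.5)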
# # Imports
# In[4]:
get_ipython().system(u'pip install -q surprise')
# In[8]:
import math
import numpy as np
import os
import pandas as pd
import random
import requests
import scipy.sparse as sp
import surprise
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.python.framework.ops import disable_eager_execution
from tqdm import tqdm
from utils import stratified_split, numpy_stratified_split
import build_features
import metrics
from models import SVAE
from models.GCN import LightGCN, NGCF
# # Prepare data
# In[9]:
fp = os.path.join('./data/bronze', 'u.data')
raw_data = pd.read_csv(fp, sep='\t', names=['userId', 'movieId', 'rating', 'timestamp'])
print(f'Shape: {raw_data.shape}')
raw_data.sample(10, random_state=123)
# In[10]:
# Load movie titles.
fp = os.path.join('./data/bronze', 'u.item')
movie_titles = pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols = range(2), encoding='iso-8859-1')
print(f'Shape: {movie_titles.shape}')
movie_titles.sample(10, random_state=123)
# In[15]:
train_size = 0.75
train, test = stratified_split(raw_data, 'userId', train_size)
print(f'Train Shape: {train.shape}')
print(f'Test Shape: {test.shape}')
print(f'Do they have the same users?: {set(train.userId) == set(test.userId)}')
# In[16]:
combined = train.append(test)
n_users = combined['userId'].nunique()
print('Number of users:', n_users)
n_movies = combined['movieId'].nunique()
print('Number of movies:', n_movies)
# In[17]:
# Create DataFrame with reset index of 0-n_movies.
movie_new = combined[['movieId']].drop_duplicates()
movie_new['movieId_new'] = np.arange(len(movie_new))
train_reindex = pd.merge(train, movie_new, on='movieId', how='left')
# Reset index to 0-n_users.
train_reindex['userId_new'] = train_reindex['userId'] - 1
train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']]
test_reindex = pd.merge(test, movie_new, on='movieId', how='left')
# Reset index to 0-n_users.
test_reindex['userId_new'] = test_reindex['userId'] - 1
test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']]
# Create dictionaries so we can convert to and from indexes
item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new']))
id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId']))
user2id = dict(zip(train['userId'], train_reindex['userId_new']))
id2user = dict(zip(train_reindex['userId_new'], train['userId']))
# In[18]:
# Create user-item graph (sparse matrix where users are rows and movies are columns.
# 1 if a user reviewed that movie, 0 if they didn't).
R = sp.dok_matrix((n_users, n_movies), dtype=np.float32)
R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1
# Create the adjacency matrix with the user-item graph.
adj_mat = sp.dok_matrix((n_users + n_movies, n_users + n_movies), dtype=np.float32)
# List of lists.
adj_mat.tolil()
R = R.tolil()
# Put together adjacency matrix. Movies and users are nodes/vertices.
# 1 if the movie and user are connected.
adj_mat[:n_users, n_users:] = R
adj_mat[n_users:, :n_users] = R.T
adj_mat
# In[19]:
# Calculate degree matrix D (for every row count the number of nonzero entries)
D_values = np.array(adj_mat.sum(1))
# Square root and inverse.
D_inv_values = np.power(D_values + 1e-9, -0.5).flatten()
D_inv_values[np.isinf(D_inv_values)] = 0.0
# Create sparse matrix with the values of D^(-0.5) are the diagonals.
D_inv_sq_root = sp.diags(D_inv_values)
# Eval (D^-0.5 * A * D^-0.5).
norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root)
# In[20]:
# to COOrdinate format first ((row, column), data)
coo = norm_adj_mat.tocoo().astype(np.float32)
# create an index that will tell SparseTensor where the non-zero points are
indices = np.mat([coo.row, coo.col]).transpose()
# convert to sparse tensor
A_tilde = tf.SparseTensor(indices, coo.data, coo.shape)
A_tilde
# # Train models
# ## Graph Convolutional Networks (GCNs)
# ### Light Graph Convolution Network (LightGCN)
# In[21]:
light_model = LightGCN(A_tilde,
n_users = n_users,
n_items = n_movies,
n_layers = 3)
# In[22]:
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer)
# ### Neural Graph Collaborative Filtering (NGCF)
# In[23]:
ngcf_model = NGCF(A_tilde,
n_users = n_users,
n_items = n_movies,
n_layers = 3
)
ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer)
# ### Recommend with LightGCN and NGCF
# In[24]:
# Convert test user ids to the new ids
users = np.array([user2id[x] for x in test['userId'].unique()])
recs = []
for model in [light_model, ngcf_model]:
recommendations = model.recommend(users, k=10)
recommendations = recommendations.replace({'userId': id2user, 'movieId': id2item})
recommendations = recommendations.merge(movie_titles,
how='left',
on='movieId'
)[['userId', 'movieId', 'title', 'prediction']]
# Create column with the predicted movie's rank for each user
top_k = recommendations.copy()
top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1 # For each user, only include movies recommendations that are also in the test set
recs.append(top_k)
# ## Standard Variational Autoencoder (SVAE)
# In[26]:
# Binarize the data (only keep ratings >= 4)
df_preferred = raw_data[raw_data['rating'] > 3.5]
df_low_rating = raw_data[raw_data['rating'] <= 3.5]
df = df_preferred.groupby('userId').filter(lambda x: len(x) >= 5)
df = df.groupby('movieId').filter(lambda x: len(x) >= 1)
# Obtain both usercount and itemcount after filtering
usercount = df[['userId']].groupby('userId', as_index = False).size()
itemcount = df[['movieId']].groupby('movieId', as_index = False).size()
unique_users =sorted(df.userId.unique())
np.random.seed(123)
unique_users = np.random.permutation(unique_users)
HELDOUT_USERS = 200
# Create train/validation/test users
n_users = len(unique_users)
train_users = unique_users[:(n_users - HELDOUT_USERS * 2)]
val_users = unique_users[(n_users - HELDOUT_USERS * 2) : (n_users - HELDOUT_USERS)]
test_users = unique_users[(n_users - HELDOUT_USERS):]
train_set = df.loc[df['userId'].isin(train_users)]
val_set = df.loc[df['userId'].isin(val_users)]
test_set = df.loc[df['userId'].isin(test_users)]
unique_train_items = pd.unique(train_set['movieId'])
val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)]
test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)]
# Instantiate the sparse matrix generation for train, validation and test sets
# use list of unique items from training set for all sets
am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items)
am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items)
am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items)
# Obtain the sparse matrix for train, validation and test sets
train_data, _, _ = am_train.gen_affinity_matrix()
val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix()
test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix()
# Split validation and test data into training and testing parts
val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123)
test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123)
# Binarize train, validation and test data
train_data = np.where(train_data > 3.5, 1.0, 0.0)
val_data = np.where(val_data > 3.5, 1.0, 0.0)
test_data = np.where(test_data > 3.5, 1.0, 0.0)
# Binarize validation data
val_data_tr = np.where(val_data_tr > 3.5, 1.0, 0.0)
val_data_te_ratings = val_data_te.copy()
val_data_te = np.where(val_data_te > 3.5, 1.0, 0.0)
# Binarize test data: training part
test_data_tr = np.where(test_data_tr > 3.5, 1.0, 0.0)
# Binarize test data: testing part (save non-binary version in the separate object, will be used for calculating NDCG)
test_data_te_ratings = test_data_te.copy()
test_data_te = np.where(test_data_te > 3.5, 1.0, 0.0)
# retrieve real ratings from initial dataset
test_data_te_ratings=pd.DataFrame(test_data_te_ratings)
val_data_te_ratings=pd.DataFrame(val_data_te_ratings)
for index,i in df_low_rating.iterrows():
user_old= i['userId'] # old value
item_old=i['movieId'] # old value
if (test_map_users.get(user_old) is not None) and (test_map_items.get(item_old) is not None) :
user_new=test_map_users.get(user_old) # new value
item_new=test_map_items.get(item_old) # new value
rating=i['rating']
test_data_te_ratings.at[user_new,item_new]= rating
if (val_map_users.get(user_old) is not None) and (val_map_items.get(item_old) is not None) :
user_new=val_map_users.get(user_old) # new value
item_new=val_map_items.get(item_old) # new value
rating=i['rating']
val_data_te_ratings.at[user_new,item_new]= rating
val_data_te_ratings=val_data_te_ratings.to_numpy()
test_data_te_ratings=test_data_te_ratings.to_numpy()
# In[27]:
disable_eager_execution()
svae_model = SVAE.StandardVAE(n_users=train_data.shape[0],
original_dim=train_data.shape[1],
intermediate_dim=200,
latent_dim=64,
n_epochs=400,
batch_size=100,
k=10,
verbose=0,
seed=123,
drop_encoder=0.5,
drop_decoder=0.5,
annealing=False,
beta=1.0
)
svae_model.fit(x_train=train_data,
x_valid=val_data,
x_val_tr=val_data_tr,
x_val_te=val_data_te_ratings,
mapper=am_val
)
# ### Recommend with SVAE
# In[28]:
# Model prediction on the training part of test set
top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True)
# Convert sparse matrix back to df
recommendations = am_test.map_back_sparse(top_k, kind='prediction')
test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings') # use test_data_te_, with the original ratings
# Create column with the predicted movie's rank for each user
top_k = recommendations.copy()
top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1 # For each user, only include movies recommendations that are also in the test set
recs.append(top_k)
# ## Singular Value Decomposition (SVD)
# ### SVD++
# In[29]:
surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset()
svdpp = surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10, verbose=True)
svdpp.fit(surprise_train)
# ### SVD
# In[30]:
svd = surprise.SVD(random_state=0, n_factors=64, n_epochs=10, verbose=True)
svd.fit(surprise_train)
# ### Recommend with SVD++ and SVD
# In[31]:
for model in [svdpp, svd]:
predictions = []
users = train['userId'].unique()
items = train['movieId'].unique()
for user in users:
for item in items:
predictions.append([user, item, model.predict(user, item).est])
predictions = pd.DataFrame(predictions, columns=['userId', 'movieId', 'prediction'])
# Remove movies already seen by users
# Create column of all 1s
temp = train[['userId', 'movieId']].copy()
temp['seen'] = 1
# Outer join and remove movies that have alread been seen (seen=1)
merged = pd.merge(temp, predictions, on=['userId', 'movieId'], how="outer")
merged = merged[merged['seen'].isnull()].drop('seen', axis=1)
# Create filter for users that appear in both the train and test set
common_users = set(test['userId']).intersection(set(predictions['userId']))
# Filter the test and predictions so they have the same users between them
test_common = test[test['userId'].isin(common_users)]
svd_pred_common = merged[merged['userId'].isin(common_users)]
if len(set(merged['userId'])) != len(set(test['userId'])):
print('Number of users in train and test are NOT equal')
print(f"# of users in train and test respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}")
print(f"# of users in BOTH train and test: {len(set(svd_pred_common['userId']))}")
continue
# From the predictions, we want only the top k for each user,
# not all the recommendations.
# Extract the top k recommendations from the predictions
top_movies = svd_pred_common.groupby('userId', as_index=False).apply(lambda x: x.nlargest(10, 'prediction')).reset_index(drop=True)
top_movies['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1
top_k = top_movies.copy()
top_k['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 # For each user, only include movies recommendations that are also in the test set
recs.append(top_k)
# # Compare performance
# Looking at all 5 of our models, we can see that the state-of-the-art model LightGCN vastly outperforms all other models. When compared to SVD++, a widely used algorithm during the Netflix Prize competition, LightGCN achieves an increase in **Precision@k by 29%, Recall@k by 18%, MAP by 12%, and NDCG by 35%**.
#
# NGCF is the older sister model to LightGCN, but only by a single year. We can see how LightGCN improves in ranking metrics compared to NGCF by simply removing unnecessary operations.
#
# In conclusion, this demonstrates how far recommendation systems have advanced since 2009, and how new model architectures with notable performance increases can be developed in the span of just 1-2 years.
# In[32]:
model_names = ['LightGCN', 'NGCF', 'SVAE', 'SVD++', 'SVD']
comparison = pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG'])
# Convert test user ids to the new ids
users = np.array([user2id[x] for x in test['userId'].unique()])
for rec, name in zip(recs, model_names):
tester = test_df if name == 'SVAE' else test
pak = metrics.precision_at_k(rec, tester, 'userId', 'movieId', 'rank')
rak = metrics.recall_at_k(rec, tester, 'userId', 'movieId', 'rank')
    map_score = metrics.mean_average_precision(rec, tester, 'userId', 'movieId', 'rank')
    ndcg = metrics.ndcg(rec, tester, 'userId', 'movieId', 'rank')
    comparison.loc[len(comparison)] = [name, pak, rak, map_score, ndcg]
# In[33]:
comparison
# # References:
#
# 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation, 2020, https://arxiv.org/abs/2002.02126
# 2. <NAME>, <NAME>, <NAME>, <NAME>, & <NAME>, Neural Graph Collaorative Filtering, 2019, https://arxiv.org/abs/1905.08108
# 3. Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb
# 4. <NAME>, Netflix Prize and SVD, 2014, https://www.semanticscholar.org/paper/Netflix-Prize-and-SVD-Gower/ce7b81b46939d7852dbb30538a7796e69fdd407c
#
| 2.171875 | 2 |
exhal/daemon.py | nficano/exhal | 0 | 12792841 | import atexit
import os
import sys
import time
from contextlib import suppress
from signal import SIGTERM
class Daemon:
def __init__(self, pidfile=None):
self.pidfile = pidfile or os.path.join("/var/run/exhal.service")
def start(self):
try:
self.get_pidfile()
except IOError:
pass
finally:
self.daemonize()
self.run()
def stop(self):
try:
pid = self.get_pidfile()
except IOError:
return
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError as err:
e = str(err.args)
if e.find("No such process") > 0:
self.delete_pidfile()
else:
sys.exit(1)
def daemonize(self):
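        # Classic UNIX double fork: the first fork returns control to the caller's
        # shell, setsid() detaches from the controlling terminal, and the second
        # fork ensures the daemon can never reacquire a terminal.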
self.fork()
os.chdir("/")
os.setsid()
os.umask(0)
self.fork()
atexit.register(self.delete_pidfile)
self.create_pidfile()
def fork(self):
try:
if os.fork() > 0:
sys.exit(0)
except OSError as err:
self.error(f"failed to fork a child process. Reason: {err}\n")
def delete_pidfile(self):
with suppress(FileNotFoundError):
os.remove(self.pidfile)
def create_pidfile(self):
with open(self.pidfile, "w+") as fh:
fh.write(str(os.getpid()) + "\n")
def get_pidfile(self):
with open(self.pidfile, "r") as fh:
return int(fh.read().strip())
def error(self, message):
sys.stderr.write(f"{message}\n")
sys.exit(1)
def restart(self):
self.stop()
self.start()
def run(self):
raise NotImplementedError
| 2.375 | 2 |
ctpn/export_image.py | kspook/text-detection-ctpn01 | 0 | 12792842 | <reponame>kspook/text-detection-ctpn01
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os, sys, cv2
from tensorflow.python.platform import gfile
import glob
import shutil
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(dir_path, '..'))
from lib.networks.factory import get_network
from lib.fast_rcnn.config import cfg, cfg_from_file
from lib.fast_rcnn.test import test_ctpn
from lib.utils.timer import Timer
from lib.text_connector.detectors import TextDetector
from lib.text_connector.text_connect_cfg import Config as TextLineCfg
from lib.fast_rcnn.test import _get_blobs
from lib.rpn_msr.proposal_layer_tf import proposal_layer
dir_path = os.path.dirname(os.path.realpath(__file__))
def resize_im(im, scale, max_scale=None):
f = float(scale) / min(im.shape[0], im.shape[1])
if max_scale != None and f * max(im.shape[0], im.shape[1]) > max_scale:
f = float(max_scale) / max(im.shape[0], im.shape[1])
return cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f
def preprocess_image(image_buffer):
"""Preprocess JPEG encoded bytes to 3D float Tensor."""
# Decode the string as an RGB JPEG.
# Note that the resulting image contains an unknown height and width
# that is set dynamically by decode_jpeg. In other words, the height
# and width of image is unknown at compile-time.
image = tf.image.decode_image(image_buffer, channels=3)
    image.set_shape([None, None, 3])  # 3-D (H, W, C); height and width stay dynamic
# self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes')
# After this point, all image pixels reside in [0,1)
# until the very end, when they're rescaled to (-1, 1). The various
# adjust_* ops all require this range for dtype float.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
image = tf.image.central_crop(image, central_fraction=0.875)
image = tf.expand_dims(image, 0)
image = tf.squeeze(image, [0])
# Finally, rescale to [-1,1] instead of [0, 1)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
def query_ctpn(sess, cv2img):
"""Args:
sess: tensorflow session
cfg: CTPN config
img: numpy array image
Returns:
A list of detected bounding boxes,
each bounding box have followed coordinates: [(xmin, ymin), (xmax, ymax)]
(xmin, ymin) -------------
| |
---------------- (xmax, ymax)
"""
# Specify input/output
input_img = sess.graph.get_tensor_by_name('Placeholder:0')
output_cls_box = sess.graph.get_tensor_by_name('Reshape_2:0')
output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0')
#print('query_pb : img, ', img)
img, scale = resize_im(cv2img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)
blobs, im_scales = _get_blobs(img, None)
if cfg.TEST.HAS_RPN:
im_blob = blobs['data']
blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
dtype=np.float32)
cls_prob, box_pred = sess.run([output_cls_box, output_box_pred],
feed_dict={input_img: blobs['data']})
#print('cls_prob, ', cls_prob, box_pred )
print('box_pred, ', box_pred )
rois, _ = proposal_layer(cls_prob, box_pred, blobs['im_info'],
'TEST', anchor_scales=cfg.ANCHOR_SCALES)
print('rois, ', rois)
scores = rois[:, 0]
#print('scores, ', scores )
boxes = rois[:, 1:5] / im_scales[0]
#print('boxes=rois, ', boxes )
textdetector = TextDetector()
print('textDetector, ', textdetector )
boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
print('boxes=textdetector, ', boxes )
# Convert boxes to bouding rectangles
rects = []
for box in boxes:
min_x = min(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale))
min_y = min(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale))
max_x = max(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale))
max_y = max(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale))
rects.append([(min_x, min_y), (max_x, max_y)])
print('rects.append, ', rects)
return rects
def export():
'''
No 1 Sess outf of 2 : ctpn_sess
'''
cfg_from_file(os.path.join(dir_path, 'text_post.yml'))
config = tf.ConfigProto(allow_soft_placement=True)
ctpn_sess = tf.Session(config=config)
with ctpn_sess.as_default():
with tf.gfile.FastGFile('../data/ctpn.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
ctpn_sess.graph.as_default()
tf.import_graph_def(graph_def, name='')
ctpn_sess.run(tf.global_variables_initializer())
cv2img = cv2.imread("../data/demo/006.jpg", cv2.IMREAD_COLOR)
result_boxes=query_ctpn(ctpn_sess, cv2img)
print('Creating boxes done')
'''
No 2 Sess outf of 2:sess
'''
with tf.Session() as sess:
with gfile.FastGFile('../data/ctpn.pb', 'rb') as f:
restored_graph_def = tf.GraphDef()
restored_graph_def.ParseFromString(f.read())
tf.import_graph_def(
restored_graph_def,
input_map=None,
return_elements=None,
name=""
)
'''
export_path_base = args.export_model_dir
export_path = os.path.join(tf.compat.as_bytes(export_path_base),
tf.compat.as_bytes(str(args.model_version)))
'''
builder = tf.saved_model.builder.SavedModelBuilder('../exportPo/1')
#print('Exporting trained model to', export_path)
print('Exporting trained model ')
raw_image = tf.placeholder(tf.string, name='tf_box')
feature_configs = {
'image/encoded': tf.FixedLenFeature(
shape=[], dtype=tf.string),
}
tf_example = tf.parse_example(raw_image , feature_configs)
jpegs = tf_example['image/encoded']
image_string = tf.reshape(jpegs, shape=[])
jpeg= preprocess_image(image_string)
print('jpeg,jpeg.shape[]', jpeg, jpeg.shape)
output_tensor_cls_prob,output_tensor_box_pred = tf.import_graph_def\
(tf.get_default_graph().as_graph_def(),
input_map={'Placeholder:0': jpeg},
return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0'])
tensor_info_input = tf.saved_model.utils.build_tensor_info(raw_image)
tensor_info_output_cls_prob = tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob)
tensor_info_output_box_pred = tf.saved_model.utils.build_tensor_info(output_tensor_box_pred)
'''
#crop_resize_img,crop_resize_im_info = resize_im(cv2img, result_boxes)
#crop_resize_img,crop_resize_im_info = crop_resize_image(imageplaceholder_info, result_boxes)
# output_crop_resize_img = tf.saved_model.utils.build_tensor_info(crop_resize_img)
#output_crop_resize_img_info = tf.saved_model.utils.build_tensor_info(crop_resize_im_info)
#----------
'''
result_boxes= np.array(result_boxes, dtype=np.float32)
result_boxes= tf.convert_to_tensor(result_boxes)
tensor_info_output_boxes = tf.saved_model.utils.build_tensor_info(result_boxes)
prediction_post_signature = (
tf.saved_model.signature_def_utils.build_signature_def(
inputs={'images': tensor_info_input},
outputs={'detection_boxes': tensor_info_output_boxes},
#outputs={'detection_boxes': tensor_info_output_boxes,
# 'resize_im_info':im_info_output,
# 'crop_resize_img': output_crop_resize_img,
# 'crop_resize_im_info': output_crop_resize_img_info,},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
))
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
# 'predict_images':prediction_signature,
'predict_images_post': prediction_post_signature
})
builder.save(as_text=False)
if __name__ == '__main__':
export()
| 2.609375 | 3 |
HDPython/hdl_converter.py | HardwareDesignWithPython/HDPython | 0 | 12792843 | <reponame>HardwareDesignWithPython/HDPython
def get_dependency_objects(obj, dep_list):
return obj.__hdl_converter__.get_dependency_objects(obj, dep_list)
def ops2str(obj, ops):
return obj.__hdl_converter__.ops2str(ops)
def get_MemfunctionCalls(obj):
return obj.__hdl_converter__.get_MemfunctionCalls(obj)
def FlagFor_TemplateMissing(obj):
obj.__hdl_converter__.FlagFor_TemplateMissing(obj)
def reset_TemplateMissing(obj):
obj.__hdl_converter__.reset_TemplateMissing(obj)
def isTemplateMissing(obj):
return obj.__hdl_converter__.isTemplateMissing(obj)
def IsSucessfullConverted(obj):
return obj.__hdl_converter__.IsSucessfullConverted(obj)
def convert_all_packages(obj, ouputFolder, x, FilesDone):
return obj.__hdl_converter__.convert_all_packages(obj, ouputFolder, x, FilesDone)
def convert_all_entities(obj, ouputFolder, x, FilesDone):
return obj.__hdl_converter__.convert_all_entities(obj, ouputFolder, x, FilesDone)
def convert_all_impl(obj, ouputFolder, FilesDone):
return obj.__hdl_converter__.convert_all_impl(obj, ouputFolder, FilesDone)
def convert_all(obj, ouputFolder):
return obj.__hdl_converter__.convert_all(obj, ouputFolder)
def get_primary_object(obj):
return obj.__hdl_converter__.get_primary_object(obj)
def get_packet_file_name(obj):
return obj.__hdl_converter__.get_packet_file_name(obj)
def get_packet_file_content(obj):
return obj.__hdl_converter__.get_packet_file_content(obj)
def get_enity_file_content(obj):
return obj.__hdl_converter__.get_enity_file_content(obj)
def get_entity_file_name(obj):
return obj.__hdl_converter__.get_entity_file_name(obj)
def get_type_simple(obj):
return obj.__hdl_converter__.get_type_simple(obj)
def get_type_simple_template(obj):
return obj.__hdl_converter__.get_type_simple_template(obj)
def impl_constructor(obj):
return obj.__hdl_converter__.impl_constructor(obj)
def parse_file(obj):
return obj.__hdl_converter__.parse_file(obj)
def impl_includes(obj, name, parent):
return obj.__hdl_converter__.impl_includes(obj, name, parent)
def def_includes(obj, name, parent):
return obj.__hdl_converter__.def_includes(obj, name, parent)
def def_record_Member(obj, name, parent, Inout=None):
return obj.__hdl_converter__.def_record_Member(obj, name, parent, Inout)
def def_record_Member_Default(obj, name, parent, Inout=None):
return obj.__hdl_converter__.def_record_Member_Default(obj, name, parent, Inout)
def def_packet_header(obj, name, parent):
return obj.__hdl_converter__.def_packet_header(obj, name, parent)
def def_packet_body(obj, name, parent):
return obj.__hdl_converter__.def_packet_body(obj, name, parent)
def impl_entity_port(obj, name):
return obj.__hdl_converter__.impl_entity_port(obj, name)
def impl_function_argument(obj, func_arg, arg):
return obj.__hdl_converter__.impl_function_argument(obj, func_arg, arg)
def impl_get_attribute(obj, attName,parent = None):
return obj.__hdl_converter__.impl_get_attribute(obj, attName, parent)
def impl_slice(obj, sl, astParser=None):
return obj.__hdl_converter__.impl_slice(obj, sl, astParser)
def impl_compare(obj, ops, rhs, astParser=None):
return obj.__hdl_converter__.impl_compare(obj, ops, rhs, astParser)
def impl_add(obj, args):
return obj.__hdl_converter__.impl_add(obj, args)
def impl_sub(obj, args):
return obj.__hdl_converter__.impl_sub(obj, args)
def impl_to_bool(obj, astParser):
return obj.__hdl_converter__.impl_to_bool(obj, astParser)
def impl_bit_and(obj, rhs, astParser):
return obj.__hdl_converter__.impl_bit_and(obj, rhs, astParser)
def function_name_modifier(obj, name, varSigSuffix):
return obj.__hdl_converter__.function_name_modifier(obj, name, varSigSuffix)
def impl_get_value(obj, ReturnToObj=None, astParser=None):
return obj.__hdl_converter__.impl_get_value(obj, ReturnToObj, astParser)
def impl_reasign_type(obj):
return obj.__hdl_converter__.impl_reasign_type(obj)
def impl_reasign(obj, rhs, astParser=None, context_str=None):
return obj.__hdl_converter__.impl_reasign(obj, rhs, astParser, context_str)
def impl_reasign_rshift_(obj, rhs, astParser=None, context_str=None):
return obj.__hdl_converter__.impl_reasign_rshift_(obj, rhs, astParser, context_str)
def get_call_member_function(obj, name, args):
return obj.__hdl_converter__.get_call_member_function(obj, name, args)
def impl_function_call(obj, name, args, astParser=None):
return obj.__hdl_converter__.impl_function_call(obj=obj, name=name, args=args, astParser=astParser)
def impl_symbol_instantiation(obj, VarSymb="variable"):
return obj.__hdl_converter__.impl_symbol_instantiation(obj, VarSymb)
def impl_architecture_header(obj):
prepare_for_conversion(obj)
return obj.__hdl_converter__.impl_architecture_header(obj)
def impl_architecture_body(obj):
return obj.__hdl_converter__.impl_architecture_body(obj)
def impl_multi(obj,args):
return obj.__hdl_converter__.impl_multi(obj, args)
def def_entity_port(obj):
prepare_for_conversion(obj)
return obj.__hdl_converter__.def_entity_port(obj)
def impl_process_header(obj):
return obj.__hdl_converter__.impl_process_header(obj)
def impl_process_sensitivity_list(obj):
return obj.__hdl_converter__.impl_process_sensitivity_list(obj)
def impl_process_pull(obj,clk):
return obj.__hdl_converter__.impl_process_pull(obj,clk)
def impl_process_push(obj,clk):
return obj.__hdl_converter__.impl_process_push(obj,clk)
def impl_enter_rising_edge(obj):
return obj.__hdl_converter__.impl_enter_rising_edge(obj)
def impl_exit_rising_edge(obj):
return obj.__hdl_converter__.impl_exit_rising_edge(obj)
def get_assiment_op(obj):
return obj.__hdl_converter__.get_assiment_op(obj)
def get_Inout(obj,parent):
return obj.__hdl_converter__.get_Inout(obj,parent)
def InOut_t2str2(obj, inOut):
return obj.__hdl_converter__.InOut_t2str2(inOut)
def InOut_t2str(obj):
return obj.__hdl_converter__.InOut_t2str(obj)
def get_default_value(obj):
return obj.__hdl_converter__.get_default_value(obj)
def extract_conversion_types(obj, exclude_class_type=None, filter_inout=None):
return obj.__hdl_converter__.extract_conversion_types(obj, exclude_class_type, filter_inout)
def get_Name_array(obj):
return obj.__hdl_converter__.get_Name_array(obj)
def length(obj):
return obj.__hdl_converter__.length(obj)
def to_arglist(obj, name, parent, withDefault=False, astParser=None):
return obj.__hdl_converter__.to_arglist(obj, name, parent, withDefault, astParser)
def get_inout_type_recursive(obj):
return obj.__hdl_converter__.get_inout_type_recursive(obj)
def Has_pushpull_function(obj, pushpull):
return obj.__hdl_converter__.Has_pushpull_function(obj, pushpull)
def get_free_symbols(obj, name, parent_list=[]):
return obj.__hdl_converter__.get_free_symbols(obj,name, parent_list)
def get_component_suffix(obj, Inout_type, varsignal_type):
return obj.__hdl_converter__.get_component_suffix(obj, Inout_type, varsignal_type)
def prepare_for_conversion(obj):
return obj.__hdl_converter__.prepare_for_conversion(obj)
def get_HDL_name(obj, parent,suffix):
return obj.__hdl_converter__.get_HDL_name(obj,parent,suffix)
def impl_get_init_values(obj,parent=None, InOut_Filter=None, VaribleSignalFilter = None,ForceExpand=False):
return obj.__hdl_converter__.impl_get_init_values(obj, parent, InOut_Filter, VaribleSignalFilter ,ForceExpand)
def get_extractedTypes(obj):
primary = get_primary_object(obj)
prepare_for_conversion(primary)
return primary.__hdl_converter__.extractedTypes
| 1.945313 | 2 |
Yatube/hw04_tests/posts/migrations/0010_auto_20201103_2339.py | abi83/YaPractice | 3 | 12792844 | <reponame>abi83/YaPractice
# Generated by Django 2.2.6 on 2020-11-03 16:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0009_auto_20201023_1643'),
]
operations = [
migrations.AlterField(
model_name='post',
name='title',
field=models.CharField(help_text='Назовите пост', max_length=200, verbose_name='Название поста'),
),
]
| 1.429688 | 1 |
{{cookiecutter.project_name}}/template_minimal/app/schemas/requests.py | rafsaf/respo-fastapi-template | 75 | 12792845 | <reponame>rafsaf/respo-fastapi-template<gh_stars>10-100
from pydantic import BaseModel, EmailStr
class BaseRequest(BaseModel):
# may define additional fields or config shared across requests
pass
class RefreshTokenRequest(BaseRequest):
refresh_token: str
class UserUpdatePasswordRequest(BaseRequest):
password: str
class UserCreateRequest(BaseRequest):
email: EmailStr
password: str
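# Illustrative payload (assumed example, not from the original template):
#     UserCreateRequest(email="user@example.com", password="s3cret")
# Note that EmailStr validation requires pydantic's optional 'email-validator' extra.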
| 2.09375 | 2 |
pyrtmp/__init__.py | mludolph/pyrtmp | 9 | 12792846 | import asyncio
import os
from asyncio import StreamReader, StreamWriter, AbstractEventLoop, \
WriteTransport, Task, BaseTransport
from io import BytesIO
from typing import Any, List, Optional, Mapping
from bitstring import tokenparser, BitStream
def random_byte_array(size: int) -> bytes:
return os.urandom(size)
class StreamClosedException(Exception):
pass
class FIFOStream:
def __init__(self, reader: StreamReader) -> None:
self.reader = reader
self.buffer = BitStream()
self.total_bytes = 0
super().__init__()
async def read(self, fmt):
_, token = tokenparser(fmt)
assert len(token) == 1
name, length, _ = token[0]
assert length is not None
bit_needed = int(length) - (self.buffer.length - self.buffer.pos)
while bit_needed > 0:
new_data = await self.reader.read(4096)
if len(new_data) == 0:
raise StreamClosedException()
self.buffer.append(new_data)
bit_needed = int(length) - (self.buffer.length - self.buffer.pos)
self.total_bytes += length
value = self.buffer.read(fmt)
del self.buffer[:length]
self.buffer.bitpos = 0
return value
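# Illustrative usage (assumed, not part of the original module): read() consumes exactly one
# bitstring format token per call, e.g.
#     fifo = FIFOStream(reader)
#     first_byte = await fifo.read('uintbe:8')     # one big-endian unsigned byte
#     timestamp  = await fifo.read('uintbe:24')    # a 24-bit big-endian field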
class BufferedWriteTransport(WriteTransport):
def __init__(self, buffer: BytesIO, extra: Optional[Mapping[Any, Any]] = ...) -> None:
self._buffer = buffer
self._closing = False
self._closed = False
super().__init__(extra)
def set_write_buffer_limits(self, high: Optional[int] = ..., low: Optional[int] = ...) -> None:
raise NotImplementedError
def get_write_buffer_size(self) -> int:
raise NotImplementedError
def write(self, data: Any) -> None:
self._buffer.write(data)
def writelines(self, list_of_data: List[Any]) -> None:
raise NotImplementedError
def write_eof(self) -> None:
raise NotImplementedError
def can_write_eof(self) -> bool:
return False
def abort(self) -> None:
raise NotImplementedError
def is_closing(self) -> bool:
return self._closing is True or self._closed is True
def close(self) -> None:
self._closing = True
self._closed = True
class RTMPProtocol(asyncio.Protocol):
def __init__(self, controller, loop: AbstractEventLoop) -> None:
self.loop: AbstractEventLoop = loop
self.transport: BaseTransport = None
self.reader: StreamReader = None
self.writer: StreamWriter = None
self.controller = controller
self.task: Task = None
super().__init__()
def connection_made(self, transport):
self.reader = StreamReader(loop=self.loop)
self.writer = StreamWriter(transport,
self,
self.reader,
self.loop)
self.task = self.loop.create_task(self.controller(self.reader, self.writer))
def connection_lost(self, exc):
self.reader.feed_eof()
def data_received(self, data):
self.reader.feed_data(data)
async def _drain_helper(self):
pass
async def _get_close_waiter(self, stream: StreamWriter):
return self.task
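# Minimal serving sketch (assumed usage, not part of this module): RTMPProtocol is a standard
# asyncio.Protocol, so a server could be created with something like
#     server = await loop.create_server(lambda: RTMPProtocol(controller, loop), '0.0.0.0', 1935)
# where 1935 is the conventional RTMP port and 'controller' is an async callable
# taking (reader, writer).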
| 2.515625 | 3 |
transposition_cyphers/onetimepad.py | MahatKC/Daniao | 2 | 12792847 | def modulo_sum(x,y):
return (x+y)%26
def alphabet_position(char):
return ord(char.lower())-97
def character_sum(a,b):
new_char_index = modulo_sum(alphabet_position(a),alphabet_position(b))
return chr(new_char_index+97)
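# Worked example: character_sum('h', 'k') == 'r', since h -> 7, k -> 10,
# (7 + 10) % 26 == 17 and chr(17 + 97) == 'r'.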
def one_time_pad(plaintext, cypher):
cyphertext = "".join([character_sum(plaintext[i],cypher[i%len(cypher)]) for i in range(len(plaintext))])
return cyphertext
if __name__ == "__main__":
    # The key must be a string (it is indexed character by character); the original call
    # passed the integer 2, which raises a TypeError in len(). "gokeys" is an arbitrary
    # example key; a key shorter than the plaintext makes this a Vigenere cipher rather
    # than a true one-time pad.
    x = one_time_pad("hermes", "gokeys")
    print(x) | 3.421875 | 3 |
rtcat/get_knpsr.py | mpsurnis/greenburst | 0 | 12792848 | <gh_stars>0
#!/usr/bin/env python3.5
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import argparse
from catalog_utils import gen_catalog
from webcrawler import *
# Generate the catalog using the rtcat function
cat = gen_catalog()
# Some useful functions
def hmstodeg(ra):
# Convert ra from hms to decimal degree
# First convert to decimal hours
dhrs = int(ra[0]) + (int(ra[1])/60.) + (float(ra[2])/3600.)
# Now convert to decimal degree
ddeg = dhrs*15.
return ddeg
def dmstodeg(dec):
# Convert dec from dms to decimal degree
# Generate a flag for positive or negative dec
flag = 1
if (int(dec[0]) < 0):
flag = -1
decdeg = abs(int(dec[0]))
ddec = decdeg + (int(dec[1])/60.) + (float(dec[2])/3600.)
ddec = flag*ddec
return ddec
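# Worked examples: hmstodeg(["12", "30", "00"]) == 187.5 (12.5 h * 15 deg/h) and
# dmstodeg(["-10", "30", "00"]) == -10.5.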
if __name__ == '__main__':
# Parse command line arguments
parser = argparse.ArgumentParser(description='Provide information about any known pulsar, RRAT or FRB in the beam')
parser.add_argument('-ra', dest = 'raq', action = 'store', metavar = 'RAJ', required=True, help='RAJ in decimal degree')
parser.add_argument('-dec', dest = 'decq', action = 'store', metavar = 'DECJ', required=True, help='DECJ in decimal degree')
args = parser.parse_args()
# Compare with catalog entries and print out relevant information
length = len(cat["entries"])
outarr = []
for i in range(length):
catra = cat["entries"][i]["RA"]
raobj = catra.split(":",3)
catdec = cat["entries"][i]["DEC"]
decobj = catdec.split(":",3)
#check if there are blanks and fill them with zeros
if (len(raobj) < 3):
if (len(raobj) == 2):
raobj.append("0.0")
else:
raobj.append("0")
raobj.append("0.0")
if (len(decobj) < 3):
if (len(decobj) == 2):
decobj.append("0.0")
else:
decobj.append("0")
decobj.append("0.0")
#print("Index is:",i)
#if i==343:
# print(cat["entries"][i]["sources"][0]["data"])
# Now calculate separation in arcmin and put out values if sep < 10'
racand = float(args.raq)
deccand = float(args.decq)
racat = hmstodeg(raobj)
deccat = dmstodeg(decobj)
dlra = abs(racand - racat)
dldec = abs(deccand - deccat)
        # Flat-sky approximation: the RA offset is not scaled by cos(dec), so the
        # separation is only approximate away from the equator.
        sep = ((dlra**2 + dldec**2)**0.5)*60.
if (sep < 10.):
#Print DM if not NRAD and 0.0 otherwise
try:
if cat["entries"][i]["sources"][0]["data"]["TYPE"]:
print('%.2f'%sep,cat["entries"][i]["Name"],cat["entries"][i]["RA"],cat["entries"][i]["DEC"],"0.0",cat["entries"][i]["sources"][0]["Name"])
except:
pass
if cat["entries"][i]["sources"][0]["Name"] == "ATNF":
print('%.2f'%sep,cat["entries"][i]["Name"],cat["entries"][i]["RA"],cat["entries"][i]["DEC"],cat["entries"][i]["sources"][0]["data"]["DM"][0],cat["entries"][i]["sources"][0]["Name"])
else:
print('%.2f'%sep,cat["entries"][i]["Name"],cat["entries"][i]["RA"],cat["entries"][i]["DEC"],cat["entries"][i]["sources"][0]["data"]["DM"],cat["entries"][i]["sources"][0]["Name"])
#outarr.append(cat["entries"][i]["sources"][0]["data"])
#print(outarr)
| 2.765625 | 3 |
app/utils/poster.py | Xerrors/Meco-Server | 1 | 12792849 | import os
import json
from app.config import DATA_PATH
from app.utils.mass import string_to_md5
"""
cover:
link:
text:
type:
top:
"""
def get_posters():
with open(os.path.join(DATA_PATH, 'poster.json'), 'r') as f:
data = json.load(f)
return data['data']
def save_poster(data):
with open(os.path.join(DATA_PATH, 'poster.json'), 'w') as f:
json.dump({'data': data}, f)
def add_poster(post:dict):
data = get_posters()
post['id'] = string_to_md5(post['link'])
if post['top']:
for i in data:
i['top'] = False
    # Otherwise, append to the existing data
data.append(post)
save_poster(data)
return "添加成功"
def set_as_top(_id):
data = get_posters()
for i in data:
if i['id'] == _id:
i['top'] = True
else:
i['top'] = False
save_poster(data)
def delete_poster(_id):
data = get_posters()
if len(data) == 1:
return data
for i in range(len(data)):
if data[i]['id'] == _id:
del data[i]
break
save_poster(data)
return data
| 2.671875 | 3 |
stable_world/output/error_output.py | StableWorld/stable.world | 0 | 12792850 | from __future__ import print_function, unicode_literals
import sys
import os
import traceback
import time
from requests.utils import quote
from requests.exceptions import ConnectionError
import click
from stable_world.py_helpers import platform_uname
from stable_world import __version__ as version
from stable_world import errors
original_excepthook = sys.excepthook
from stable_world.py_helpers import PY3
if PY3:
unicode = str
def write_error_log(cache_dirname, exctype, value, tb):
'''
    Write the exception to the log file
'''
logfile = os.path.join(cache_dirname, 'logs', 'debug.txt')
try:
with open(logfile, 'w') as fd:
uname = platform_uname()
header = '[Unhandled Exception at {}] system={}, stable.world version: {}'
print(header.format(time.ctime(), uname.system, version), file=fd)
tb = '\n'.join(traceback.format_exception(exctype, value, tb))
print(tb, file=fd)
click.echo('\n Wrote full traceback to "{}"\n'.format(logfile), err=True)
except Exception:
click.echo("Failed to write logfile", err=True)
original_excepthook(exctype, value, tb)
def brief_excepthook(cache_dirname):
"""
    Shorten exceptions with the base class errors.UserError; unexpected
    exceptions get their full traceback written to the debug log instead.
"""
def inner(exctype, value, tb):
if issubclass(exctype, errors.BRIEF_ERRORS):
click.secho("\n\n {}: ".format(exctype.__name__), nl=False, fg='red', bold=True, err=True)
click.echo(unicode(value), err=True)
click.echo(err=True)
elif issubclass(exctype, ConnectionError):
click.secho("\n\n {}: ".format(exctype.__name__), nl=False, fg='red', bold=True, err=True)
click.echo('Could not connect to url "{}"'.format(value.request.url), err=True)
click.echo(err=True)
else:
msg = "\n\n Critical! Unhandled Exception\n {}: ".format(exctype.__name__)
click.secho(msg, nl=False, fg='red', bold=True, err=True)
click.echo(unicode(value), err=True)
click.echo(err=True)
click.echo('\n Check for updates on this exception on the issue tracker:')
search_str = quote('is:issue {} "{}"'.format(exctype.__name__, value))
click.echo(' ', nl=False)
click.secho(
'https://github.com/srossross/stable.world/issues?q={}\n'.format(search_str),
fg='blue', underline=True, err=True
)
click.echo(' Or create a new issue:', err=True)
click.echo(' ', nl=False, err=True)
click.secho(
'https://github.com/srossross/stable.world/issues/new',
fg='blue', underline=True, err=True
)
write_error_log(cache_dirname, exctype, value, tb)
return
return inner
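# Presumed installation point (not shown in this module): the returned hook matches the
# sys.excepthook signature, e.g.
#     sys.excepthook = brief_excepthook(cache_dirname)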
| 2.375 | 2 |