max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
server.py | kaseykwong/bme590final | 0 | 12796251 |
from flask import Flask, request
from pymodm import connect
from pymodm import MongoModel, fields, errors
import pymodm
import requests
from datetime import datetime
import logging
connect("mongodb://almostdone:<EMAIL>:45148/bme590final")
app = Flask(__name__)
class HeadData(MongoModel):
season = fields.IntegerField()
pin_number = fields.CharField()
date_measured = fields.CharField()
encoded_binary = fields.CharField()
time = fields.CharField()
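# Added illustration (hedged, not in the original module): a minimal sketch of
# fetching a saved HeadData record back with pymodm's raw query API, mirroring
# the lookup used in check_exist() below. The field values are hypothetical.
def _example_headdata_lookup():
    record = HeadData.objects.raw({"pin_number": "17",
                                   "date_measured": "06-01-2018",
                                   "time": "12:30:00"}).first()
    return record.encoded_binary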
@app.route("/api/download", methods=["POST"])
def download():
"""
Function downloads individual BIN files to server
Returns:
msg: status message of file download
"""
set_logging()
files = request.get_json()
if check_input(files) is True:
if check_exist(files) is False:
pin = files['Pin']
szn = files['Year']
date = files['Date']
data = files['Encoded .BIN file']
t = files['Time']
hd = HeadData(szn, pin, date, data, t)
hd.save()
file_info = {'pin_number': pin,
'Year': szn,
'Date': date,
'Time': t
}
logging.info(file_info)
msg = "Data saved"
print(msg)
return msg
else:
msg = "Data file already exists"
print(msg)
return "Data already exists"
else:
msg = "Data input not correct"
print(msg)
return "Data input not correct"
def download_all(files):
"""
Client Function downloads all .BIN folders in given
Pandas dataframe
Args:
files: datafram of all files
Returns:
result: boolean to determine if all files completed
"""
# count = 0
for row in files.itertuples():
pin = getattr(row, "Pin")
date = getattr(row, "Date")
time = getattr(row, "Time")
year = getattr(row, "Year")
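# pandas itertuples() renames columns that are not valid Python identifiers;
# the fifth column ('Encoded .BIN file') is therefore exposed as "_5"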
data = getattr(row, "_5")
file_info = {"Pin": pin,
"Date": date,
"Time": time,
"Year": year,
"Encoded .BIN file": data
}
# file_check = {'Pin': pin,
# 'Date': date,
# 'Time': time
# }
# if check_input(file_info) is True:
# if check_exist(file_check) is False:
# count = count + 1
# else:
# count = count
r = requests.post("http://127.0.0.1:5000/api/download", json=file_info)
# print(count)
# set_logging()
# logging.info(str(count)+" new files.")
return True
def check_input(file_info):
"""
Function finds all .BIN folders in given path
Args:
file_info: dictionary with individual file information
Pin, Date, Time, Year, Encoded .BIN file
Returns:
result: boolean to determine if file inputs were appropriate type
"""
msg = "Data is ok"
try:
    # the dict lookups, not int(), are what can raise KeyError
    pin = file_info['Pin']
    date = file_info['Date']
    time = file_info['Time']
    year = file_info['Year']
except KeyError:
    msg = "No pin number attached."
    print(msg)
    return False
try:
    int(pin)
except ValueError:
    msg = "Pin number is not a number"
    print(msg)
    return False
try:
datetime.strptime(date, "%m-%d-%Y")
except ValueError:
msg = "Date entry is not Month-Day-Year"
print(msg)
return False
try:
datetime.strptime(time, "%H:%M:%S")
except ValueError:
msg = "Time entry is not Hour:Minute:Second"
print(msg)
return False
try:
datetime.strptime(str(year), "%Y")
except ValueError:
msg = "Season entry is not Year"
print(msg)
return False
print(msg)
return True
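# Hedged usage sketch (addition): a hypothetical payload that satisfies
# check_input's rules -- numeric pin, Month-Day-Year date, Hour:Minute:Second
# time, and a four-digit year.
def _example_check_input():
    sample = {'Pin': '17',
              'Date': '06-01-2018',
              'Time': '12:30:00',
              'Year': 2018,
              'Encoded .BIN file': 'aGVsbG8='}
    return check_input(sample)  # prints "Data is ok" and returns True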
def check_exist(file_info):
"""
Function checks if file already exists in database
Args:
file_info: file info dictionary
Pin, Date, Time, Year, Encoded .BIN file
Returns:
result: boolean to determine if file was found.
True is file was found. False if not.
"""
pin = file_info['Pin']
date = file_info['Date']
time_in = file_info['Time']
try:
# HeadData.objects.raw({"date_measured": date,
# "time": time_in,
# "pin_number": str(pin)}).first()
HeadData.objects.raw({"pin_number": str(pin),
"date_measured": date,
"time": time_in}).first()
except pymodm.errors.DoesNotExist:
return False
return True
def create_new(file_info):
"""
Non flask function to create new database file
not necessary for server to run
Args:
file_info: file info dictionary
Pin, Date, Time, Year, Encoded .BIN file
Returns:
result: hd, HeadData object created
"""
set_logging()
pin = file_info['Pin']
szn = file_info['Year']
date = file_info['Date']
data = file_info['Encoded .BIN file']
t = file_info['Time']
hd = HeadData(szn, pin, date, data, t)
hd.save()
logging.info(file_info)
return hd
def set_logging():
app.logger.disabled = True
log = logging.getLogger('werkzeug')
log.disabled = True
logging.basicConfig(filename='data_server.txt',
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.DEBUG)
return
if __name__ == "__main__":
app.run(host="127.0.0.1", port=5000)
| 2.734375 | 3 |
tests/test_connection.py | accuenmedia/amazonadapi | 0 | 12796252 |
import unittest
import config
from amazonadapi.connection import Connection
class ConnectionTestCase(unittest.TestCase):
def test_connection(self):
connection = Connection(config.ad_client_id, config.ad_client_secret, config.region, config.refresh_token)
token = connection.auto_refresh_token()
print(token)
self.assertIsNotNone(token["access_token"])
self.assertIsNotNone(connection.token)
| 2.578125 | 3 |
GasGrid/OntoGasGrid/grid_component_owl_generator/closest_point_locate.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 0 | 12796253 | import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
pipelines = pd.read_csv('OntoGasGrid/pipeline_owl_generator/pipeline_split.csv').to_numpy()
offtakes = pd.read_csv('OntoGasGrid/grid_component_owl_generator/grid_component_data.csv').to_numpy()
n_offt = len(offtakes[:,0])
n_cons = len(pipelines[:,0])
closest_connection = np.zeros((n_offt,2),dtype=object)
def connection_name_get(i):
grid_line = pipelines[i,3]
connect_num = pipelines[i,8]
return grid_line + ' ' + str(connect_num) + ' Connection'
for i in tqdm(range(n_offt)):
if offtakes[i,2] != '#VALUE!':
dist_store = []
max_dist = 1000
off_lat = float(offtakes[i,2])
off_lng = float(offtakes[i,1])
for ii in range(n_cons):
con_lat = float(pipelines[ii,0])
con_lng = float(pipelines[ii,1])
dist = np.sqrt((off_lat-con_lat)**2+(off_lng-con_lng)**2)
if dist < max_dist:
closest_connection[i,0] = connection_name_get(ii)
closest_connection[i,1] = pipelines[ii,2]
max_dist = dist
pd.DataFrame(closest_connection).to_csv('OntoGasGrid/grid_component_owl_generator/closest connection.csv', index=False, header=False)
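# Note (addition): the Euclidean distance above treats a degree of latitude
# and a degree of longitude as equal-length units, which distorts distances
# away from the equator. A common refinement for geographic coordinates is the
# haversine great-circle distance; a minimal sketch using the numpy import
# already present:
def haversine_km(lat1, lng1, lat2, lng2):
    # great-circle distance in kilometres on a sphere of radius 6371 km
    lat1, lng1, lat2, lng2 = map(np.radians, [lat1, lng1, lat2, lng2])
    a = np.sin((lat2 - lat1) / 2) ** 2 + \
        np.cos(lat1) * np.cos(lat2) * np.sin((lng2 - lng1) / 2) ** 2
    return 2 * 6371 * np.arcsin(np.sqrt(a))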
| 2.75 | 3 |
governor/__init__.py | billcap/governor | 0 | 12796254 | import logging
import time
import os
import subprocess as sp
from governor.etcd import Client as Etcd
from governor.postgresql import Postgresql
from governor.ha import Ha
import etcd
class Governor:
INIT_SCRIPT_DIR = '/docker-entrypoint-initdb.d'
def __init__(self, config, psql_config):
self.advertise_url = config.advertise_url
self.loop_time = config.loop_time
self.connect_to_etcd(config)
self.psql = Postgresql(config, psql_config)
self.ha = Ha(self.psql, self.etcd)
self.name = self.psql.name
def run_init_scripts(self):
# run all the scripts /docker-entrypoint-initdb.d/*.sh
if not os.path.isdir(self.INIT_SCRIPT_DIR):
return
for file in os.listdir(self.INIT_SCRIPT_DIR):
file = os.path.join(self.INIT_SCRIPT_DIR, file)
if not file.endswith('.sh') or not os.path.isfile(file):
continue
logging.info('Running init script: %s', file)
if sp.call(['sh', file]) != 0:
logging.warning('Failed to run init script: %s', file)
def connect_to_etcd(self, config):
while True:
logging.info('waiting on etcd')
try:
self.etcd = Etcd(config)
except (ConnectionRefusedError, etcd.EtcdConnectionFailed) as e:
logging.error('Error communicating with etcd: %s', e)
else:
return
time.sleep(5)
def keep_alive(self):
value = self.advertise_url
try:
self.etcd.write_scoped(self.name, value, ttl=self.etcd.ttl, prevValue=value)
except etcd.EtcdKeyNotFound:
self.etcd.write_scoped(self.name, value, ttl=self.etcd.ttl, prevExist=False)
def initialize(self, force_leader=False):
self.keep_alive()
# is data directory empty?
if not self.psql.data_directory_empty():
self.load_psql()
elif not self.init_cluster(force_leader):
self.sync_from_leader()
self.run_init_scripts()
def init_cluster(self, force_leader=False):
try:
self.etcd.init_cluster(self.name)
except etcd.EtcdAlreadyExist:
if not force_leader:
return False
self.psql.initialize()
self.etcd.take_leadership(self.name, first = not force_leader)
self.psql.start()
self.psql.create_users()
return True
def sync_from_leader(self):
while True:
logging.info('resolving leader')
try:
cluster = self.etcd.get_cluster()
except etcd.EtcdKeyNotFound:
cluster = None
if cluster and cluster.leader:
logging.info('syncing with leader')
if self.psql.sync_from_leader(cluster.leader):
self.psql.write_recovery_conf(cluster.leader)
self.psql.start()
return True
time.sleep(5)
def load_psql(self):
self.psql.start()
if self.psql.is_running():
self.psql.load_replication_slots()
def run(self):
while True:
self.keep_alive()
logging.info(self.ha.run_cycle())
self.ha.sync_replication_slots()
time.sleep(self.loop_time)
def cleanup(self):
self.psql.stop()
self.etcd.delete(os.path.join(self.etcd.scope, self.name))
try:
self.etcd.vacate_leadership(self.name)
except (etcd.EtcdCompareFailed, etcd.EtcdKeyNotFound):
pass
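# Hedged usage sketch (addition): the intended lifecycle of the class above,
# assuming a parsed `config`/`psql_config` pair carrying the attributes read
# in __init__ (advertise_url, loop_time, etcd settings).
def _example_lifecycle(config, psql_config):
    governor = Governor(config, psql_config)
    governor.initialize(force_leader=False)
    try:
        governor.run()  # blocks, looping every config.loop_time seconds
    finally:
        governor.cleanup()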
| 2.140625 | 2 |
emptytime/timetable/models.py | hyeongonkim/OSS-emptytime | 0 | 12796255 |
from django.db import models
class User(models.Model):
token_text = models.CharField(max_length=100, null=True)
username_text = models.CharField(max_length=8)
pw_text = models.CharField(max_length=30)
def __str__(self):
return self.username_text | 2.453125 | 2 |
tensortrade/base/__init__.py | bwcknr/tensortrade | 34 | 12796256 | from .clock import Clock
from .component import *
from .context import *
from .core import *
from .exceptions import *
| 1.054688 | 1 |
networkit/test/test_matching_algorithms.py | angriman/network | 366 | 12796257 | #!/usr/bin/env python3
import random
import unittest
import networkit as nk
class TestMatchingAlgorithms(unittest.TestCase):
def generateRandomWeights(self, g):
if not g.isWeighted():
g = nk.graphtools.toWeighted(g)
for e in g.iterEdges():
g.setWeight(e[0], e[1], random.random())
return g
def setUp(self):
self.g = nk.readGraph("input/PGPgiantcompo.graph", nk.Format.METIS)
self.gw = self.generateRandomWeights(self.g)
def hasUnmatchedNeighbors(self, g, m):
for e in g.iterEdges():
if not m.isMatched(e[0]) and not m.isMatched(e[1]):
return True
return False
def testPathGrowingMatcher(self):
def runAlgo(g):
pgm = nk.matching.PathGrowingMatcher(g)
pgm.run()
m = pgm.getMatching()
runAlgo(self.g)
runAlgo(self.gw)
def testSuitorMatcher(self):
def doTest(g):
m1 = nk.matching.SuitorMatcher(g, False).run().getMatching()
nk.graphtools.sortEdgesByWeight(g, True)
self.assertTrue(m1.isProper(g))
self.assertFalse(self.hasUnmatchedNeighbors(g, m1))
m2 = nk.matching.SuitorMatcher(g, True).run().getMatching()
self.assertTrue(m2.isProper(g))
self.assertFalse(self.hasUnmatchedNeighbors(g, m2))
for u in g.iterNodes():
self.assertEqual(m1.mate(u), m2.mate(u))
doTest(self.g)
doTest(self.gw)
if __name__ == "__main__":
unittest.main()
| 2.703125 | 3 |
scripts/oauth_login.py | longShot-88/hello_twitter | 0 | 12796258 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import twitter
''' oauth_login into twitter '''
# Docs
# https://dev.twitter.com/oauth
# LogIn
def oauth_login():
# credentials for OAuth
CONSUMER_KEY = ' ---- '
CONSUMER_SECRET = ' ---- '
OAUTH_TOKEN = ' ---- '
OAUTH_TOKEN_SECRET = ' ---- '
# Creating the authentification
auth = twitter.oauth.OAuth( OAUTH_TOKEN, OAUTH_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET )
# Twitter instance
twitter_api = twitter.Twitter(auth=auth)
return twitter_api
# LogIn
twitter_api = oauth_login()
print(twitter_api)
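# Hedged follow-on sketch (addition): with the sixohsix `twitter` package used
# above, the returned object proxies REST endpoints as attributes, e.g.:
# statuses = twitter_api.statuses.home_timeline(count=5)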
| 2.6875 | 3 |
tests/unittests/test_subprocess.py | bossjones/scarlett-os | 5 | 12796259 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_subprocess
----------------------------------
"""
import builtins
import os
import re
import signal
import sys
from _pytest.monkeypatch import MonkeyPatch
import pytest
import scarlett_os
# R0201 = Method could be a function Used when a method doesn't use its bound instance,
# and so could be written as a function.
# pylint: disable=R0201
# pylint: disable=C0111
# source: https://github.com/pytest-dev/pytest/issues/363
@pytest.fixture(scope="session")
def monkeysession(request):
mpatch = MonkeyPatch()
yield mpatch
mpatch.undo()
@pytest.mark.scarlettonly
@pytest.mark.unittest
@pytest.mark.scarlettonlyunittest
class TestScarlettSubprocess(object):
"""Units tests for Scarlett Subprocess, subclass of GObject.Gobject."""
def test_check_pid_os_error(self, mocker):
mocker.stopall()
# mock
kill_mock = mocker.MagicMock(name=__name__ + "_kill_mock_OSError")
kill_mock.side_effect = OSError
# patch
mocker.patch.object(scarlett_os.subprocess.os, "kill", kill_mock)
# When OSError occurs, throw False
assert not scarlett_os.subprocess.check_pid(4353634632623)
# Verify that os.kill only called once
assert kill_mock.call_count == 1
mocker.stopall()
def test_check_pid(self, mocker):
mocker.stopall()
# mock
kill_mock = mocker.MagicMock(name=__name__ + "_kill_mock")
mocker.patch.object(scarlett_os.subprocess.os, "kill", kill_mock)
result = scarlett_os.subprocess.check_pid(123)
assert kill_mock.called
# NOTE: test against signal 0
# sending the signal 0 to a given PID just checks if any
# process with the given PID is running and you have the
# permission to send a signal to it.
kill_mock.assert_called_once_with(123, 0)
assert result is True
mocker.stopall()
def test_subprocess_init(self, mocker):
mocker.stopall()
mock_check_command_type = mocker.MagicMock(
name=__name__ + "_mock_check_command_type"
)
mock_check_command_type.return_value = True
mock_fork = mocker.MagicMock(name=__name__ + "_mock_fork")
mock_logging_debug = mocker.MagicMock(name=__name__ + "_mock_logging_debug")
# mock
mocker.patch.object(
scarlett_os.subprocess.logging.Logger, "debug", mock_logging_debug
)
mocker.patch.object(
scarlett_os.subprocess.Subprocess,
"check_command_type",
mock_check_command_type,
)
mocker.patch.object(scarlett_os.subprocess.Subprocess, "fork", mock_fork)
# NOTE: the command must be a list of strings
test_command = ["who"]
test_name = "test_who"
test_fork = False
s_test = scarlett_os.subprocess.Subprocess(
test_command, name=test_name, fork=test_fork
)
# action
assert s_test.check_command_type(test_command) is True
mock_check_command_type.assert_called_with(["who"])
assert not s_test.process
assert not s_test.pid
assert s_test.name == "test_who"
assert not s_test.forked
assert s_test.stdout is True
assert s_test.stderr is True
mock_logging_debug.assert_any_call("command: ['who']")
mock_logging_debug.assert_any_call("name: test_who")
mock_logging_debug.assert_any_call("forked: False")
mock_logging_debug.assert_any_call("process: None")
mock_logging_debug.assert_any_call("pid: None")
mock_fork.assert_not_called()
mocker.stopall()
def test_subprocess_map_type_to_command(self, mocker):
"""Using the mock.patch decorator (removes the need to import builtins)"""
mocker.stopall()
mock_logging_debug = mocker.MagicMock(name=__name__ + "_mock_logging_debug")
# mock
mocker.patch.object(
scarlett_os.subprocess.logging.Logger, "debug", mock_logging_debug
)
# NOTE: the command must be a list of strings
test_command = ["who", "-b"]
test_name = "test_who"
test_fork = False
# create subprocess object
s_test = scarlett_os.subprocess.Subprocess(
test_command, name=test_name, fork=test_fork, run_check_command=False
)
spy = mocker.spy(s_test, "map_type_to_command")
assert isinstance(s_test.map_type_to_command(test_command), list)
# NOTE: According to this blog post, assert_called_once didn't get added till 3.6??
# source: https://allanderek.github.io/posts/unittestmock-small-gotcha/
# "So Python 3.4 and 3.6 pass as we expect. But Python3.5 gives an error stating that
# there is no assert_called_once method on the mock object, which is true since that
# method was not added until version 3.6. This is arguably what Python3.4 should have done."
# assert s_test.map_type_to_command.assert_called_once_with(test_command)
spy.assert_called_once_with(test_command)
# map_output = s_test.map_type_to_command(test_command)
# test
# assert isinstance(map_output, list)
# assert s_test.check_command_type(test_command)
# assert s_test.check_command_type(
# test_command) == mock_check_command_type.return_value
mocker.stopall()
def test_subprocess_check_command_type(self, mocker):
"""Using the mock.patch decorator (removes the need to import builtins)"""
mocker.stopall()
test_command = ["who", "-b"]
test_name = "test_who"
test_fork = False
# mock
mock_map_type_to_command = mocker.MagicMock(name="mock_map_type_to_command")
# mock_map_type_to_command.return_value = int
mock_map_type_to_command.side_effect = [int, [int, int]]
mock_fork = mocker.MagicMock(name="mock_fork")
mock_logging_debug = mocker.MagicMock(name="mock_logging_debug")
mocker.patch.object(
scarlett_os.subprocess.logging.Logger, "debug", mock_logging_debug
)
mocker.patch.object(
scarlett_os.subprocess.Subprocess,
"map_type_to_command",
mock_map_type_to_command,
)
mocker.patch.object(scarlett_os.subprocess.Subprocess, "fork", mock_fork)
# action
with pytest.raises(TypeError) as excinfo:
scarlett_os.subprocess.Subprocess(
test_command, name=test_name, fork=test_fork, run_check_command=True
)
assert (
str(excinfo.value)
== "Variable types should return a list in python3. Got: <class 'int'>"
)
with pytest.raises(TypeError) as excinfo:
scarlett_os.subprocess.Subprocess(
test_command, name=test_name, fork=test_fork, run_check_command=True
)
assert (
str(excinfo.value)
== "Executables and arguments must be str objects. types: <class 'int'>"
)
mocker.stopall()
############################### START HERE HERON ###############################################
# @mock.patch('scarlett_os.subprocess.logging.Logger.debug') # 2
def test_subprocess_fork(self, mocker):
"""Test fork class method process."""
mocker.stopall()
test_command = ["who", "-b"]
test_name = "test_who"
test_fork = True
pid = 7
# mock
mock_logging_debug = mocker.MagicMock(name="mock_logging_debug")
mock_os_fork = mocker.MagicMock(name="mock_os_fork", return_value=pid)
mock_sys_exit = mocker.MagicMock(name="mock_sys_exit")
mock_os_chdir = mocker.MagicMock(name="mock_os_chdir")
mock_os_setsid = mocker.MagicMock(name="mock_os_setsid")
mock_os_umask = mocker.MagicMock(name="mock_os_umask")
# patch
mocker.patch.object(
scarlett_os.subprocess.logging.Logger, "debug", mock_logging_debug
)
mocker.patch.object(scarlett_os.subprocess.os, "fork", mock_os_fork)
mocker.patch.object(scarlett_os.subprocess.sys, "exit", mock_sys_exit)
mocker.patch.object(scarlett_os.subprocess.os, "chdir", mock_os_chdir)
mocker.patch.object(scarlett_os.subprocess.os, "setsid", mock_os_setsid)
mocker.patch.object(scarlett_os.subprocess.os, "umask", mock_os_umask)
tfork1 = scarlett_os.subprocess.Subprocess(
test_command, name=test_name, fork=test_fork
)
assert mock_sys_exit.call_count == 2
assert tfork1.stdout == False
assert tfork1.stderr == False
assert mock_os_chdir.call_count == 1
assert mock_os_setsid.call_count == 1
assert mock_os_umask.call_count == 1
assert mock_os_fork.call_count == 2
mock_os_chdir.assert_called_once_with("/")
mocker.stopall()
def test_subprocess_fork_exception(self, mocker):
"""Test fork class method process."""
mocker.stopall()
test_command = ["fake", "command"]
test_name = "fake_command"
test_fork = True
# mock
mock_logging_debug = mocker.MagicMock(name="mock_logging_debug")
mock_os_fork = mocker.MagicMock(name="mock_os_fork", side_effect=OSError)
mock_sys_exit = mocker.MagicMock(name="mock_sys_exit")
mock_os_chdir = mocker.MagicMock(name="mock_os_chdir")
mock_os_setsid = mocker.MagicMock(name="mock_os_setsid")
mock_os_umask = mocker.MagicMock(name="mock_os_umask")
# patch
mocker.patch.object(
scarlett_os.subprocess.logging.Logger, "debug", mock_logging_debug
)
mocker.patch.object(scarlett_os.subprocess.os, "fork", mock_os_fork)
mocker.patch.object(scarlett_os.subprocess.sys, "exit", mock_sys_exit)
mocker.patch.object(scarlett_os.subprocess.os, "chdir", mock_os_chdir)
mocker.patch.object(scarlett_os.subprocess.os, "setsid", mock_os_setsid)
mocker.patch.object(scarlett_os.subprocess.os, "umask", mock_os_umask)
tfork2 = scarlett_os.subprocess.Subprocess(
test_command, name=test_name, fork=test_fork
)
# NOTE: Bit of duplication we have going here.
assert mock_sys_exit.call_count == 2
assert tfork2.stdout == False
assert tfork2.stderr == False
assert mock_os_chdir.call_count == 1
assert mock_os_setsid.call_count == 1
assert mock_os_umask.call_count == 1
assert mock_os_fork.call_count == 2
mock_os_chdir.assert_called_once_with("/")
mocker.stopall()
def test_subprocess_fork_pid0(self, mocker):
"""Test fork class method process."""
mocker.stopall()
test_command = ["who", "-b"]
test_name = "test_who"
test_fork = True
pid = 0
# mock
mock_logging_debug = mocker.MagicMock(name="mock_logging_debug")
mock_os_fork = mocker.MagicMock(name="mock_os_fork", return_value=pid)
mock_sys_exit = mocker.MagicMock(name="mock_sys_exit")
mock_os_chdir = mocker.MagicMock(name="mock_os_chdir")
mock_os_setsid = mocker.MagicMock(name="mock_os_setsid")
mock_os_umask = mocker.MagicMock(name="mock_os_umask")
# patch
mocker.patch.object(
scarlett_os.subprocess.logging.Logger, "debug", mock_logging_debug
)
mocker.patch.object(scarlett_os.subprocess.os, "fork", mock_os_fork)
mocker.patch.object(scarlett_os.subprocess.sys, "exit", mock_sys_exit)
mocker.patch.object(scarlett_os.subprocess.os, "chdir", mock_os_chdir)
mocker.patch.object(scarlett_os.subprocess.os, "setsid", mock_os_setsid)
mocker.patch.object(scarlett_os.subprocess.os, "umask", mock_os_umask)
scarlett_os.subprocess.Subprocess(test_command, name=test_name, fork=test_fork)
assert mock_sys_exit.call_count == 0
mocker.stopall()
def test_subprocess_fork_pid0_exception(self, mocker):
"""Test fork class method process."""
mocker.stopall()
test_command = ["who", "-b"]
test_name = "test_who"
test_fork = True
pid = 0
# mock
mock_logging_debug = mocker.MagicMock(name="mock_logging_debug")
mock_logging_error = mocker.MagicMock(name="mock_logging_error")
mock_os_fork = mocker.MagicMock(name="mock_os_fork", side_effect=[pid, OSError])
mock_sys_exit = mocker.MagicMock(name="mock_sys_exit")
mock_os_chdir = mocker.MagicMock(name="mock_os_chdir")
mock_os_setsid = mocker.MagicMock(name="mock_os_setsid")
mock_os_umask = mocker.MagicMock(name="mock_os_umask")
# patch
mocker.patch.object(
scarlett_os.subprocess.logging.Logger, "debug", mock_logging_debug
)
mocker.patch.object(
scarlett_os.subprocess.logging.Logger, "error", mock_logging_error
)
mocker.patch.object(scarlett_os.subprocess.os, "fork", mock_os_fork)
mocker.patch.object(scarlett_os.subprocess.sys, "exit", mock_sys_exit)
mocker.patch.object(scarlett_os.subprocess.os, "chdir", mock_os_chdir)
mocker.patch.object(scarlett_os.subprocess.os, "setsid", mock_os_setsid)
mocker.patch.object(scarlett_os.subprocess.os, "umask", mock_os_umask)
scarlett_os.subprocess.Subprocess(test_command, name=test_name, fork=test_fork)
mock_logging_error.assert_any_call("Error forking process second time")
mocker.stopall()
# FIXME: Re-enable these guys
# @mock.patch('scarlett_os.subprocess.logging.Logger.debug')
# def test_subprocess_fork_and_spawn_command(self, mock_logging_debug):
# """Test a full run connamd of Subprocess.run()"""
# mocker.stopall()
# test_command = ["who", "-b"]
# test_name = 'test_who'
# test_fork = False
# # mock
# with mock.patch('scarlett_os.subprocess.os.fork', mocker.Mock(name='mock_os_fork')) as mock_os_fork: # noqa
# with mock.patch('scarlett_os.subprocess.sys.exit', mocker.Mock(name='mock_sys_exit')) as mock_sys_exit: # noqa
# with mock.patch('scarlett_os.subprocess.os.chdir', mocker.Mock(name='mock_os_chdir')) as mock_os_chdir: # noqa
# with mock.patch('scarlett_os.subprocess.os.setsid', mocker.Mock(name='mock_os_setsid')) as mock_os_setsid: # noqa
# with mock.patch('scarlett_os.subprocess.os.umask', mocker.Mock(name='mock_os_umask')) as mock_os_umask: # noqa
# # Import module locally for testing purposes
# from scarlett_os.internal.gi import gi, GLib
# # Save unpatched versions of the following so we can reset everything after tests finish
# before_patch_gi_pid = gi._gi._glib.Pid
# before_path_glib_spawn_async = GLib.spawn_async
# before_path_child_watch_add = GLib.child_watch_add
# test_pid = mocker.Mock(spec=gi._gi._glib.Pid, return_value=23241, name='Mockgi._gi._glib.Pid')
# test_pid.real = 23241
# test_pid.close = mocker.Mock(name='Mockgi._gi._glib.Pid.close')
# # Mock function GLib function spawn_async
# GLib.spawn_async = mock.create_autospec(GLib.spawn_async, return_value=(test_pid, None, None, None), name='MockGLib.spawn_async')
# # Mock call to child_watch
# GLib.child_watch_add = mock.create_autospec(GLib.child_watch_add)
# # action
# tfork1 = scarlett_os.subprocess.Subprocess(test_command,
# name=test_name,
# fork=test_fork)
# with mock.patch('scarlett_os.subprocess.Subprocess.exited_cb', mocker.Mock(name='mock_exited_cb', spec=scarlett_os.subprocess.Subprocess.exited_cb)) as mock_exited_cb:
# with mock.patch('scarlett_os.subprocess.Subprocess.emit', mocker.Mock(name='mock_emit', spec=scarlett_os.subprocess.Subprocess.emit)) as mock_emit:
# # action, kick off subprocess run
# tfork1.run()
# # assert
# mock_logging_debug.assert_any_call("command: {}".format(test_command))
# mock_logging_debug.assert_any_call("stdin: {}".format(None))
# mock_logging_debug.assert_any_call("stdout: {}".format(None))
# mock_logging_debug.assert_any_call("stderr: {}".format(None))
# assert tfork1.pid != 23241
# assert tfork1.stdin == None
# assert tfork1.stdout == None
# assert tfork1.stderr == None
# assert tfork1.forked == False
# assert mock_emit.call_count == 0
# GLib.spawn_async.assert_called_once_with(test_command,
# flags=GLib.SpawnFlags.SEARCH_PATH | GLib.SpawnFlags.DO_NOT_REAP_CHILD
# )
# GLib.child_watch_add.assert_called_once_with(GLib.PRIORITY_HIGH, test_pid, mock_exited_cb)
# # now unpatch all of these guys
# gi._gi._glib.Pid = before_patch_gi_pid
# GLib.spawn_async = before_path_glib_spawn_async
# GLib.child_watch_add = before_path_child_watch_add
# mocker.stopall()
# # NOTE: Decorators get applied BOTTOM to TOP
# def test_check_command_type_is_array_of_str(self, mocker):
# mocker.stopall()
# mock_init = mocker.MagicMock(name='mock_init',
# # spec=scarlett_os.subprocess.Subprocess.__init__,
# autospec=scarlett_os.subprocess.Subprocess.__init__,
# return_value=None)
# mocker.patch.object(scarlett_os.subprocess.Subprocess, '__init__', mock_init)
# # # source: http://stackoverflow.com/questions/28181867/how-do-a-mock-a-superclass-that-is-part-of-a-library
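# For reference (addition, hedged): the behaviour exercised in test_check_pid
# above matches the usual pattern behind check_pid -- os.kill(pid, 0) sends no
# signal but raises OSError when the process does not exist (or cannot be
# signalled):
def _reference_check_pid(pid):
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    return True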
| 2.140625 | 2 |
ClimaTempo/lerbd.py | Daniel-H-C-P/APIClima-parqueDasDunas | 0 | 12796260 |
import datetime
###################### 1 #####################
#reading the json files
import json
#Opens the climaAgora file
def climanow():
with open('/home/daniel/webdev/tcc2019/APIClima-parqueDasDunas/ClimaTempo/climaAgora.json') as f:
data = json.load(f)
#Reading the file
climaatual = data['weather'][0]['description']
#print('Clima atual:')
#print(climaatual)
return climaatual
#climanow()
#Opens the clima5dias file
def climaprev():
with open('clima5dias.json') as g:
data2 = json.load(g)
#Reading the file and building a list
numheader = 0
previsao = []
while numheader < 40:
climaprint = (str(data2['list'][numheader]['dt_txt']).split( ))
climaprint.append(str(data2['list'][numheader]['weather'][0]['description']))
previsao.append(climaprint)
numheader +=1
#Building a list of days
dias = []
separador = 0
while separador < 40:
daynum = str(previsao[int(separador)][0])
dias.append(daynum)
separador +=1
diasuniq= list(set(dias))
diasuniq.sort()
#print(diasuniq)
#print(len(diasuniq))
# organizing everything
prevorganizada = []
for x in diasuniq:
prevorganizada.append([])
y = 0
for item in previsao:
z = 0
while z < len(diasuniq):
if item[0] == diasuniq[z]:
prevorganizada[z].append(item)
z +=1
else:
z +=1
continue
y +=1
prevfinal = ''
numx = 0
for num1 in prevorganizada:
prevfinal+= ('\n\nDia {}:'.format(prevorganizada[numx][0][0]))
for num2 in num1:
prevfinal+= '\n' + (num2[1] + ": " + num2[2])
numx +=1
#print("\nExemplo de exibição específica:")
#print(prevorganizada[2][0][2])
#print(prevfinal)
return prevorganizada
#climaprev()
def prevhoje():
with open('/home/daniel/webdev/tcc2019/APIClima-parqueDasDunas/ClimaTempo/clima5dias.json') as g:
data2 = json.load(g)
#Lendo o arquivo e criando uma lista
numheader = 0
previsao = []
while numheader < 40:
climaprint = (str(data2['list'][numheader]['dt_txt']).split( ))
climaprint.append(str(data2['list'][numheader]['weather'][0]['description']))
previsao.append(climaprint)
numheader +=1
#Criando uma lista de dias
dias = []
separador = 0
while separador < 40:
daynum = str(previsao[int(separador)][0])
dias.append(daynum)
separador +=1
diasuniq= list(set(dias))
diasuniq.sort()
#print(diasuniq)
#print(len(diasuniq))
# organizando tudo
prevorganizada = []
for x in diasuniq:
prevorganizada.append([])
y = 0
for item in previsao:
z = 0
while z < len(diasuniq):
if item[0] == diasuniq[z]:
prevorganizada[z].append(item)
z +=1
else:
z +=1
continue
y +=1
dia_hoje = 'Dia {}: '.format(prevorganizada[0][0][0])
for num1 in prevorganizada[0]:
dia_hoje += ' ' + (num1[1] + ": " + num1[2] +',')
#print (dia_hoje)
return dia_hoje
#prevhoje()
#################### 2 ####################
# Send email
climatual = climanow()
prevclima = prevhoje()
import smtplib
from minhachaveOW import meuemail, minhasenha
def mandaemail(nome, contato, dia, hora, climatual, prevclima):
meu_email = meuemail
minha_s = minhasenha
#climatual = climanow()
#prevclima = prevhoje()
with smtplib.SMTP('smtp.gmail.com', 587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(meu_email, minha_s)
assunto = 'Alerta climático'
corpo = 'Olá, {}!\n Esta é a previsão para o Parque das Dunas após seu alerta({} - {}):\n Clima Atual -> {}.\n Previsão -> {}.\n Esperamos que tenha uma boa trilha!\n Equipe ClimaTempo'.format(nome, dia, hora, climatual, prevclima)
mensagem = f'Subject: {assunto}\n\n{corpo}'
smtp.sendmail( meu_email, contato, mensagem.encode('utf-8'))
# mandaremail("Daniel", "<EMAIL>", "hoje2", "agora2")
#To change the working directory
import os
# print(os.path.abspath(os.curdir)) #just to check the base dir
os.chdir('/home/daniel/webdev/tcc2019/APIClima-parqueDasDunas/')
# print(os.path.abspath(os.curdir)) #just to check the dir set above
##########################################
'''
To read directly from the models:
from cadastro.models import EmailCliente
pessoas = EmailCliente.objects.all()
print (pessoas)
'''
#########################################
#Part that reads the db with sqlite3:
import sqlite3
conn = sqlite3.connect('db.sqlite3') #creates the connection
c = conn.cursor() #allows running operations on the database
c.execute("SELECT * FROM cadastro_emailcliente WHERE mandado=0 ") #execute -> sql
lista = c.fetchall() #fetches all results and builds a list
for pessoa in lista:
#print(pessoa)
#print(pessoa[0]) #id
#print(pessoa[1]) #name
#print(pessoa[2]) #contact
#print(pessoa[3]) #day
#print(pessoa[4]) #hour
alerta = pessoa[3] + ' ' + pessoa[4]
alerta = datetime.datetime.strptime(alerta, "%Y-%m-%d %H:%M:%S")
if datetime.datetime.now() >= alerta:
mandaemail(pessoa[1],pessoa[2],pessoa[3],pessoa[4], climatual, prevclima)
############################## THESE TWO LINES UPDATE THE DB BY THE OBJECT'S ID!!!!
c.execute('UPDATE cadastro_emailcliente SET mandado = 1 WHERE id = ?', (int(pessoa[0]),))
conn.commit()
##############################
#loop command to interact with the emails
#grab name, contact, day, hour and interact with mandaemail
#conn.commit() # commits the changes to the table
c.close() # closes connection 1
conn.close() # closes connection 2
| 3.4375 | 3 |
algorithm-and-hardware/py3/gravity_utils.py | zcjl/python-works | 0 | 12796261 |
# coding:utf-8
import serial
import time
import binascii
from enum import Enum
class GravityUtils:
Code = Enum('Code', 'MARK CHECK')
def __init__(self):
# open the serial ports
self.ports = {
# "g_sensor_1": serial.Serial(port='/dev/tty.usbserial-FT2J03F3A', timeout=0.04),
# "g_sensor_2": serial.Serial(port='/dev/tty.usbserial-FT2J03F3B', timeout=0.04),
# "g_sensor_3": serial.Serial(port='/dev/tty.usbserial-FT2J03F3C', timeout=0.04),
# "g_sensor_4": serial.Serial(port='/dev/tty.usbserial-FT2J03F3D', timeout=0.04)
# "g_sensor_1": serial.Serial(port='/dev/ttyUSB1', timeout=0.03),
# "g_sensor_2": serial.Serial(port='/dev/ttyUSB3', timeout=0.03),
# "g_sensor_3": serial.Serial(port='/dev/ttyUSB0', timeout=0.03),
# "g_sensor_4": serial.Serial(port='/dev/ttyUSB2', timeout=0.03)
}
self.offset = 100 # offset subtracted from the raw gravity reading -> 20 33 33 30 34
"""
Read gravity values from the gravity sensors over the USB-to-serial ports.
"""
def read_gravity(self):
results = {}
for name, port in self.ports.items():
while True:
port.flush()
response = port.read(8)
print('read data from %s, result is %s' % (name, binascii.b2a_hex(response)))
if len(response) == 0 or response[:1] != b'\x2B':
continue
if len(response) < 8:
response = bytearray(response)
response.extend(port.read(8))
ok, gravity = self.convert_gravity(response)
if not ok:
print(gravity)
continue
else:
results[name] = gravity
break
return {"success": True, "data": results}
"""
Convert the raw binary data read from the serial port into the actual gravity value.
Sample frame: 2B 20 33 33 34 39 0D 0A
The first byte is the marker 2B, bytes 2-6 carry the gravity digits, and the last two bytes are the check bytes 0D 0A (carriage return / line feed).
"""
def convert_gravity(self, binary):
if binary[:1] != b'\x2B':
return False, self.Code.MARK
if binary[6:8] != b'\x0D\x0A':
return False, self.Code.CHECK
raw = self.convert_binary(binary[1:6])
result = raw - self.offset
return True, result
"""
Convert the raw binary digits into the corresponding decimal value.
Sample data: 20 33 33 34 39
From right to left the digits are ones, tens, hundreds, thousands, ten-thousands; the unit is grams.
"""
def convert_binary(self, binary):
result = ''
for data in binary:
result = result + chr(data)
return int(result)
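# Worked example (addition): for the sample frame 2B 20 33 33 34 39 0D 0A the
# payload bytes 20 33 33 34 39 decode to the characters " 3349", so
# convert_binary() returns 3349 and convert_gravity() yields 3349 - 100 = 3249
# grams once the offset is subtracted:
# >>> GravityUtils().convert_binary(b'\x20\x33\x33\x34\x39')
# 3349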
begin = time.time()
print(GravityUtils().read_gravity())
end = time.time()
print('调用耗时:%f\n\n' % (end - begin))
| 2.609375 | 3 |
tools/nntool/graph/matches/matcher.py | knmcguire/gap_sdk | 0 | 12796262 | # Copyright (C) 2019 GreenWaves Technologies
# All rights reserved.
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
from abc import ABC, abstractmethod
from typing import Generator, Sequence
from graph.graph_identity import GraphIdentity
from utils.graph import GraphView, Node, MatchNode
class MatchNodeType(MatchNode):
def __init__(self, name, node_class):
super().__init__(name)
self.__node_class = node_class
def _match(self, G, node, edge):
del G, edge
return isinstance(node, self.__node_class)
class Matcher(ABC):
NAME = '__NOT_SET__'
DESCRIPTION = '__NOT_SET__'
def __init__(self, identity: str = None):
if identity is None:
self._identity = self.NAME
else:
self._identity = identity
def set_identity(self, G):
if hasattr(G, 'graph_identity') and self._identity != '__NOT_SET__':
G.graph_identity.fusions.append(self._identity)
@abstractmethod
def match(self, G: GraphView, set_identity: bool = True):
pass
class DefaultMatcher(Matcher):
@abstractmethod
def match_function(self, G: GraphView) -> Generator[GraphView, None, None]:
pass
@abstractmethod
def replace_function(self, G: GraphView, subgraph: GraphView) -> Node:
pass
def match(self, G: GraphView, set_identity: bool = True):
replaced = True
while replaced:
replaced = False
for subgraph in self.match_function(G):
replacement = self.replace_function(G, subgraph)
if not replacement:
G.remove_fragment(subgraph)
elif isinstance(replacement, Node):
G.replace_fragment(subgraph, replacement)
else:
raise TypeError("unexcepted return value from replace_function")
replaced = True
break
if set_identity:
self.set_identity(G)
# This can be used to define groups of matches to be selected
# from the command line
# It also can be inherited to group matches together
class MatchGroup(Matcher):
def __init__(self, *args: Sequence[Matcher], identity: str = None):
super().__init__(identity)
self.matches = list(args)
def add_match(self, match: Matcher):
self.matches.append(match)
def match(self, G: GraphView, set_identity: bool = True):
for match_instance in self.matches:
match_instance.match(G, False)
if set_identity:
self.set_identity(G)
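# Hedged usage sketch (addition): matchers are composed by instantiating a
# MatchGroup with concrete Matcher subclasses; the names below are
# hypothetical placeholders.
# combined = MatchGroup(FuseConvRelu(), FusePool(), identity='my_fusions')
# combined.match(G)  # runs each matcher in turn, then records 'my_fusions'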
| 2.6875 | 3 |
main.py | OneFoggyScreen/Amicable-Bot | 1 | 12796263 | #The heart of the bot.
#--------- Libaries ---------#
import discord, os, settings
from tools.logging import ABLog
from discord.ext import commands
#--------- Variables ---------#
INTENTS = discord.Intents.all()
client = commands.Bot(command_prefix = settings.ABPrefixes, intents = INTENTS, help_command=None)
client.remove_command('help')
#--------- Code ---------#
@client.command() #Loads specified cog.
async def load(ctx, extension):
if ctx.message.author.id == settings.AdminID:
client.load_extension(f'cogs.{extension}')
ABLog(f"Loading {extension}")
await ctx.send(f'The cog {extension} was loaded')
@client.command() #Reloads specified cog.
async def reload(ctx, extension):
if ctx.message.author.id == settings.AdminID:
client.reload_extension(f'cogs.{extension}')
ABLog(f"Reloading {extension}")
await ctx.send(f'The cog {extension} was reloaded')
@client.command() #Unloads specified cog.
async def unload(ctx, extension):
if ctx.message.author.id == settings.AdminID:
client.unload_extension(f'cogs.{extension}')
ABLog(f"Unloading {extension}")
await ctx.send(f'The cog {extension} was unloaded')
for filename in os.listdir('./cogs'): #Initial load of all cogs.
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
print(f"Loaded {filename}")
@client.event #Changes the custom status.
async def on_ready():
activity = discord.Activity(name=settings.Custom_Status, type=discord.ActivityType.playing)
ABLog("Bot loaded.")
print(f'{client.user} has connected to Discord!')
await client.change_presence(activity=activity)
client.run(settings.Token) #Gets the bot online!
| 2.140625 | 2 |
mwedittypes/__init__.py | geohci/edit-types | 1 | 12796264 |
from .mwedittypes import EditTypes
__title__ = "mwedittypes"
__summary__ = "mwedittypes is a package that supports edit diffs and action detection for Wikipedia"
__url__ = "https://github.com/geohci/edit-types"
__version__ = "1.1.5"
__license__ = "MIT License"
__all__ = ["EditTypes"]
| 1.460938 | 1 |
isobutane.py | erkandem/isobutane | 0 | 12796265 |
"""
a combination of the
- ideal gas law and
- a simple chemical reaction
But still, no warranty.
"""
# %% constants
AVOGADRO = 6.02214076 * 10 ** 23 # 1/mol
BOLTZMANN = 1.380649 * 10 ** -23 # J/K or Nm/K or (kgm^2)/(Ks^2)
UNIVERSAL_GAS = AVOGADRO * BOLTZMANN # J/(molK) or (kgm^2)/(molKs^2)
# %% atomic masses in kg/mol or units/atom
CARBON_MOLAR = 12 / 1000
OXYGEN_MOLAR = 16 / 1000
HYDROGEN_MOLAR = 1 / 1000
NITROGEN_MOLAR = 14 / 1000
ISOBUTANE_MOLAR = 4 * CARBON_MOLAR + 10 * HYDROGEN_MOLAR
# %% environment
AIR_COMPOSITION_BY_VOLUME = {
'nitrogen': 0.79,
'oxygen': 0.21
}
# using standard temperature and pressure
TEMPERATURE = 273.15 # K
PRESSURE = 1.0 * 100000 # Pa or kg/(ms^2)
# %% reaction
# (4 * C + 10 * H) + 6.5 * O_2 --react--> 5 * H_2O + 4 * CO2
# 1 mol of isobutane needs 6.5 mols of oxygen to burn completly
# or 1 mol of oxygen can completely burn (1 / 6.5) mols of isobutane
OXYGEN_TO_ISOBUTANE_RATIO = 6.5
def get_oxygen_volume_of_air(air):
return air * AIR_COMPOSITION_BY_VOLUME['oxygen']
def get_air_volume_of_oxygen(oxygen):
return oxygen / AIR_COMPOSITION_BY_VOLUME['oxygen']
def calc_volume_to_mol(volume):
"""
pV = NRT solved for N: N = PV/(RT)
expects `volume` to be cubic meters m^3
"""
return (volume * PRESSURE) / (UNIVERSAL_GAS * TEMPERATURE)
def calc_mol_to_volume(mols):
"""
pV = NRT solved for V: V = NRT/p
returns in `V` in cubic meters m^3
"""
return (mols * UNIVERSAL_GAS * TEMPERATURE) / PRESSURE
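# Worked check (addition): at the standard conditions above, one litre of gas
# holds N = pV/(RT) = 100000 * 0.001 / (8.314 * 273.15) ~= 0.0440 mol, i.e. a
# molar volume of roughly 22.7 L/mol at 1 bar (the familiar 22.4 L/mol holds
# at 1 atm = 101325 Pa).
# assert abs(calc_volume_to_mol(0.001) - 0.0440) < 0.0005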
def oxygen_needed_for_isobutane(mols):
return mols * OXYGEN_TO_ISOBUTANE_RATIO
def isobutane_needed_for_oxygen(mols):
return mols / OXYGEN_TO_ISOBUTANE_RATIO
def air_volume_for_isobutane_mols(mols):
"""returns cubic meters m^3"""
oxygen_mols = oxygen_needed_for_isobutane(mols)
oxygen_volume = calc_mol_to_volume(oxygen_mols)
air_volume = get_air_volume_of_oxygen(oxygen_volume)
return air_volume
def isobutane_vol_to_air_vol(volume_isobutane):
"""returns cubic meters m^3"""
mols_isobutane = calc_volume_to_mol(volume_isobutane)
return air_volume_for_isobutane_mols(mols_isobutane)
def air_vol_to_isobutane_vol(volume_air):
"""returns cubic meters m^3"""
volume_oxygen = get_oxygen_volume_of_air(volume_air)
mols_oxygen = calc_volume_to_mol(volume_oxygen)
mols_isobutane = isobutane_needed_for_oxygen(mols_oxygen)
return calc_mol_to_volume(mols_isobutane)
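# Worked check (addition): because equal gas volumes hold equal mols at fixed
# temperature and pressure, 0.1 L of isobutane needs 6.5 * 0.1 = 0.65 L of
# oxygen, i.e. 0.65 / 0.21 ~= 3.095 L of air -- matching AIR_VOLUME in main()
# below.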
def print_needed_air(isobutane_volume):
"""does not account for container volume"""
print(
f'{isobutane_vol_to_air_vol(isobutane_volume) * 1000:.3f} liters of air'
f' are needed for a stoichiometric reaction of'
f' {isobutane_volume * 1000:.3f} liters of isobutane'
)
def print_needed_isobutane(air_volume):
"""does not account for container volume"""
print(
f'{air_vol_to_isobutane_vol(air_volume) * 1000:.3f} liters of isobutane'
f' are needed for a stoichiometric reaction of'
f' {air_volume * 1000:.3f} liters of air'
)
def main():
ISOBUTANE_VOLUME = 0.1 / 1000 # m^3
AIR_VOLUME = 3.095 / 1000 # m^3
print_needed_air(ISOBUTANE_VOLUME)
print_needed_isobutane(AIR_VOLUME)
if __name__ == '__main__':
main()
| 3.078125 | 3 |
mwdust/Drimmel03.py | jan-rybizki/mwdust | 21 | 12796266 |
###############################################################################
#
# Drimmel03: extinction model from Drimmel et al. 2003 2003A&A...409..205D
#
###############################################################################
import copy
import numpy
from scipy.ndimage import map_coordinates
from scipy import optimize
try:
import healpy
except ImportError: pass
from mwdust.util.extCurves import aebv
from mwdust.util import read_Drimmel
from mwdust.util.tools import cos_sphere_dist
from mwdust.DustMap3D import DustMap3D
_DEGTORAD= numpy.pi/180.
class Drimmel03(DustMap3D):
"""extinction model from Drimmel et al. 2003 2003A&A...409..205D"""
def __init__(self,filter=None,sf10=True):
"""
NAME:
__init__
PURPOSE:
Initialize the Drimmel03 dust map
INPUT:
filter= filter to return the extinction in
sf10= (True) if True, use the Schlafly & Finkbeiner calibrations
OUTPUT:
object
HISTORY:
2013-12-10 - Started - Bovy (IAS)
"""
DustMap3D.__init__(self,filter=filter)
self._sf10= sf10
#Read the maps
drimmelMaps= read_Drimmel.readDrimmelAll()
self._drimmelMaps= drimmelMaps
#Sines and cosines of sky positions of COBE pixels
self._rf_sintheta= numpy.sin(numpy.pi/2.-self._drimmelMaps['rf_glat']*_DEGTORAD)
self._rf_costheta= numpy.cos(numpy.pi/2.-self._drimmelMaps['rf_glat']*_DEGTORAD)
self._rf_sinphi= numpy.sin(self._drimmelMaps['rf_glon']*_DEGTORAD)
self._rf_cosphi= numpy.cos(self._drimmelMaps['rf_glon']*_DEGTORAD)
#Various setups
self._xsun= -8.
self._zsun= 0.015
#Global grids
self._nx_disk, self._ny_disk, self._nz_disk= 151, 151, 51
self._dx_disk, self._dy_disk, self._dz_disk= 0.2, 0.2, 0.02
self._nx_ori, self._ny_ori, self._nz_ori= 76, 151, 51
self._dx_ori, self._dy_ori, self._dz_ori= 0.05, 0.05, 0.02
#Local grids
self._nx_diskloc, self._ny_diskloc, self._nz_diskloc= 31, 31, 51
self._dx_diskloc, self._dy_diskloc, self._dz_diskloc= 0.05, 0.05, 0.02
self._nx_ori2, self._ny_ori2, self._nz_ori2= 101, 201, 51
self._dx_ori2, self._dy_ori2, self._dz_ori2= 0.02, 0.02, 0.02
return None
def _evaluate(self,l,b,d,norescale=False,
_fd=1.,_fs=1.,_fo=1.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the dust-map
INPUT:
l- Galactic longitude (deg)
b- Galactic latitude (deg)
d- distance (kpc) can be array
norescale= (False) if True, don't apply re-scalings
_fd, _fs, _fo= (1.) amplitudes of the different components
OUTPUT:
extinction
HISTORY:
2013-12-10 - Started - Bovy (IAS)
"""
if isinstance(l,numpy.ndarray) or isinstance(b,numpy.ndarray):
raise NotImplementedError("array input for l and b for Drimmel dust map not implemented")
cl= numpy.cos(l*_DEGTORAD)
sl= numpy.sin(l*_DEGTORAD)
cb= numpy.cos(b*_DEGTORAD)
sb= numpy.sin(b*_DEGTORAD)
#Setup arrays
avori= numpy.zeros_like(d)
avspir= numpy.zeros_like(d)
avdisk= numpy.zeros_like(d)
#Find nearest pixel in COBE map for the re-scaling
rfIndx= numpy.argmax(cos_sphere_dist(self._rf_sintheta,
self._rf_costheta,
self._rf_sinphi,
self._rf_cosphi,
numpy.sin(numpy.pi/2.-b*_DEGTORAD),
numpy.cos(numpy.pi/2.-b*_DEGTORAD),
sl,cl))
rfdisk, rfspir, rfori = 1., 1., 1.
if self._drimmelMaps['rf_comp'][rfIndx] == 1 and not norescale:
rfdisk= self._drimmelMaps['rf'][rfIndx]
elif self._drimmelMaps['rf_comp'][rfIndx] == 2 and not norescale:
rfspir= self._drimmelMaps['rf'][rfIndx]
elif self._drimmelMaps['rf_comp'][rfIndx] == 3 and not norescale:
rfori= self._drimmelMaps['rf'][rfIndx]
#Find maximum distance
dmax= 100.
if b != 0.: dmax= .49999/numpy.fabs(sb) - self._zsun/sb
if cl != 0.:
tdmax= (14.9999/numpy.fabs(cl)-self._xsun/cl)
if tdmax < dmax: dmax= tdmax
if sl != 0.:
tdmax = 14.9999/numpy.fabs(sl)
if tdmax < dmax: dmax= tdmax
d= copy.copy(d)
d[d > dmax]= dmax
#Rectangular coordinates
X= d*cb*cl
Y= d*cb*sl
Z= d*sb+self._zsun
#Local grid
#Orion
locIndx= (numpy.fabs(X) < 1.)*(numpy.fabs(Y) < 2.)
if numpy.sum(locIndx) > 0:
xi = X[locIndx]/self._dx_ori2+float(self._nx_ori2-1)/2.
yj = Y[locIndx]/self._dy_ori2+float(self._ny_ori2-1)/2.
zk = Z[locIndx]/self._dz_ori2+float(self._nz_ori2-1)/2.
avori[locIndx]= map_coordinates(self._drimmelMaps['avori2'],
[xi,yj,zk],
mode='constant',cval=0.)
#local disk
locIndx= (numpy.fabs(X) < 0.75)*(numpy.fabs(Y) < 0.75)
if numpy.sum(locIndx) > 0:
xi = X[locIndx]/self._dx_diskloc+float(self._nx_diskloc-1)/2.
yj = Y[locIndx]/self._dy_diskloc+float(self._ny_diskloc-1)/2.
zk = Z[locIndx]/self._dz_diskloc+float(self._nz_diskloc-1)/2.
avdisk[locIndx]= map_coordinates(self._drimmelMaps['avdloc'],
[xi,yj,zk],
mode='constant',cval=0.)
#Go to Galactocentric coordinates
X= X+self._xsun
#Stars beyond the local grid
#Orion
globIndx= True^(numpy.fabs(X-self._xsun) < 1.)*(numpy.fabs(Y) < 2.)
if numpy.sum(globIndx) > 0:
#Orion grid is different from other global grids, so has its own dmax
dmax= 100.
if b != 0.: dmax= .49999/numpy.fabs(sb) - self._zsun/sb
if cl > 0.:
tdmax = (2.374999/numpy.fabs(cl))
if tdmax < dmax: dmax= tdmax
if cl < 0.:
tdmax = (1.374999/numpy.fabs(cl))
if tdmax < dmax: dmax= tdmax
if sl != 0.:
tdmax = (3.749999/numpy.fabs(sl))
if tdmax < dmax: dmax= tdmax
dori= copy.copy(d)
dori[dori > dmax]= dmax
Xori= dori*cb*cl+self._xsun
Yori= dori*cb*sl
Zori= dori*sb+self._zsun
xi = Xori[globIndx]/self._dx_ori + 2.5*float(self._nx_ori-1)
yj = Yori[globIndx]/self._dy_ori + float(self._ny_ori-1)/2.
zk = Zori[globIndx]/self._dz_ori + float(self._nz_ori-1)/2.
avori[globIndx]= map_coordinates(self._drimmelMaps['avori'],
[xi,yj,zk],
mode='constant',cval=0.)
#disk & spir
xi = X/self._dx_disk+float(self._nx_disk-1)/2.
yj = Y/self._dy_disk+float(self._ny_disk-1)/2.
zk = Z/self._dz_disk+float(self._nz_disk-1)/2.
avspir= map_coordinates(self._drimmelMaps['avspir'],
[xi,yj,zk],
mode='constant',cval=0.)
globIndx= True^(numpy.fabs(X-self._xsun) < 0.75)*(numpy.fabs(Y) < 0.75)
if numpy.sum(globIndx) > 0:
avdisk[globIndx]= map_coordinates(self._drimmelMaps['avdisk'],
[xi,yj,zk],
mode='constant',
cval=0.)[globIndx]
#Return
out=_fd*rfdisk*avdisk+_fs*rfspir*avspir+_fo*rfori*avori
if self._filter is None: # From Rieke & Lebovksy (1985); if sf10, first put ebv on SFD scale
return out/3.09/((1-self._sf10)+self._sf10*0.86)
else:
return out/3.09/((1-self._sf10)+self._sf10*0.86)\
*aebv(self._filter,sf10=self._sf10)
def dust_vals_disk(self,lcen,bcen,dist,radius):
"""
NAME:
dust_vals_disk
PURPOSE:
return the distribution of extinction within a small disk as samples
INPUT:
lcen, bcen - Galactic longitude and latitude of the center of the disk (deg)
dist - distance in kpc
radius - radius of the disk (deg)
OUTPUT:
(pixarea,extinction) - arrays of pixel-area in sq rad and extinction value
HISTORY:
2015-03-07 - Written - Bovy (IAS)
"""
# Convert the disk center to a HEALPIX vector
vec= healpy.pixelfunc.ang2vec((90.-bcen)*_DEGTORAD,lcen*_DEGTORAD)
# We pixelize the map with a HEALPIX grid with nside=256, to somewhat
# oversample the Drimmel resolution
nside= 256
# Find the pixels at this resolution that fall within the disk
ipixs= healpy.query_disc(nside,vec,radius*_DEGTORAD,
inclusive=False,nest=False)
# Query the HEALPIX map for pixels that lie within the disk
pixarea= healpy.pixelfunc.nside2pixarea(nside)+numpy.zeros(len(ipixs))
extinction= []
for ii, ipix in enumerate(ipixs):
# Get glon and glat
b9, l= healpy.pixelfunc.pix2ang(nside,ipix,nest=False)
b= 90.-b9/_DEGTORAD
l/= _DEGTORAD
# Now evaluate
extinction.append(self._evaluate(l,b,dist))
extinction= numpy.array(extinction)
return (pixarea,extinction)
def fit(self,l,b,dist,ext,e_ext):
"""
NAME:
fit
PURPOSE:
fit the amplitudes of the disk, spiral, and Orion parts of the
Drimmel map to other data
INPUT:
l,b- Galactic longitude and latitude in degree
dist - distance in kpc
ext - extinction at dist
e_ext - error in extinction
OUTPUT:
(fd,fs,fo,dist_stretch) amplitudes of disk, spiral, and Orion parts
and a 'distance stretch' applied to the model
(applied as self(l,b,dist*dist_stretch))
HISTORY:
2013-12-16 - Written - Bovy (IAS)
"""
#Fit consists of
#a) overall amplitude A
#b) relative amplitude fd/A, fs/A
#c) distance stretch
pars= numpy.array([0.,numpy.log(1./3.),numpy.log(1./3.),0.])
pars=\
optimize.fmin_powell(_fitFunc,pars,args=(self,l,b,dist,ext,e_ext))
amp= numpy.exp(pars[0])
fd= amp*numpy.exp(pars[1])
fs= amp*numpy.exp(pars[2])
fo= amp*(1.-fd-fs)
return (fd,fs,fo,numpy.exp(pars[3]))
def _fitFunc(pars,drim,l,b,dist,ext,e_ext):
amp= numpy.exp(pars[0])
fd= amp*numpy.exp(pars[1])
fs= amp*numpy.exp(pars[2])
fo= amp*(1.-fd-fs)
dist_stretch= numpy.exp(pars[3])
model_ext= drim(l,b,dist*dist_stretch,_fd=fd,_fs=fs,_fo=fo)
return 0.5*numpy.sum((model_ext-ext)**2./e_ext**2.)
| 2.140625 | 2 |
Introduction/Write a function.py | inarazim34/homework | 0 | 12796267 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
def is_leap(year):
leap = False
# Write your logic here
if year % 4 == 0 and (year % 400 == 0 or year % 100 != 0):
leap = True
return leap
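# Examples (addition): is_leap(2000) -> True (divisible by 400),
# is_leap(1900) -> False (divisible by 100 but not 400), is_leap(2016) -> True.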
| 3.890625 | 4 |
frontera/tests/test_codecs.py | TeamHG-Memex/frontera | 3 | 12796268 | # -*- coding: utf-8 -*-
from frontera.contrib.backends.remote.codecs.json import Encoder as JsonEncoder, Decoder as JsonDecoder
from frontera.contrib.backends.remote.codecs.msgpack import Encoder as MsgPackEncoder, Decoder as MsgPackDecoder
from frontera.core.models import Request, Response
import pytest
@pytest.mark.parametrize(
('encoder', 'decoder'), [
(MsgPackEncoder, MsgPackDecoder),
(JsonEncoder, JsonDecoder)
]
)
def test_codec(encoder, decoder):
def check_request(req1, req2):
assert req1.url == req2.url and req1.meta == req2.meta and req1.headers == req2.headers
enc = encoder(Request, send_body=True)
dec = decoder(Request, Response)
req = Request(url="http://www.yandex.ru", meta={"test": "shmest"}, headers={'reqhdr': 'value'})
req2 = Request(url="http://www.yandex.ru/search")
msgs = [
enc.encode_add_seeds([req]),
enc.encode_page_crawled(Response(url="http://www.yandex.ru", body='SOME CONTENT', headers={'hdr': 'value'},
request=req), [req2]),
enc.encode_request_error(req, "Host not found"),
enc.encode_update_score("1be68ff556fd0bbe5802d1a100850da29f7f15b1", 0.51, "http://yandex.ru", True),
enc.encode_new_job_id(1),
enc.encode_offset(0, 28796),
enc.encode_request(req)
]
it = iter(msgs)
o = dec.decode(next(it))
assert o[0] == 'add_seeds'
assert type(o[1]) == list
req_d = o[1][0]
check_request(req_d, req)
assert type(req_d) == Request
o = dec.decode(next(it))
assert o[0] == 'page_crawled'
assert type(o[1]) == Response
assert o[1].url == req.url and o[1].body == 'SOME CONTENT' and o[1].meta == req.meta
assert type(o[2]) == list
req_d = o[2][0]
assert type(req_d) == Request
assert req_d.url == req2.url
o_type, o_req, o_error = dec.decode(next(it))
assert o_type == 'request_error'
check_request(o_req, req)
assert o_error == "Host not found"
o_type, fprint, score, url, schedule = dec.decode(next(it))
assert o_type == 'update_score'
assert fprint == "1be68ff556fd0bbe5802d1a100850da29f7f15b1"
assert score == 0.51
assert url == "http://yandex.ru"
assert schedule is True
o_type, job_id = dec.decode(next(it))
assert o_type == 'new_job_id'
assert job_id == 1
o_type, partition_id, offset = dec.decode(next(it))
assert o_type == 'offset'
assert partition_id == 0
assert offset == 28796
o = dec.decode_request(next(it))
check_request(o, req) | 2.28125 | 2 |
webfrontend/shadowsocks/models.py | josephwuzw/shadowsocks | 0 | 12796269 |
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines if you wish to allow Django to create and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
class SSInstance(models.Model):
email = models.CharField('Email', max_length=32)
servicepwd = models.CharField('AdminPWD', db_column='pass', max_length=16, default='<PASSWORD>', editable=False) # Field renamed because it was a Python reserved word.
passwd = models.CharField('ServicePWD', max_length=16)
t = models.IntegerField('LastAlive', default=0, editable=False)
u = models.BigIntegerField('Upload', default=0)
d = models.BigIntegerField('Download', default=0)
transfer_enable = models.BigIntegerField('Threshold', default=settings.DEFAULT_THRESHOLD)
port = models.IntegerField('ServicePort', unique=True)
switch = models.IntegerField(default=1, editable=False)
enable = models.BooleanField('Enable', default=1)
type = models.IntegerField('Type', default=7, editable=False)
last_get_gitf_time = models.IntegerField(default=1, editable=False)
last_rest_pass_time = models.IntegerField(default=1, editable=False)
class Meta:
managed = False
db_table = 'user'
def __unicode__(self):
return "%s:%s U:%sM(%s) D:%sM(%s) Lmt:%sM(%s)" % (self.enable, self.port, "%.2f" % (float(self.u)/(1024*1024)), self.u, "%.2f" % (float(self.d)/(1024*1024)), self.d, "%.2f" % (float(self.transfer_enable)/(1024*1024)), self.transfer_enable)
| 2.046875 | 2 |
SocialNetwork/api/models.py | cmput404-ahyyy/group-webproject | 0 | 12796270 |
from django.db import models
# Create your models here.
from Author.models import Author, FriendRequest, Friends
from Posts.models import Post, Comment, Image | 1.65625 | 2 |
manifold/mnist_tools.py | kteavery/CS5173-ManifoldLearning | 1 | 12796271 | import keras
'''
Helper methods and variables for mnist models and manifolds
'''
color_list = [
"red",
"orange",
"yellow",
"lime",
"green",
"cyan",
"blue",
"purple",
"fuchsia",
"peru",
]
# # Returns 4D np array (1, HEIGHT, WIDTH, 1)
# def tensor_to_numpy(t):
# sess = K.get_session()
# t_np = sess.run(t)
# # Get rid of the extra dimension
# t_np = t_np.reshape(1, HEIGHT, WIDTH, 1)
# return t_np
def convert_to_model(seq_model):
# From https://github.com/keras-team/keras/issues/10386
input_layer = keras.layers.Input(batch_shape=seq_model.layers[0].input_shape)
prev_layer = input_layer
for layer in seq_model.layers:
layer._inbound_nodes = []
prev_layer = layer(prev_layer)
funcmodel = keras.models.Model([input_layer], [prev_layer])
return funcmodel | 3.15625 | 3 |
1601-1700/1633-Strings That Satisfies The Condition/1633-Strings That Satisfies The Condition.py | jiadaizhao/LintCode | 77 | 12796272 | class Solution:
"""
@param target: the target string
@param s:
@return: output all strings containing target in s
"""
def getAns(self, target, s):
# Write your code here
result = []
for word in s:
i = 0
for c in word:
if c == target[i]:
i += 1
if i == len(target):
result.append(word)
break
return result
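# Example (addition): with target = "abc" and s = ["abcde", "acb"], only
# "abcde" contains the letters of "abc" in order, so getAns returns
# ["abcde"]; the scan is O(total characters in s) overall.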
| 3.59375 | 4 |
for-proriv/myfuture/users/views.py | DmitryAA/EdVision | 0 | 12796273 |
from django.shortcuts import render,render_to_response
from django.contrib import auth
from django.http import HttpResponseRedirect
from django.template.context_processors import csrf
def login(request):
args = {}
args.update(csrf(request))
if request.method == "POST":
username = request.POST.get("username", "")
password = request.POST.get("password", "")
user = auth.authenticate(username=username, password=password)
if user is not None:
auth.login(request, user)
return HttpResponseRedirect("/")
else:
            args['login_error'] = "Invalid username or password"
return render_to_response('users/login.html', args)
else:
return render(request, 'users/login.html')
def logout(request):
auth.logout(request)
return HttpResponseRedirect("/")
| 2.125 | 2 |
examples/login_app.py | argabor/dash-desktop | 0 | 12796274 | # package imports
import dash
import dash_bootstrap_components as dbc
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
from dash import no_update
from flask import session
# local imports
from auth import authenticate_user, validate_login_session
from server import app, server, ui
# login layout content
def login_layout():
return html.Div(
[
dcc.Location(id='login-url', pathname='/login', refresh=False),
dbc.Container(
[
dbc.Row(
dbc.Col(
dbc.Card(
[
html.H4('Login', className='card-title'),
dbc.Input(id='login-email', placeholder='User', autoFocus=True),
dbc.Input(id='login-password', placeholder='Password', type='password'),
dbc.Button('Submit', id='login-button', color='success', block=True),
html.Br(),
html.Div(id='login-alert')
],
body=True
),
width=6
),
justify='center'
)
]
)
]
)
# home layout content
are_sure = dbc.Modal(
[
dbc.ModalHeader("Logout"),
dbc.ModalBody("Are you sure?"),
dbc.ModalFooter(
dbc.Row(
[
dbc.Col(dbc.Button("Yes", id="yes-are_sure")),
dbc.Col(dbc.Button("Close", id="close-are_sure")),
],
justify="center",
)
),
],
id="modal-are_sure",
centered=True,
)
test_page = html.Div([
html.H1("DashUI test!"),
html.Br(),
html.H6("Change the value in the text box to see callbacks in action!"),
dbc.Input(id='my-input', value='initial value', type='text', autoFocus=True),
html.Br(),
html.Div(id='my-output'),
are_sure,
])
@validate_login_session
def app_layout():
return \
html.Div([
dcc.Location(id='home-url',pathname='/home'),
dbc.Container(
[
dbc.Row(
dbc.Col(
test_page,
),
justify='center'
),
html.Br(),
dbc.Row(
dbc.Col(
dbc.Button('Logout', id='logout-button', color='danger', block=True, size='sm'),
width=4
),
justify='center'
),
html.Br()
],
)
]
)
# main app layout
app.layout = html.Div(
[
dcc.Location(id='url', refresh=False),
html.Div(
login_layout(),
id='page-content'
),
]
)
###############################################################################
# utilities
###############################################################################
# router
@app.callback(
Output('page-content', 'children'),
[Input('url', 'pathname')]
)
def router(url):
if url=='/home':
return app_layout()
elif url=='/login':
return login_layout()
else:
return login_layout()
# authenticate
@app.callback(
[Output('url', 'pathname'),
Output('login-alert', 'children')],
[Input('login-button', 'n_clicks'),
     Input('login-email', 'n_submit'),
     Input('login-password', 'n_submit'),
],
[State('login-email', 'value'),
State('login-password', 'value')])
def login_auth(n_clicks, n_submit_email, n_submit_password, email, pw):
'''
check credentials
if correct, authenticate the session
otherwise, authenticate the session and send user to login
'''
if n_clicks is None \
and n_submit_email is None \
and n_submit_password is None:
return no_update, no_update
    credentials = {'user': email, 'password': pw}
if authenticate_user(credentials):
session['authed'] = True
return '/home', ''
session['authed'] = False
return no_update, dbc.Alert('Incorrect credentials.', color='danger', dismissable=True)
@app.callback(
Output('home-url', 'pathname'),
[Input('yes-are_sure', 'n_clicks')]
)
def logout_(n_clicks):
'''clear the session and send user to login'''
if n_clicks is None:
return no_update
session['authed'] = False
return '/login'
@app.callback(
[Output('modal-are_sure', 'is_open'),
Output('close-are_sure', 'n_clicks')],
[Input('logout-button', 'n_clicks'),
Input('close-are_sure', 'n_clicks')],
[State('modal-are_sure', 'is_open')],
)
def logout_modal(logout_click, close_click, is_open):
if close_click is not None:
return False, None
elif logout_click is not None:
return True, None
else:
return is_open, close_click
###############################################################################
# callbacks
###############################################################################
# @app.callback(
# Output('...'),
# [Input('...')]
# )
# def func(...):
# ...
@app.callback(
Output(component_id='my-output', component_property='children'),
[Input(component_id='my-input', component_property='value')]
)
def update_output_div(input_value):
return f'Output: {input_value}'
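# Note (added): Dash wires these callbacks by component id/property; access to
# app_layout() itself is gated by the validate_login_session decorator
# imported from auth above.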
###############################################################################
# run app
###############################################################################
if __name__ == "__main__":
ui.run()
| 2.328125 | 2 |
addtodb.py | myxxxsquared/search_concept | 0 | 12796275 | <reponame>myxxxsquared/search_concept
import dbconf
sql = 'INSERT INTO `search_concept` (concept_name) VALUES (?)'
lines = [[line.strip()] for line in open('names.txt', encoding='utf8') if line.strip()]
conn = dbconf.getconn()
cur = conn.cursor()
cur.executemany(sql, lines)
conn.commit()
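# Note (added): the '?' qmark placeholder style must match the DB-API driver
# behind dbconf.getconn(); drivers such as psycopg2 or MySQLdb expect '%s'.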
| 2.46875 | 2 |
main.py | joelwright-dev/PyFaceDetection | 0 | 12796276 | <reponame>joelwright-dev/PyFaceDetection<gh_stars>0
import sys
import cv2
from random import *
#Load pre-trained data on face frontals from opencv (haar cascade algorithm)
trained_data = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
def Webcam():
#Choose an image to detect faces in
webcam = cv2.VideoCapture(0)
while True:
successful_frame_read, frame = webcam.read()
frame = cv2.flip(frame, 1)
grayscaled_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_coordinates = trained_data.detectMultiScale(grayscaled_img)
# Draw rectangle around faces
i=0
for face in face_coordinates:
(x, y, w, h) = face_coordinates[i]
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 3)
cv2.putText(frame, 'Detected face ' + str(i+1), (x, y-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
i += 1
cv2.imshow('opencv Face Detection', frame)
key = cv2.waitKey(1)
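        # (added) 81 and 113 are the ASCII codes for 'Q' and 'q'.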
if key==81 or key==113:
break
webcam.release()
def Image(img):
img = cv2.imread(img)
#Convert image to grayscale
grayscaled_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#Detect where the face is
face_coordinates = trained_data.detectMultiScale(grayscaled_img)
# Draw rectangle around faces
i=0
for face in face_coordinates:
(x, y, w, h) = face_coordinates[i]
cv2.rectangle(img, (x, y), (x+w, y+h), (randrange(50,256), randrange(50,256), randrange(50,256)), 3)
cv2.putText(img, 'Detected face ' + str(i+1), (x, y-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
i += 1
#Show the opencv image
cv2.imshow('opencv Face Detector', img)
#Wait until a key is pressed
cv2.waitKey()
if len(sys.argv) > 1:
if sys.argv[1] == "--image":
image = sys.argv[2]
Image(image)
elif sys.argv[1] == "--webcam":
Webcam()
else:
print("Must use --image [path to image] or --webcam")
else:
print("Must use --image [path to image] or --webcam")
print("---END---") | 3 | 3 |
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/__init__.py | ALBA-Synchrotron/cookiecutter-albalib | 0 | 12796277 | <reponame>ALBA-Synchrotron/cookiecutter-albalib
{% set klass = cookiecutter.project_slug.capitalize() -%}
{% set is_open_source = cookiecutter.open_source_license != 'Not open source' -%}
# -*- coding: utf-8 -*-
#
# This file is part of the {{ cookiecutter.project_name }} project
#
# Copyright (c) {% now 'local', '%Y' %} {{ cookiecutter.full_name }}
{% if is_open_source -%}
# Distributed under the {{ cookiecutter.open_source_license }}. See LICENSE for more info.
{% endif %}
"""Top-level package for {{ cookiecutter.project_name }}."""
__author__ = """{{ cookiecutter.full_name }}"""
__email__ = '{{ cookiecutter.email }}'
__version__ = '{{ cookiecutter.version }}'
from .core import {{ klass }}
| 1.648438 | 2 |
MGTU-final-stage/3.py | webkadiz/olympiad-problems | 0 | 12796278 | <filename>MGTU-final-stage/3.py
def to_dec_num(hex_str):
return int(hex_str, 16)
def to_hex_str(dec_num):
return hex(dec_num).upper()[2:]
def diff_mul(mul1, mul2):
    # Returns the index of the transcription error in the product if there is exactly one such error, otherwise -1
len_mul1 = len(mul1)
len_mul2 = len(mul2)
cnt_wrongs = 0
idx_wrong = -1
if len_mul1 != len_mul2:
return -1
for i in range(len_mul1):
if mul1[i] != mul2[i]:
cnt_wrongs += 1
idx_wrong = i
if cnt_wrongs >= 2: return -1
return idx_wrong
def define_rights_numbers(dec_num1, dec_num2, dec_mul):
    div1 = dec_mul / dec_num2  # potentially correct first number
    div2 = dec_mul / dec_num1  # potentially correct second number
if int(div1) == div1:
return to_hex_str(int(div1)), num2
elif int(div2) == div2:
return num1, to_hex_str(int(div2))
else:
assert 0
num1 = input()
num2 = input()
mul = input()
dec_num1 = to_dec_num(num1)
dec_num2 = to_dec_num(num2)
dec_mul = to_dec_num(mul)
if dec_mul == 0 and dec_num1 * dec_num2 != 0:  # edge case
if len(num1) != 1:
num1 = 0
else:
num2 = 0
print(num1)
print(num2)
print(mul)
exit()
hex_mul_right = to_hex_str(dec_num1 * dec_num2) # правильное произведение
idx_wrong = diff_mul(mul, hex_mul_right)
if ~idx_wrong:
print(num1)
print(num2)
print(hex_mul_right)
else:  # mul and dec_mul are the correct product
    # only one of the numbers will change
new_num1, new_num2 = define_rights_numbers(dec_num1, dec_num2, dec_mul)
print(new_num1)
print(new_num2)
print(mul)
| 3.078125 | 3 |
tests/data/tupleassign.py | alexchamberlain/black | 2 | 12796279 | <reponame>alexchamberlain/black
sdfjklsdfsjldkflkjsf, sdfjsdfjlksdljkfsdlkf, sdfsdjfklsdfjlksdljkf, sdsfsdfjskdflsfsdf = 1, 2, 3
# output
(
sdfjklsdfsjldkflkjsf,
sdfjsdfjlksdljkfsdlkf,
sdfsdjfklsdfjlksdljkf,
sdsfsdfjskdflsfsdf,
) = (1, 2, 3)
| 1.429688 | 1 |
bonked/__init__.py | brl0/bonked | 1 | 12796280 | <filename>bonked/__init__.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Top-level package for bonked."""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
__version__ = "0.1.2dev"
| 1.0625 | 1 |
ml/radomforest.py | wang1365/algorithm-practice | 0 | 12796281 | from random import shuffle
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import load_iris
import numpy as np
iris = load_iris()
print(type(iris), len(iris.data))
def test1():
    XY = np.array(list(zip(iris.data, iris.target)), dtype=object)  # list() needed on Python 3
np.random.shuffle(XY)
X, Y = XY[:, :1][:100], XY[:, 1:][:100]
X_test, Y_test = XY[:, :1][100:], XY[:, 1:][100:]
X.shape, Y.shape = -1, -1
X_test.shape, Y_test.shape = -1, -1
X = [list(i) for i in X]
X_test = [list(i) for i in X_test]
print('X:', X)
print('Y:', Y)
# Train model
rf = RandomForestRegressor()
rf.fit(X, Y)
# Predict new sample
Y_pre = rf.predict(X_test)
print('Y_test:', Y_test)
print('Y_pre:', Y_pre)
def test2():
    # sklearn.cross_validation was removed in newer scikit-learn; use model_selection
    from sklearn.model_selection import cross_val_score, ShuffleSplit
    X, Y, names = iris.data, iris.target, iris['feature_names']
    rf = RandomForestRegressor()
    scores = []
    for i in range(X.shape[1]):
        score = cross_val_score(rf, X[:, i:i + 1], Y,
                                scoring='r2',
                                cv=ShuffleSplit(n_splits=3, test_size=.3))
scores.append((round(np.mean(score), 3), names[i]))
print(sorted(scores, reverse=True))
if __name__ == '__main__':
test1()
test2()
| 2.84375 | 3 |
ProyectoHuevos/apps/carrito/apps.py | EstefaniaLaverde/ProyectoHuevos | 0 | 12796282 | <filename>ProyectoHuevos/apps/carrito/apps.py
from django.apps import AppConfig
class CarritoConfig(AppConfig):
name = 'carrito'
| 1.25 | 1 |
modular_exponentiation.py | KnugiHK/SingleScript | 0 | 12796283 | def fast_expo(b, e, n):
seq = bin(e)[3:]
result = b
for element in seq:
result = pow(result, 2)
if element == "1":
result *= b
result %= n
return result
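# (added) This is binary (square-and-multiply) exponentiation: bin(e)[3:]
# drops the '0b' prefix plus the leading 1 bit; each remaining bit squares
# the accumulator and, when the bit is 1, multiplies by b, reducing mod n.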
result = fast_expo(
123456789123456789,
123456789123456789123456789123456789,
123456789123456789123456789123456789123456789
)
print(
result == pow(
123456789123456789,
123456789123456789123456789123456789,
123456789123456789123456789123456789123456789
)
)
print(result)
| 3.15625 | 3 |
utils/data_parser.py | Jingil-Integrated-Management/JIM_backend | 0 | 12796284 | <filename>utils/data_parser.py
from datetime import date
from openpyxl import load_workbook
from apps.client.models import Client
from apps.division.models import Division
from apps.drawing.models import Drawing
from apps.part.models import Part, Material, OutSource
def _get(data, index):
try:
if data[index].value != 0:
return data[index].value
else:
return None
except IndexError:
return None
def load(path, worksheet):
wb = load_workbook(path, read_only=True)
ws = wb[worksheet]
return ws
def parse():
tmp_drawing = 'SW-'
CNT = 0 # Just for temporary usage
worksheets = ['상우정밀', '성우금형(제작)']
for ws in worksheets:
data = load('utils/data.xlsx', ws)
first_row = False
for row in data.rows:
if not first_row:
first_row = True
continue
x = str(_get(row, 0))
y = str(_get(row, 1))
z = str(_get(row, 2))
material = _get(row, 3)
price = _get(row, 4)
main_division = _get(row, 5)
sub_division = _get(row, 6)
drawing = _get(row, 7)
client = _get(row, 8)
material_price = _get(row, 9)
material_client = _get(row, 10)
milling_price = _get(row, 11)
milling_client = _get(row, 12)
heat_treat_price = _get(row, 13)
heat_treat_client = _get(row, 14)
wire_price = _get(row, 15)
wire_client = _get(row, 16)
        try:
            main_division = int(main_division)
        except (TypeError, ValueError):
            pass
        try:
            sub_division = int(sub_division)
        except (TypeError, ValueError):
            pass
if not main_division or main_division == ' ':
continue
if x:
print('{},{},{} - {}W {}'.format(x,
y, z, price, drawing))
client_obj, _ = Client.objects.get_or_create(
name=client
)
div_obj, _ = Division.objects.get_or_create(
main_division=main_division,
sub_division=sub_division,
client=client_obj
)
is_os = False
if material_price or milling_price or \
heat_treat_price or wire_price:
is_os = True
if drawing:
drawing, _ = Drawing.objects.get_or_create(
name=drawing,
client=client_obj,
is_closed=True,
created_at=str(date.today()),
is_outsource=is_os
)
else:
drawing = Drawing.objects.create(
name=tmp_drawing + '%05d' % CNT,
client=client_obj,
is_closed=True,
created_at=str(date.today()),
is_outsource=is_os
)
CNT += 1
material_client_obj = Client.objects.get(
name=material_client) if material_price else None
milling_client_obj = Client.objects.get(
name=milling_client) if milling_price else None
heat_treat_client_obj = Client.objects.get(
name=heat_treat_client) if heat_treat_price else None
wire_client_obj = Client.objects.get(
name=wire_client) if wire_price else None
outsource = None
if is_os:
outsource = OutSource.objects.create(
material_price=int(
material_price) if material_price else None,
milling_price=int(
milling_price) if milling_price else None,
heat_treat_price=int(
heat_treat_price) if heat_treat_price else None,
wire_price=int(
wire_price) if wire_price else None,
material_client=material_client_obj,
milling_client=milling_client_obj,
heat_treat_client=heat_treat_client_obj,
wire_client=wire_client_obj
)
material_obj, _ = Material.objects.get_or_create(
name=material
)
Part.objects.create(
drawing=drawing,
division=div_obj,
x=x, y=y, z=z,
price=int(price),
material=material_obj,
outsource=outsource
)
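# Usage sketch (added; assumes a configured Django environment, e.g. inside
# `python manage.py shell`):
#   from utils.data_parser import parse
#   parse()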
| 2.28125 | 2 |
soccermetrics/rest/resources/personnel.py | soccermetrics/soccermetrics-client-py | 43 | 12796285 | from soccermetrics.rest.resources import Resource
class Personnel(Resource):
"""
Represents a Personnel REST resource (/personnel/<resource> endpoint).
The Personnel resources let you access biographic and demographic
data on the following personnel involved in a football match:
* Players,
* Managers,
* Match referees.
Derived from :class:`Resource`.
"""
def __init__(self, resource, base_uri, auth):
"""
Constructor of Personnel class.
:param resource: Name of resource.
:type resource: string
:param base_uri: Base URI of API.
:type base_uri: string
:param auth: Authentication credential.
:type auth: tuple
"""
super(Personnel, self).__init__(base_uri,auth)
self.endpoint += "/personnel/%s" % resource | 3.28125 | 3 |
buybacks2/management/commands/buybacks_load_types.py | hullwarning/aa-buybacks | 1 | 12796286 | from django.core.management import call_command
from django.core.management.base import BaseCommand
from ... import __title__
from ...constants import (
EVE_CATEGORY_ID_ASTEROID,
EVE_CATEGORY_ID_CHARGE,
EVE_CATEGORY_ID_COMMODITY,
EVE_CATEGORY_ID_DRONE,
EVE_CATEGORY_ID_FIGHTER,
EVE_CATEGORY_ID_MATERIAL,
EVE_CATEGORY_ID_MODULE,
EVE_CATEGORY_ID_PLANETARY_COMMODITY,
EVE_CATEGORY_ID_SHIP,
EVE_GROUP_ID_HARVESTABLE_CLOUD,
)
class Command(BaseCommand):
help = "Preloads data required for aa-buybacks2 from ESI"
def handle(self, *args, **options):
call_command(
"eveuniverse_load_types",
__title__,
"--category_id",
str(EVE_CATEGORY_ID_MATERIAL),
"--category_id",
str(EVE_CATEGORY_ID_SHIP),
"--category_id",
str(EVE_CATEGORY_ID_MODULE),
"--category_id",
str(EVE_CATEGORY_ID_CHARGE),
"--category_id",
str(EVE_CATEGORY_ID_COMMODITY),
"--category_id",
str(EVE_CATEGORY_ID_DRONE),
"--category_id",
str(EVE_CATEGORY_ID_ASTEROID),
"--category_id",
str(EVE_CATEGORY_ID_PLANETARY_COMMODITY),
"--category_id",
str(EVE_CATEGORY_ID_FIGHTER),
"--group_id",
str(EVE_GROUP_ID_HARVESTABLE_CLOUD),
)
| 1.867188 | 2 |
scripts/examples/OpenMV/32-modbus/modbus_apriltag.py | jiskra/openmv | 1,761 | 12796287 | <gh_stars>1000+
import sensor, image
import time
from pyb import UART
from modbus import ModbusRTU
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...
uart = UART(3,115200, parity=None, stop=2, timeout=1, timeout_char=4)
modbus = ModbusRTU(uart, register_num=9999)
sensor.skip_frames(time = 2000)
clock = time.clock()
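# (added) Register layout written by the loop below: REGISTER[0] holds the tag
# count, then each detected tag occupies four registers: family, id, cx, cy.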
while(True):
if modbus.any():
modbus.handle(debug=True)
else:
clock.tick()
img = sensor.snapshot()
tags = img.find_apriltags() # defaults to TAG36H11 without "families".
modbus.clear()
modbus.REGISTER[0] = len(tags)
if tags:
print(tags)
i = 1
for tag in tags:
img.draw_rectangle(tag.rect(), color = 127)
modbus.REGISTER[i] = tag.family()
i += 1
modbus.REGISTER[i] = tag.id()
i += 1
modbus.REGISTER[i] = tag.cx()
i += 1
modbus.REGISTER[i] = tag.cy()
i += 1
#print(modbus.REGISTER[0:15])
#print(clock.fps())
| 2.40625 | 2 |
setup.py | timeoutdigital/cfn-resource-timeout | 4 | 12796288 | # -*- coding:utf-8 -*-
from __future__ import absolute_import
import codecs
from setuptools import setup
with codecs.open('README.rst') as readme_file:
readme = readme_file.read()
with codecs.open('HISTORY.rst') as history_file:
history = history_file.read()
setup(
name='cfn-resource-timeout',
version='1.2.0',
description=(
'Wrapper decorators for building CloudFormation custom resources'
),
long_description=readme + '\n\n' + history,
url='https://github.com/timeoutdigital/cfn-resource-timeout',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
keywords='cloudformation aws cloud custom resource amazon',
py_modules=["cfn_resource"],
install_requires=["requests"],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
package_data={},
data_files=[],
entry_points={},
)
| 1.539063 | 2 |
utils/Logger.py | JiwooKimAR/MWP-solver-with-pretrained-language-model | 5 | 12796289 | <reponame>JiwooKimAR/MWP-solver-with-pretrained-language-model
import os
from time import strftime
import logging
def make_log_dir(log_dir):
"""
Generate directory path to log
:param log_dir:
:return:
"""
if not os.path.exists(log_dir):
os.mkdir(log_dir)
log_dirs = os.listdir(log_dir)
if len(log_dirs) == 0:
idx = 0
else:
idx_list = sorted([int(d.split('_')[0]) for d in log_dirs])
idx = idx_list[-1] + 1
cur_log_dir = '%d_%s' % (idx, strftime('%Y%m%d-%H%M'))
full_log_dir = os.path.join(log_dir, cur_log_dir)
if not os.path.exists(full_log_dir):
os.mkdir(full_log_dir)
return full_log_dir
class Logger:
def __init__(self, log_dir):
log_file_format = "[%(lineno)d]%(asctime)s: %(message)s"
log_console_format = "%(message)s"
# Main logger
self.log_dir = log_dir
self.logger = logging.getLogger(log_dir)
self.logger.setLevel(logging.INFO)
self.logger.propagate = False
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(logging.Formatter(log_console_format))
file_handler = logging.FileHandler(os.path.join(log_dir, 'experiments.log'), encoding='UTF-8')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(log_file_format))
self.logger.addHandler(console_handler)
self.logger.addHandler(file_handler)
def info(self, msg):
self.logger.info(msg)
def close(self):
for handle in self.logger.handlers[:]:
self.logger.removeHandler(handle)
logging.shutdown()
def setup_logger(log_dir):
log_file_format = "[%(lineno)d]%(asctime)s: %(message)s"
log_console_format = "%(message)s"
# Main logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.propagate = False
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(logging.Formatter(log_console_format))
file_handler = logging.FileHandler(os.path.join(log_dir, 'experiments.log'))
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(log_file_format))
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger
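# Usage sketch (added): the Logger class and setup_logger are interchangeable;
#   log = Logger(make_log_dir('./logs'))  # './logs' is an example path
#   log.info('experiment started')
#   log.close()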
# class logger:
# def __init__(self, log_dir, filename='log.txt'):
# """
# Initialize logger.
#
# :param str log_dir: directory to save log file
# :param filename: log filename
# """
# self.log_dir = log_dir
# self.logger = logging.getLogger('StarRec - ' + filename)
# self.logger.setLevel(logging.INFO)
# self.logger.propagate = False
#
# # File handler
# self.fh = logging.FileHandler(os.path.join(log_dir, filename))
# self.fh.setLevel(logging.DEBUG)
# fh_format = logging.Formatter('%(asctime)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
# self.fh.setFormatter(fh_format)
# self.logger.addHandler(self.fh)
#
# # Console handler
# self.ch = logging.StreamHandler(sys.stdout)
# self.ch.setLevel(logging.INFO)
# ch_format = logging.Formatter('%(message)s')
# self.ch.setFormatter(ch_format)
# self.logger.addHandler(self.ch)
#
# def info(self, msg):
# """
# Log given msg.
#
# :param str msg: string to log
# """
# self.logger.info(msg)
#
# def close(self):
# """
# close file and command line log stream.
#
# """
# self.logger.removeHandler(self.fh)
# self.logger.removeHandler(self.ch)
# logging.shutdown()
| 2.125 | 2 |
ner/data/raw/scripts/ira_seq.py | zhang-informatics/UMLS_iDISK | 0 | 12796290 | import os
import argparse
import datetime
import numpy as np
from glob import glob
from typing import List, Set, Tuple
"""
Author: <NAME> (<EMAIL>)
Computes character-level Cohen's kappa and percentage
agreement for a set of brat annotated files from two
annotators for a sequence labeling task (e.g. NER).
"""
class BratANN(object):
"""
A brat annotation.
>>> ann = "T1\tent 1 4\tcat"
>>> b1 = BratANN("T3", "ent", 1, 4, "cat")
>>> b2 = BratANN.from_string(ann)
>>> b1 == b2
True
>>> b3 = BratANN("T3", "ent", 1, 5, "cat ")
>>> b1 == b3
False
"""
def __init__(self, num: str, label: str, start: int, end: int, text: str):
self.num = num
self.label = label
self.start = int(start)
self.end = int(end)
self.text = text
@classmethod
def from_string(cls, string: str):
(n, l, s, e, t) = string.split(maxsplit=4)
return cls(n, l, int(s), int(e), t)
def __str__(self) -> str:
return f"{self.num}\t{self.label} {self.start} {self.end}\t{self.text}" # noqa
def __repr__(self) -> str:
return f"<ira.BratANN '{self.num}, {self.label}, {self.start}, {self.end}, {self.text}'>" # noqa
def __eq__(self, other) -> bool:
"""
Overrides the default implementation
Two BratANNs are considering equal iff they have the same label,
offset, and text.
Equality does not consider the annotation number, e.g. T1
"""
if isinstance(other, BratANN):
return all([self.label == other.label,
self.start == other.start,
self.end == other.end,
self.text == other.text])
else:
return False
def parse_args():
def usage():
return """ira.py
[--help, Show this help message and exit]
[--test, Test the ira function]
[--docdir, Directory containing the documents that were annotated.
If not specified, looks in indir1.]
--indir1, Directory containing first annotators annotations
--indir2, Directory containing second annotators annotations
--annotation_conf, The brat annotation.conf that was used
for this annotation task
--disagreements, Whether to suppress, print, or log files
in which annotators disagree. Possible values
are "suppress", "print", "log". Default is
"suppress". If "log", writes file names to
"disagreements.log" in the current working
directory.
"""
desc = """Computes Cohen's kappa at the token
level for a sequence labeling task."""
parser = argparse.ArgumentParser(description=desc, usage=usage())
parser.add_argument("--test", action="store_true", default=False,
help="""Test the ira function.""")
args, remainder = parser.parse_known_args()
if args.test is True:
return args
parser = argparse.ArgumentParser(usage=usage())
parser.add_argument("--indir1", type=str, required=True)
parser.add_argument("--indir2", type=str, required=True)
parser.add_argument("--annotation_conf", type=str, required=True)
parser.add_argument("--docdir", type=str, required=False, default=None)
parser.add_argument("--disagreements", type=str,
required=False,
default="suppress",
choices=["suppress", "print", "log"])
args = parser.parse_args(remainder)
args.test = False
return args
def main(indir1: str, indir2: str, ann_conf: str,
docdir: str = None, disagreements: str = "suppress"):
"""
param indir{1,2}: Input directories containing the first and second
annotators .ann files, respectively.
param ann_conf: Path to the annotation.conf file.
param docdir: Directory containing the .txt files which were annotated.
If None, uses indir1.
param disagreements: How disagreements are logged. Possible values are
"suppress", "print" and "log". If "suppress",
do nothing. If "print", prints files that disagree
to the console. If "log", files that disagree
will be written to "disagreements.log" in the current
working directory.
"""
# Read in the documents.
if docdir is not None:
doc_fnames = glob(f"{docdir}/*.txt")
else:
doc_fnames = glob(f"{indir1}/*.txt")
docs = read_docs(doc_fnames)
# Read in the annotations.
basenames = [os.path.splitext(os.path.basename(fn))[0]
for fn in doc_fnames]
ann_fnames1 = [os.path.join(indir1, f"{bn}.ann") for bn in basenames]
ann_fnames2 = [os.path.join(indir2, f"{bn}.ann") for bn in basenames]
anns1 = read_anns(ann_fnames1)
anns2 = read_anns(ann_fnames2)
if not len(docs) == len(anns1) == len(anns2):
raise ValueError("Different numbers of documents and annotations.")
# Read the entity labels.
labels = read_labels(ann_conf)
# Compute inter rater agreement.
kappa, agreement, disagree_idxs = ira(docs, anns1, anns2, labels)
summary(kappa, "Cohen's Kappa")
summary(agreement, "Percentage Agreement")
# Do something with disagreements.
if disagreements == "print":
print("=== Disagreements ===")
for (idx, p_o) in disagree_idxs:
bn = os.path.basename(doc_fnames[idx])
print(f"{bn}: Agreement={p_o:.3f}")
if disagreements == "log":
with open("disagreements.log", 'w') as outF:
            outF.write(str(datetime.datetime.now()) + '\n')
for (idx, p_o) in disagree_idxs:
bn = os.path.basename(doc_fnames[idx])
outF.write(f"{bn}: Agreement={p_o:.3f}\n")
def read_docs(fnames: List[str]) -> List[str]:
"""
Reads in the documents.
param fnames: List of paths to .txt files to read.
returns: List of input documents.
"""
all_docs = []
for docfile in fnames:
doc = open(docfile, 'r').read()
all_docs.append(doc)
return all_docs
def read_anns(fnames: List[str]) -> List[List[BratANN]]:
"""
Reads all .ann files and converts their
annotations to BratANN objects.
param fnames: List of paths to .ann files to read.
returns: List of annotations.
"""
all_anns = []
for annfile in fnames:
anns = [BratANN.from_string(a.strip()) for a in open(annfile, 'r')]
all_anns.append(anns)
return all_anns
def read_labels(ann_conf: str) -> Set[str]:
"""
Reads the entity labels from annotation.conf.
param ann_conf: Path to annotation.conf
returns: set of entity labels.
"""
labels = set()
with open(ann_conf, 'r') as infile:
copy = False
for line in infile:
# Skip blank lines and comments.
if not line.strip() or line.strip().startswith('#'):
continue
if line.strip() == "[entities]":
copy = True
elif line.strip() == "[relations]":
copy = False
elif copy is True:
labels.add(line.strip())
return labels
def ira(docs: List[str],
anns1: List[List[BratANN]],
anns2: List[List[BratANN]],
labels: Set[str]) -> Tuple[np.array, np.array, List[Tuple[int, float]]]: # noqa
"""
Computes Cohen's kappa and percentage agreement between two annotators.
param docs: List of documents, output of read_docs().
param anns1: List of first annotators annotations, output of read_anns().
param anns2: List of second annotators annotations, output of read_anns().
param labels: Set of labels annotated, output of read_labels().
returns: Kappa and percentage agreement for each document.
"""
n_docs = len(docs)
p_os = np.zeros(n_docs)
kappas = np.zeros(n_docs)
disagree_idxs_po = []
for i in range(n_docs):
denom = len(docs[i])
v1 = label_vector(docs[i], anns1[i], labels)
v2 = label_vector(docs[i], anns2[i], labels)
# Observed agreement: How often the two annotators actually agreed.
# Equivalent to accuracy.
p_o = np.sum(v1 == v2) / denom
if p_o != 1.0:
disagree_idxs_po.append((i, p_o))
# Expected agreement: How often the two annotators are expected to
# agree. For number of items N, labels k, and the number of times
# rater j predicted label k, n_j_k:
# p_e = (1/N^2) * sum_k (n_1_k * n_2_k)
p_e = (1/denom**2) * np.sum([np.sum(v1 == k) * np.sum(v2 == k)
for k in range(len(labels)+1)])
if p_e == 1:
k = 0.0
else:
k = (p_o - p_e) / (1 - p_e)
p_os[i] = p_o
kappas[i] = k
return (kappas, p_os, disagree_idxs_po)
def label_vector(doc: List[str],
anns: List[List[BratANN]],
labels: Set[str]) -> np.array:
"""
Converts the document into an integer vector. The value
of each element corresponds to the entity type of the
annotation at that character position, with 0 indicating
no annotation. So an annotation task with 3 annotation types
would have a vector of 0s, 1s, 2s, and 3s.
param doc: Document that was annotated.
param anns: Annotations for each document.
param labels: Set of entity labels for this task.
returns: Vector of character level annotations.
"""
v = np.zeros(len(doc)) # For each character
for (i, lab) in enumerate(labels):
i += 1 # 0 is reserved for no label
idxs = [np.arange(a.start, a.end) for a in anns if a.label == lab]
idxs = [j for mask in idxs for j in mask]
v[idxs] = i
return v
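# Worked example (added): for doc "ab", labels {"ent"} and one annotation
# BratANN("T1", "ent", 0, 2, "ab"), label_vector returns array([1., 1.]).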
def summary(results: np.array, varname: str = None):
"""
Prints summary statistics for the supplied results.
param results: Numeric array of results (e.g. kappas).
param varname: (Optional) Name of the variable being summarized.
"""
if varname is not None:
print(varname)
if len(results) == 1:
print(f"{results[0]:.3f}")
else:
rmean = np.mean(results)
rmax = np.max(results)
rmin = np.min(results)
rstd = np.std(results)
print(f"""Mean: {rmean:.3f} +/-{rstd:.3f}\nRange: ({rmin:.3f}, {rmax:.3f})""") # noqa
def test():
"""
A small example to test ira().
"""
docs = ["The cats sat on the mat"]
ann_strs1 = ["T1\tent 4 8\tcats",
"T2\tent 9 12\tsat",
"T3\tent 20 23\tmat"]
anns1 = [[BratANN.from_string(s) for s in ann_strs1]]
    ann_strs2 = ["T1\tent 4 7\tcat", "T2\tent 20 23\tmat"]
anns2 = [[BratANN.from_string(s) for s in ann_strs2]]
labels = ["ent"]
kappas, agreements, disagreements = ira(docs, anns1, anns2, labels)
assert(np.isclose(kappas[0], 0.629, atol=1e-03))
assert(np.isclose(agreements[0], 0.826, atol=1e-03))
print("All tests passed.")
if __name__ == "__main__":
args = parse_args()
if args.test is True:
import doctest
doctest.testmod()
test()
else:
main(args.indir1, args.indir2, args.annotation_conf,
docdir=args.docdir, disagreements=args.disagreements)
| 2.96875 | 3 |
att_app/migrations/0001_initial.py | tunir27/django-Attendance | 3 | 12796291 | <gh_stars>1-10
# Generated by Django 2.0.1 on 2018-02-10 08:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Student_Attendance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.CharField(default='00/00/00', help_text='Enter the Date', max_length=15, verbose_name='Date')),
('in_time', models.CharField(default='00:00:00', help_text='Enter the IN Time', max_length=15, verbose_name='IN Time')),
('out_time', models.CharField(blank=True, help_text='Enter the OUT Time', max_length=15, null=True, verbose_name='OUT Time')),
('duration', models.CharField(blank=True, help_text='Enter the Duration', max_length=15, null=True, verbose_name='Duration')),
('status', models.CharField(default='0', help_text='Enter the Status', max_length=1, verbose_name='Student Status')),
('st_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Student_Details',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(help_text='Enter the First-Name', max_length=50, null=True, verbose_name='First Name')),
('last_name', models.CharField(help_text='Enter the Last Name', max_length=50, null=True, verbose_name='Last Name')),
('dob', models.DateField(help_text='Enter Date of Birth', max_length=8, null=True, verbose_name='Date of Birth')),
('address', models.CharField(help_text='Enter the Address', max_length=50, null=True, verbose_name='Address')),
('g_name', models.CharField(help_text='Enter the Student Guardian Name', max_length=50, null=True, verbose_name='Guardian Name')),
('phone', models.CharField(help_text='Enter Guardian Number', max_length=15, null=True, verbose_name='Guardian Phone')),
('st_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Teacher_Details',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(help_text='Enter the First-Name', max_length=50, null=True, verbose_name='First Name')),
('last_name', models.CharField(help_text='Enter the Last Name', max_length=50, null=True, verbose_name='Last Name')),
('dob', models.DateField(help_text='Enter Date of Birth', max_length=8, null=True, verbose_name='Date of Birth')),
('address', models.CharField(help_text='Enter the Address', max_length=50, null=True, verbose_name='Address')),
('phone', models.CharField(help_text='Enter Phone Number', max_length=15, null=True, verbose_name='Phone No')),
('t_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.75 | 2 |
keras_based/exchange/core/models/multivariate_lstm.py | TianyuDu/AnnEconForecast | 7 | 12796292 | import datetime
import os
import keras
import numpy as np
import pandas as pd
from base_model import BaseModel
from multivariate_container import MultivariateContainer
from typing import Union
class MultivariateLSTM(BaseModel):
def __init__(
self,
container: MultivariateContainer,
config: bool=None,
create_empty: bool=False) -> None:
"""
Initialization method.
"""
_, self.time_steps, self.num_fea = container.train_X.shape
print(f"MultivariateLSTM Initialized: \
\n\tTime Step: {self.time_steps}\
\n\tFeature: {self.num_fea}")
self.config = config
self.container = container
self.hist = None
if create_empty:
self.core = None
else:
self.core = self._construct_lstm_model(self.config)
self._gen_file_name()
        print(
            f"\tMultivariateLSTM: Current model will be saved to ./saved_models/{self.file_name}/")
def _construct_lstm_model(
self,
config: dict,
verbose: bool=True
) -> keras.Model:
"""
        Construct the stacked LSTM model.
        Note: modify this method to change model configurations.
        # TODO: Add arbitrary layer support.
"""
print("MultivariateLSTM: Generating LSTM model using Model API.")
input_sequence = keras.layers.Input(
shape=(self.time_steps, self.num_fea),
dtype="float32",
name="input_sequence")
normalization = keras.layers.BatchNormalization()(input_sequence)
lstm = keras.layers.LSTM(
units=config["nn.lstm1"],
return_sequences=False
)(normalization)
dense1 = keras.layers.Dense(
units=config["nn.dense1"],
name="Dense1"
)(lstm)
predictions = keras.layers.Dense(
1,
name="Prediction"
)(dense1)
model = keras.Model(inputs=input_sequence, outputs=predictions)
model.compile(loss="mse", optimizer="adam")
if verbose:
print("\tMultivariateLSTM: LSTM model constructed with configuration: ")
keras.utils.print_summary(model)
return model
def _construct_lstm_sequential(
self,
config: dict,
verbose: bool=True
) -> keras.Sequential:
"""
        Construct the stacked LSTM model.
        Note: modify this method to change model configurations.
        # TODO: Add arbitrary layer support.
"""
print("MultivariateLSTM: Generating LSTM model with Keras Sequential API")
model = keras.Sequential()
model.add(keras.layers.LSTM(
units=config["nn.lstm1"],
input_shape=(self.time_steps, self.num_fea),
return_sequences=True,
name="LSTM1"
))
model.add(
keras.layers.LSTM(
units=config["nn.lstm2"],
name="LSTM2"
))
model.add(
keras.layers.Dense(
units=config["nn.dense1"],
name="Dense1"
))
model.add(
keras.layers.Dense(
units=1,
name="Dense_output"
))
model.compile(loss="mse", optimizer="adam")
if verbose:
print("\tMultivariateLSTM: LSTM model constructed with configuration: ")
keras.utils.print_summary(model)
return model
def update_config(
self,
new_config: dict
) -> None:
"""
Update the neural network configuration, and re-construct, re-compile the core.
"""
# TODO: add check configuration method here.
print("MultivariateLSTM: Updating neural network configuration...")
self.prev_config = self.config
self.config = new_config
self.core = self._construct_lstm_model(self.config, verbose=False)
print("\tDone.")
def fit_model(
self,
epochs: int=10
) -> None:
start_time = datetime.datetime.now()
print("MultivariateLSTM: Start fitting.")
self.hist = self.core.fit(
self.container.train_X,
self.container.train_y,
epochs=epochs,
batch_size=32 if self.config is None else self.config["batch_size"],
validation_split=0.1 if self.config is None else self.config["validation_split"]
)
finish_time = datetime.datetime.now()
time_taken = finish_time - start_time
print(f"\tFitting finished, {epochs} epochs for {str(time_taken)}")
def predict(
self,
X_feed: np.ndarray
) -> np.ndarray:
y_hat = self.core.predict(X_feed, verbose=1)
# y_hat = self.container.scaler_y.inverse_transform(y_hat)
# y_hat returned used to compare with self.container.*_X directly.
return y_hat
def save_model(
self,
file_dir: str=None
) -> None:
if file_dir is None:
# If no file directory specified, use the default one.
file_dir = self.file_name
        # Try to create the record folder; os.makedirs raises on failure,
        # unlike os.system("mkdir ..."), whose exit status the old code ignored.
        try:
            folder = f"./saved_models/{file_dir}/"
            os.makedirs(folder, exist_ok=True)
            print(f"Experiment record directory created: {folder}")
        except OSError:
            print("Current directory: ")
            _ = os.system("pwd")
            raise FileNotFoundError(
                "Failed to create directory, please create directory ./saved_models/")
# Save model structure to JSON
print("Saving model structure...")
model_json = self.core.to_json()
with open(f"{folder}model_structure.json", "w") as json_file:
json_file.write(model_json)
print("Done.")
# Save model weight to h5
print("Saving model weights...")
self.core.save_weights(f"{folder}model_weights.h5")
print("Done")
# Save model illustration to png file.
print("Saving model visualization...")
try:
keras.utils.plot_model(
self.core,
to_file=f"{folder}model.png",
show_shapes=True,
show_layer_names=True)
except:
print("Model illustration cannot be saved.")
# Save training history (if any)
if self.hist is not None:
hist_loss = np.squeeze(np.array(self.hist.history["loss"]))
hist_val_loss = np.squeeze(np.array(self.hist.history["val_loss"]))
combined = np.stack([hist_loss, hist_val_loss])
combined = np.transpose(combined)
df = pd.DataFrame(combined, dtype=np.float32)
df.columns = ["loss", "val_loss"]
df.to_csv(f"{folder}hist.csv", sep=",")
print(f"Training history is saved to {folder}hist.csv...")
else:
print("No training history found.")
print("Done.")
def load_model(
self,
folder_dir: str
) -> None:
"""
#TODO: doc
"""
if not folder_dir.endswith("/"):
# Assert the correct format, folder_dir should be
folder_dir += "/"
print(f"Load model from folder {folder_dir}")
# construct model from json
print("Reconstruct model from Json file...")
try:
json_file = open(f"{folder_dir}model_structure.json", "r")
except FileNotFoundError:
raise Warning(
f"Json file not found. Expected: {folder_dir}model_structure.json"
)
model_file = json_file.read()
json_file.close()
self.core = keras.models.model_from_json(model_file)
print("Done.")
# load weights from h5
print("Loading model weights...")
try:
self.core.load_weights(
f"{folder_dir}model_weights.h5", by_name=True)
except FileNotFoundError:
raise Warning(
f"h5 file not found. Expected: {folder_dir}model_weights.h5"
)
print("Done.")
self.core.compile(loss="mse", optimizer="adam")
def summarize_training(self):
"""
Summarize training result to string file.
- Loss
- Epochs
- Time taken
"""
raise NotImplementedError
def visualize_training(self):
"""
Visualize the training result:
- Plot training set loss and validation set loss.
"""
# TODO: move visualize training to general methods.
raise NotImplementedError
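# Usage sketch (added; MultivariateContainer's constructor arguments are
# assumptions -- see multivariate_container.py for the real signature):
#   container = MultivariateContainer(...)  # must expose train_X / train_y
#   config = {"nn.lstm1": 64, "nn.dense1": 32,
#             "batch_size": 32, "validation_split": 0.1}
#   model = MultivariateLSTM(container, config=config)
#   model.fit_model(epochs=10)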
| 2.734375 | 3 |
dreamerv2/training/__init__.py | baecm/dreamerv2 | 0 | 12796293 | <filename>dreamerv2/training/__init__.py
from .trainer import Trainer
from .config import Config
from .evaluator import Evaluator | 1.109375 | 1 |
dependencies/src/4Suite-XML-1.0.2/Ft/Xml/Xslt/_4xslt.py | aleasims/Peach | 0 | 12796294 | ########################################################################
# $Header: /var/local/cvsroot/4Suite/Ft/Xml/Xslt/_4xslt.py,v 1.50.2.1 2006/10/16 21:52:39 jkloth Exp $
"""
Implementation of '4xslt' command
(functions defined here are used by the Ft.Lib.CommandLine framework)
Copyright 2006 Fourthought, Inc. (USA).
Detailed license and copyright information: http://4suite.org/COPYRIGHT
Project home, documentation, distributions: http://4suite.org/
"""
import re, os, sys, traceback, cPickle, time
from cStringIO import StringIO
from Ft import GetConfigVars
from Ft.Lib import UriException, CloseStream
from Ft.Lib.CommandLine import CommandLineApp, Options, Arguments
from Ft.Lib.CommandLine.CommandLineUtil import SourceArgToInputSource
from Ft.Lib.Uri import OsPathToUri, Absolutize
from Ft.Xml.InputSource import InputSourceFactory, DefaultFactory
from Ft.Xml.XPath import RuntimeException, CompiletimeException
from Ft.Xml.Xslt import Processor, XsltException
from Ft.Xml import SplitQName
g_paramBindingPattern = re.compile(r"([\d\D_\.\-]*:?[\d\D_\.\-]+)=(.*)")
g_prefixBindingPattern = re.compile(r"([\d\D_\.\-]+)=(.+)")
from Ft import MAX_PYTHON_RECURSION_DEPTH
sys.setrecursionlimit(MAX_PYTHON_RECURSION_DEPTH)
class XsltCommandLineApp(CommandLineApp.CommandLineApp):
from Ft.__config__ import \
NAME as project_name, VERSION as project_version, URL as project_url
name = '4xslt'
    summary = ('command-line tool for performing XSLT transformations on XML'
               ' documents')
description = """4XSLT command-line application"""
options = [
Options.Option(
'v', 'validate',
'Validate the input file as it is being parsed'),
Options.Option(
'i', 'ignore',
'Ignore <?xml-stylesheet ...?> instructions'),
Options.Option(
None, 'media=MEDIA',
'Set media to honor in xml-stylesheet PIs'),
Options.Option(
'D', 'define=NAME=VALUE',
'Bind a top-level parameter'),
Options.Option(
'P', 'prefix=PREFIX=NSURI',
'Assign a namespace to a prefix used in a top-level parameter'),
Options.Option(
'I', 'alt-sty-path=PATH',
"Same as --alt-sty-uri but uses OS path"),
Options.Option(
None, 'alt-sty-uri=URI',
"Define an add'l base URI for imports and includes"),
Options.Option(
'o', 'outfile=FILE',
'Direct transformation output to FILE (file will be overwritten'
' if it exists)'),
Options.Option(
'e', 'stacktrace-on-error',
'Display a stack trace when an error occurs'),
Options.Option(
None, 'noxinclude',
'Do not expand XIncludes in source document and stylesheet'),
Options.Option(
None, 'trace',
'Send execution trace output to stderr or file set by'
' --trace-file'),
Options.Option(
None, 'trace-file=FILE',
'Trace file for execution trace output when using --trace'),
Options.ExclusiveOptions([
Options.Option(
None, 'reflex',
'Reflexive transform (use the stylesheet as the source'
' document).'),
Options.Option(
None, 'compile',
'Compile an instant stylesheet. The result is written to'
' stdout, unless -o is used.'),
Options.Option(
None, 'instant',
'The stylesheet is "instant" (compiled). Only one stylesheet'
' can be specified with this option.'),
Options.Option(
None, 'chain',
'Chain the stylesheets (result of transforming with the first'
' is the source document for transforming with the second, and'
' so on). Without this option, each extra stylesheet is'
' imported by the preceding one.'),
]),
Options.Option(
None, 'time',
'Display the elapsed transformation time on stderr'),
Options.Option(
None, 'msg-prefix=STRING',
'Prepend string to xsl:message output'),
Options.Option(
None, 'msg-suffix=STRING',
'Append string to xsl:message output'),
Options.Option(
None, 'no-messages',
'Suppress xsl:message output and warnings'),
]
arguments = [
Arguments.RequiredArgument(
'source-uri',
'The URI of the XML document to transform, or "-" to indicate'
' standard input. If using --reflex, it is also the stylesheet.'
' If using --compile, it is the stylesheet to compile.'),
Arguments.ZeroOrMoreArgument(
'stylesheet-uri',
'The URI(s) of the stylesheet(s) to apply.'),
]
def validate_options(self, options):
if options.has_key('trace'):
outputfile = options.get('outfile')
tracefile = options.get('trace-file')
msg = ''
if not outputfile and not tracefile:
msg = 'When using --trace, you must specify an output' \
' file for the trace info (--trace-file) and/or' \
' for the transformation result (-o or --outfile).'
else:
outputfile_abspath = outputfile and os.path.abspath(outputfile) or None
tracefile_abspath = tracefile and os.path.abspath(tracefile) or None
if outputfile_abspath == tracefile_abspath:
msg = 'The trace and result output destinations must differ.'
for path in (outputfile, outputfile_abspath):
if not path:
pass # we already handled the one case that matters
elif path.endswith(os.sep):
msg = 'The output file %s would be a directory.' % path
elif os.path.isdir(path):
msg = 'The output file %s is a directory.' % path
for path in (tracefile, tracefile_abspath):
if not path:
pass # we already handled the one case that matters
elif path.endswith(os.sep):
msg = 'The trace file %s would be a directory.' % path
elif os.path.isdir(path):
msg = 'The trace file %s is a directory.' % path
if msg:
raise SystemExit('%s\n See "%s -h" for usage info.' % (msg, sys.argv[0]))
return CommandLineApp.CommandLineApp.validate_options(self, options)
def run(self, options, arguments):
# 1st arg ('source-uri') will be the source doc normally, or the
# source doc & stylesheet in the case of --reflex, or the
# 1st stylesheet in the case of --compile. It is never OK to have
# zero args. For --reflex, there must be only one arg. For
# --instant, there must be exactly two. For --chain, three or more.
msg = ''
argslen = len(arguments)
if argslen != 2 and options.has_key('instant'):
msg = 'When using --instant, exactly 1 source doc URI and 1 stylesheet URI are required.'
elif argslen < 3 and options.has_key('chain'):
msg = 'When using --chain, 1 source doc URI and at least 2 stylesheet URIs are required.'
elif argslen > 1:
if options.has_key('reflex'):
msg = 'When using --reflex, only 1 source/stylesheet URI may be given.'
elif arguments.values().count('-') > 1:
msg = 'Standard input may be used for only 1 source document or stylesheet.'
if msg:
raise SystemExit('%s\n See "%s -h" for usage info.' % (msg, sys.argv[0]))
return Run(options, arguments)
def StySourceArgToInputSource(arg, factory, *v_args, **kw_args):
"""
A wrapper for SourceArgToInputSource().
If an InputSource cannot be created from the source argument,
then the argument is resolved against alternative stylesheet base
URIs (if any) until an InputSource is successfully created or the
list of URIs is exhausted.
"""
isrc = None
stylesheetAltUris = None
if kw_args.has_key('stylesheetAltUris'):
stylesheetAltUris = kw_args['stylesheetAltUris']
del kw_args['stylesheetAltUris']
try:
isrc = SourceArgToInputSource(arg, factory, *v_args, **kw_args)
except (OSError, UriException), error:
if stylesheetAltUris:
for alt_uri in stylesheetAltUris:
try:
new_uri = factory.resolver.normalize(arg, alt_uri)
isrc = factory.fromUri(new_uri)
break
except (OSError, UriException):
pass
if not isrc:
raise error
return isrc
def ReportFatalException(e, stacktrace_on_error=False):
"""
Formats various exceptions; raises SystemExit, never returns.
"""
if isinstance(e, XsltException) or \
isinstance(e, RuntimeException) or \
isinstance(e, CompiletimeException):
if stacktrace_on_error:
traceback.print_exc(1000, sys.stderr)
raise SystemExit(''.join([c.encode(sys.getdefaultencoding(), 'ignore')
or "&#%d;" % ord(c) for c in e.message]))
else:
if stacktrace_on_error:
traceback.print_exc(1000, sys.stderr)
msg = ''
else:
exceptiontype = str(sys.exc_type)
if exceptiontype.startswith('exceptions.'):
exceptiontype = exceptiontype[11:]
msg = 'An unexpected error occurred while processing.\n' + \
'The error was: %s: %s\n' % (exceptiontype, str(e)) + \
'Use the -e (--stacktrace-on-error) option for a full stack trace.'
if msg:
raise SystemExit(''.join([c.encode(sys.getdefaultencoding(), 'ignore')
or "&#%d;" % ord(c) for c in msg]))
else:
sys.exit()
def Run(options, args):
stacktrace_on_error = options.has_key('stacktrace-on-error')
# -- Set up output streams (will die if files unwritable) ----------
# (assumes output destinations have been checked for uniqueness)
if options.has_key('compile'):
output_flags = "wb"
else:
output_flags = "w"
out_file = options.has_key('outfile') \
and open(options['outfile'], output_flags) or sys.stdout
trace_file = None
if options.has_key('trace'):
trace_file_name= options.get('trace-file')
if trace_file_name:
trace_file_name = os.path.abspath(trace_file_name)
out_file_name = options.get('outfile')
if out_file_name:
out_file_name = os.path.abspath(out_file_name)
trace_file = options.has_key('trace-file') \
and open(options['trace-file'], 'w') or sys.stderr
# -- Set up XSLT processor (without stylesheets) -------------------
# gather alt base URIs for xsl:include/import resolution
#
# An ordered list of absolute URIs is derived from these sources:
# 1. command-line option(s) alt-sty-uri=../../foo.xml
# 2. command-line option(s) alt-sty-path=C:\foo.xml
# 3. environment variable XSLTINCLUDE=\a\b\foo.xml
alt_sty_uris = options.get('alt-sty-uri', [])
if type(alt_sty_uris) != list:
alt_sty_uris = [alt_sty_uris]
alt_sty_paths = options.get('alt-sty-path', [])
if type(alt_sty_paths) != list:
alt_sty_paths = [alt_sty_paths]
more_sty_uris = [OsPathToUri(path, attemptAbsolute=1) for path in alt_sty_paths]
alt_sty_uris.extend(more_sty_uris)
if os.environ.has_key('XSLTINCLUDE'):
more_sty_uris = [
OsPathToUri(path, attemptAbsolute=1)
for path in os.environ["XSLTINCLUDE"].split(os.pathsep)
]
alt_sty_uris.extend(more_sty_uris)
del more_sty_uris
# tracing requires special setup.
if options.has_key('trace'):
from Ft.Xml.Xslt import ExtendedProcessingElements, StylesheetHandler
processor = ExtendedProcessingElements.ExtendedProcessor(stylesheetAltUris=alt_sty_uris)
processor._4xslt_trace = True
processor._4xslt_traceStream = trace_file
StylesheetHandler._ELEMENT_MAPPING = ExtendedProcessingElements.GetMappings()
else:
processor = Processor.Processor(stylesheetAltUris=alt_sty_uris)
# media prefs affect xsl:stylesheet PI selection
processor.mediaPref = options.get('media', None)
# register extension modules
moduleList = os.environ.get("EXTMODULES")
if moduleList:
processor.registerExtensionModules(moduleList.split(":"))
# set up the source document reader
from Ft.Xml import Domlette
validate_flag = options.has_key('validate')
if validate_flag:
reader = Domlette.ValidatingReader
else:
reader = Domlette.NonvalidatingReader
processor.setDocumentReader(reader)
#Customize message behavior
if options.has_key('no-messages'):
processor.messageControl(1)
else:
processor.messageControl(0)
if options.has_key('msg-prefix'):
prefix = options['msg-prefix']
prefix = prefix.replace('\\n', '\n')
prefix = prefix.replace('\\r', '\r')
prefix = prefix.replace('\\t', '\t')
processor.msgPrefix = prefix
if options.has_key('msg-suffix'):
suffix = options['msg-suffix']
suffix = suffix.replace('\\n', '\n')
suffix = suffix.replace('\\r', '\r')
suffix = suffix.replace('\\t', '\t')
processor.msgSuffix = suffix
# -- Handle compile operation --------------------------------------
if options.has_key('compile'):
xinclude = not options.has_key('noxinclude')
all_source_args = [args['source-uri']] + args['stylesheet-uri']
try:
sty_isrcs = map(lambda arg: StySourceArgToInputSource(arg,
DefaultFactory, processIncludes=xinclude,
stylesheetAltUris=alt_sty_uris),
all_source_args)
for isrc in sty_isrcs:
processor.appendStylesheet(isrc)
CloseStream(isrc, quiet=True)
# use better pickle format in Python 2.3 and up
if hasattr(cPickle, 'HIGHEST_PROTOCOL'):
cPickle.dump(processor.stylesheet.root, out_file, cPickle.HIGHEST_PROTOCOL)
else:
cPickle.dump(processor.stylesheet.root, out_file, 1)
except Exception, e:
ReportFatalException(e, stacktrace_on_error)
CloseStream(out_file, quiet=True)
if out_file is sys.stdout:
dest = 'standard output'
elif hasattr(out_file, 'name'):
dest = out_file.name
elif options.has_key('outfile'):
dest = options['outfile']
else:
dest = 'unknown destination(!)'
sys.stderr.write('Compiled stylesheet written to %s.\n' % dest)
sys.stderr.flush()
return
# -- Prepare for any transform -------------------------------------
# source document will be an InputSource
source_isrc = None
# list of InputSources for stylesheets (only if chaining)
sty_chain = None
# -- Prepare for reflexive transform -------------------------------
if options.has_key('reflex'):
xinclude = not options.has_key('noxinclude')
source_arg = args['source-uri']
try:
isrc = StySourceArgToInputSource(source_arg, DefaultFactory,
processIncludes=xinclude,
stylesheetAltUris=alt_sty_uris)
# We could parse the doc and use processor.appendStylesheetNode(),
# but if there are XSLT errors, the line & column # won't show up
# in the error message. So we will cache the doc in a reusable
# stream in memory.
stream = StringIO(isrc.read())
CloseStream(isrc, quiet=True)
stream.reset()
source_isrc = isrc.clone(stream)
del isrc
processor.appendStylesheet(source_isrc)
source_isrc.reset()
except Exception, e:
ReportFatalException(e, stacktrace_on_error)
# -- Prepare for regular transform ---------------------------------
else:
xinclude = not options.has_key('noxinclude')
instant = options.has_key('instant')
source_arg = args['source-uri']
if instant:
sty_arg = args['stylesheet-uri'][0]
try:
sty_isrc = StySourceArgToInputSource(sty_arg, DefaultFactory,
processIncludes=xinclude,
stylesheetAltUris=alt_sty_uris)
sty_obj = cPickle.load(sty_isrc)
CloseStream(sty_isrc, quiet=True)
processor.appendStylesheetInstance(sty_obj, refUri=sty_isrc.uri)
source_isrc = SourceArgToInputSource(source_arg, DefaultFactory,
processIncludes=xinclude)
except cPickle.UnpicklingError:
ReportFatalException(ValueError(
'%s does not appear to be a compiled stylesheet object.' % sty_arg),
stacktrace_on_error)
except Exception, e:
ReportFatalException(e, stacktrace_on_error)
else:
sty_args = args['stylesheet-uri']
chain = options.has_key('chain')
try:
sty_isrcs = map(lambda arg: StySourceArgToInputSource(arg,
DefaultFactory, processIncludes=xinclude,
stylesheetAltUris=alt_sty_uris),
sty_args)
if chain and len(sty_isrcs) > 1:
sty_chain = sty_isrcs
else:
for isrc in sty_isrcs:
processor.appendStylesheet(isrc)
CloseStream(isrc, quiet=True)
source_isrc = SourceArgToInputSource(source_arg, DefaultFactory,
processIncludes=xinclude)
except Exception, e:
ReportFatalException(e, stacktrace_on_error)
# -- Gather transformation-time options ----------------------------
# top-level params
nsmappings = {}
prefixes = options.get('prefix', [])
if not isinstance(prefixes, list):
prefixes = [prefixes]
for p in prefixes:
match = g_prefixBindingPattern.match(p)
if match is None:
raise TypeError('Error in -P/--prefix arguments')
nsmappings[match.group(1)] = match.group(2)
defs = options.get('define', [])
if not isinstance(defs, list):
defs = [defs]
top_level_params = {}
for d in defs:
match = g_paramBindingPattern.match(d)
if match is None:
raise TypeError('Error in -D/--define arguments')
name = match.group(1)
prefix, local = SplitQName(name)
if prefix in nsmappings:
name = (nsmappings[prefix], local)
top_level_params[name] = match.group(2)
# misc runtime flags
ignore_pis = options.has_key('ignore')
checktime = options.has_key('time')
# -- Do the transformation -----------------------------------------
try:
if source_isrc is None:
raise TypeError('No source document to transform!')
if sty_chain:
resultstream = StringIO()
if checktime:
start = time.time()
i = 0
for sty_isrc in sty_chain[:-1]:
i += 1
# FIXME:
# use RtfWriter to make each result be a Domlette document,
# for speed, and so that the input to each chained stylesheet
# is the previous transformation's result tree (not the
# previous transformation's serialized result tree)
processor.appendStylesheet(sty_isrc)
CloseStream(sty_isrc, quiet=True)
processor.run(source_isrc, ignore_pis,
topLevelParams=top_level_params,
outputStream=resultstream)
processor.reset()
resultstream.reset()
sourcestream = resultstream
new_uri = Absolutize('chained-transformation-result-%d' % i,
source_isrc.uri)
source_isrc = source_isrc.clone(sourcestream, new_uri)
resultstream = StringIO()
processor.appendStylesheet(sty_chain[-1])
processor.run(source_isrc, ignore_pis,
topLevelParams=top_level_params,
outputStream=out_file)
if checktime:
end = time.time()
CloseStream(source_isrc, quiet=True)
else:
if checktime:
start = time.time()
processor.run(source_isrc, ignore_pis,
topLevelParams=top_level_params,
outputStream=out_file)
if checktime:
end = time.time()
CloseStream(source_isrc, quiet=True)
except Exception, e:
ReportFatalException(e, stacktrace_on_error)
# -- Handle post-transformation tasks ------------------------------
try:
if out_file.isatty():
out_file.flush()
sys.stderr.write('\n')
else:
out_file.close()
except (IOError, ValueError):
pass
if checktime:
sys.stderr.write("Transformation time: %dms\n" % (1000 * (end - start)))
return
| 1.96875 | 2 |
apps/bazaar/views/RequestView.py | Nelson-Morais/HA-OOAD | 0 | 12796295 | <filename>apps/bazaar/views/RequestView.py
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
RequestView controls activity belonging to requests
@author <NAME>, <NAME>, <NAME>
Projekt OOAD Hausarbeit WiSe 2020/21
"""
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.views import View
from apps.bazaar.adapters.notifications_adapter import add_notification
from apps.bazaar.forms import RequestForm
from apps.bazaar.models import OfferModel, RequestModel
class RequestView(View):
"""Request View Class"""
@login_required(login_url='login')
def create_request(request, offer_id):
"""
saves data from request creation form (offer details view)
:param offer_id: ID of the offer attached to the request
:return: renders a page and sends a dictionary to the view
"""
form = RequestForm(request.POST or None)
if form.is_valid():
req = form.save(commit=False)
req.userowner_id = request.user.id
req.offer_id = offer_id
req.save()
offer = OfferModel.objects.filter(is_deleted=False).get(id=offer_id)
add_notification(request.user.id, "Anfrage verschickt", req.text)
add_notification(offer.userowner_id, "Anfrage erhalten!", req.text)
return redirect('offer_list')
return render(request, "create_request.html", {
'form': form,
'offer_id': offer_id
})
@login_required(login_url='login')
def accept_request(request, offer_id, request_id):
"""
accepts one request from offer, discards all other requests from offer
:param offer_id: ID of the offer attached to the requests
:param request_id: ID of the accepted request
:return: redirects to a page
"""
offer = OfferModel.objects.filter(is_deleted=False).get(id=offer_id)
offer.is_closed = True
offer.save()
requests = RequestModel.objects.filter(is_deleted=False).filter(offer_id=offer_id).order_by('-created_at')
for req in requests:
if req.id == request_id:
req.status = 2
req.save()
add_notification(req.userowner_id, "Anfrage angenommen!", offer.title)
else:
req.status = 3
req.save()
add_notification(req.userowner_id, "Anfrage abgelehnt", offer.title)
return redirect('personal_offer_list')
@login_required(login_url='login')
def delete_request(request, request_id):
"""
deletes the request
:param request_id: ID of the request to be deleted
:return: redirects to a page
"""
req = RequestModel.objects.filter(is_deleted=False).get(id=request_id)
if req.status == 1:
req.is_deleted = True
req.save()
add_notification(request.user.id, "Anfrage gelöscht", req.text)
offer = OfferModel.objects.filter(is_deleted=False).get(id=req.offer_id)
add_notification(offer.userowner_id, "Eingehende Anfrage gelöscht", req.text)
return redirect('personal_request_list')
@login_required(login_url='login')
def personal_request_listing(request):
"""
        displays a list of the user's requests to other offers
:return: renders a page and sends a dictionary to the view
"""
object_list = []
requests = RequestModel.objects.filter(is_deleted=False).filter(userowner_id=request.user.id).order_by(
'-created_at')
for req in requests:
offer = OfferModel.objects.get(id=req.offer_id)
if not offer.is_deleted:
object_list.append({
'request': req,
'offer': offer,
})
return render(request, "my_request_list.html", {
'objects': object_list
})
| 1.570313 | 2 |
App1/admin.py | surajjare7/community | 0 | 12796296 | from django.contrib import admin
from .models import Post
from . models import Query
from .models import Solution
# Register your models here.
admin.site.register(Post)
admin.site.register(Query)
# admin.site.register(Services)
# admin.site.register(Contact)
admin.site.register(Solution) | 1.40625 | 1 |
aws_managers/athena/functions/window.py | vahndi/aws-managers | 0 | 12796297 | <filename>aws_managers/athena/functions/window.py
"""
https://prestodb.io/docs/current/functions/window.html
"""
from typing import Optional, List, Union
"""
https://prestodb.io/docs/current/functions/window.html#value-functions
"""
class LagMixin(object):
name: str
def lag(
self,
offset: int = 1,
partition_by: Optional[Union[str, List[str]]] = None,
order_by: Optional[Union[str, List[str]]] = None,
as_: Optional[str] = None
) -> str:
"""
        Returns the value at ``offset`` rows before the current row in the
        window (the default offset is 1). If the offset points outside the
        window, null is returned.
"""
str_out = f'lag({self.name}, {offset})'
if partition_by is not None or order_by is not None:
str_out += ' over ('
if partition_by is not None:
if not isinstance(partition_by, list):
partition_by = [partition_by]
str_out += 'partition by ' + ', '.join(map(str, partition_by))
if partition_by is not None and order_by is not None:
str_out += ' '
if order_by is not None:
if not isinstance(order_by, list):
order_by = [order_by]
str_out += 'order by ' + ', '.join(map(str, order_by))
if partition_by is not None or order_by is not None:
str_out += ')'
if as_ is None:
as_ = f'{self.name}__lag_{offset}'
str_out += f' as {as_}'
return str_out
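
# Minimal usage sketch (illustrative, not part of the library): any object
# exposing a `name` attribute can use the mixin; `Column` below is a
# hypothetical stand-in for whatever class mixes LagMixin in.
if __name__ == '__main__':
    class Column(LagMixin):
        def __init__(self, name: str):
            self.name = name

    col = Column('price')
    # prints: lag(price, 1) over (partition by ticker order by ts) as price__lag_1
    print(col.lag(partition_by='ticker', order_by='ts'))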
| 2.96875 | 3 |
code/board.py | eulerlab/spectral-scanner | 1 | 12796298 | # ----------------------------------------------------------------------------
# board.py
# Pin definitions
#
# The MIT License (MIT)
# Copyright (c) 2020 <NAME>
# 2020-11-21, v1
# ----------------------------------------------------------------------------
from micropython import const
# Spectrometer (CM12880MA)
TRG = const(14)
STA = const(15)
CLK = const(21)
VID = const(36)
# I2C for compass etc.
SDA = const(23)
SCL = const(22)
I2C_FRQ = const(400000)
# Serial for extensions
TX = const(17)
RX = const(16)
# Servos
SERVO_PAN = const(27)
PAN_RANGE_US = [1010, 1931]
PAN_RANGE_DEG = [-45, 45]
SERVO_TLT = const(33)
TLT_RANGE_US = [1033, 1916]
TLT_RANGE_DEG = [-45, 45]
# NeoPixel
NEOPIX = const(32)
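
# Illustrative helper (an assumption, not part of the original pin map):
# linearly map an angle in degrees to a servo pulse width in microseconds
# using the calibration ranges above, e.g. deg_to_us(0) -> 1470 for pan.
def deg_to_us(deg, deg_range=PAN_RANGE_DEG, us_range=PAN_RANGE_US):
    d0, d1 = deg_range
    u0, u1 = us_range
    return int(u0 + (deg - d0) * (u1 - u0) / (d1 - d0))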
# ----------------------------------------------------------------------------
| 1.984375 | 2 |
wsgi.py | anhbaysgalan1/Docker-nginx-flask-auto-ssl | 3 | 12796299 | from application import app
# Starts the application
if __name__ == "__main__":
app.run() | 1.6875 | 2 |
django_leek/models.py | Volumental/django-leek | 1 | 12796300 | <reponame>Volumental/django-leek<gh_stars>1-10
import base64
import pickle
from typing import Any
from django.db import models
class Task(models.Model):
pickled_task = models.BinaryField(max_length=4096)
pool = models.CharField(max_length=256, null=True)
queued_at = models.DateTimeField(auto_now_add=True)
started_at = models.DateTimeField(null=True)
finished_at = models.DateTimeField(null=True)
pickled_exception = models.BinaryField(max_length=2048, null=True)
pickled_return = models.BinaryField(max_length=4096, null=True)
@property
def exception(self):
if self.pickled_exception is None:
return None
return pickle.loads(base64.b64decode(self.pickled_exception))
@property
def return_value(self):
if self.pickled_return is None:
return None
return pickle.loads(base64.b64decode(self.pickled_return))
def started(self) -> bool:
return self.started_at is not None
def finished(self) -> bool:
return self.finished_at is not None
def successful(self) -> bool:
return self.finished() and self.pickled_return is not None
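
# Usage sketch (assumption: callers store base64-encoded pickles, which is
# exactly what the `exception`/`return_value` properties above decode):
#
#   import base64, pickle
#   t = Task(pickled_task=base64.b64encode(pickle.dumps(some_callable)))
#   t.pickled_return = base64.b64encode(pickle.dumps({'ok': True}))
#   t.save()
#   t.return_value  # -> {'ok': True}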
| 2.25 | 2 |
Python_scripts/PyevolveEx1.py | janaobsteter/Genotype_CODES | 1 | 12796301 | <reponame>janaobsteter/Genotype_CODES
from pyevolve import G1DList
from pyevolve import GSimpleGA
from pyevolve import Selectors
from pyevolve import Statistics
from pyevolve import DBAdapters
import pyevolve
# This function is the evaluation function, we want
# to give high score to more zero'ed chromosomes
def eval_func(chromosome):
score = 0.0
# iterate over the chromosome
# score = len(filter(lambda x: x==0, chromosome.genomeList))
for value in chromosome:
if value==0:
score += 1
return score
# Enable the pyevolve logging system
pyevolve.logEnable()
# Genome instance, 1D List of 50 elements
genome = G1DList.G1DList(10)
# Sets the range max and min of the 1D List
genome.setParams(rangemin=0, rangemax=30)
# The evaluator function (evaluation function)
genome.evaluator.set(eval_func)
# Genetic Algorithm Instance
ga = GSimpleGA.GSimpleGA(genome)
# Set the Roulette Wheel selector method, the number of generations and
# the termination criteria
ga.selector.set(Selectors.GRouletteWheel)
ga.setGenerations(100)
ga.terminationCriteria.set(GSimpleGA.ConvergenceCriteria)
# Sets the DB Adapter, the resetDB flag will make the Adapter recreate
# the database and erase all data every run, you should use this flag
# just in the first time, after the pyevolve.db was created, you can
# omit it.
sqlite_adapter = DBAdapters.DBSQLite(identify="ex1", resetDB=True)
ga.setDBAdapter(sqlite_adapter)
# Do the evolution, with stats dump
# frequency of 20 generations
ga.evolve(freq_stats=20)
# Best individual
print ga.bestIndividual() | 2.84375 | 3 |
LeetCode/queue-stack/1-design-circular-queue.py | safiulanik/problem-solving | 0 | 12796302 | """
URL: https://leetcode.com/explore/learn/card/queue-stack/228/first-in-first-out-data-structure/1337/
Problem Statement:
------------------
Design your implementation of the circular queue. The circular queue is a linear data structure in which the operations are performed based on FIFO (First In First Out) principle and the last position is connected back to the first position to make a circle. It is also called "Ring Buffer".
One of the benefits of the circular queue is that we can make use of the spaces in front of the queue. In a normal queue, once the queue becomes full, we cannot insert the next element even if there is a space in front of the queue. But using the circular queue, we can use the space to store new values.
Implement the MyCircularQueue class:
- MyCircularQueue(k) Initializes the object with the size of the queue to be k.
- int Front() Gets the front item from the queue. If the queue is empty, return -1.
- int Rear() Gets the last item from the queue. If the queue is empty, return -1.
- boolean enQueue(int value) Inserts an element into the circular queue. Return true if the operation is successful.
- boolean deQueue() Deletes an element from the circular queue. Return true if the operation is successful.
- boolean isEmpty() Checks whether the circular queue is empty or not.
- boolean isFull() Checks whether the circular queue is full or not.
- You must solve the problem without using the built-in queue data structure in your programming language.
Example 1:
Input:
["MyCircularQueue", "enQueue", "enQueue", "enQueue", "enQueue", "Rear", "isFull", "deQueue", "enQueue", "Rear"]
[[3], [1], [2], [3], [4], [], [], [], [4], []]
Output:
[null, true, true, true, false, 3, true, true, true, 4]
Explanation
- MyCircularQueue myCircularQueue = new MyCircularQueue(3);
- myCircularQueue.enQueue(1); // return True
- myCircularQueue.enQueue(2); // return True
- myCircularQueue.enQueue(3); // return True
- myCircularQueue.enQueue(4); // return False
- myCircularQueue.Rear(); // return 3
- myCircularQueue.isFull(); // return True
- myCircularQueue.deQueue(); // return True
- myCircularQueue.enQueue(4); // return True
- myCircularQueue.Rear(); // return 4
Constraints:
1 <= k <= 1000
0 <= value <= 1000
At most 3000 calls will be made to enQueue, deQueue, Front, Rear, isEmpty, and isFull.
"""
from typing import List
class MyCircularQueue:
def __init__(self, k: int):
self.arr = [None] * k
self.front = -1
self.rear = -1
self.size = k
def enQueue(self, value: int) -> bool:
if self.isFull():
return False
if self.isEmpty():
self.front = 0
self.rear = (self.rear + 1) % self.size
self.arr[self.rear] = value
return True
def deQueue(self) -> bool:
if self.isEmpty():
return False
if self.front == self.rear:
self.front = self.rear = -1
else:
self.front = (self.front + 1) % self.size
return True
def Front(self) -> int:
if self.isEmpty():
return -1
return self.arr[self.front]
def Rear(self) -> int:
if self.isEmpty():
return -1
return self.arr[self.rear]
def isEmpty(self) -> bool:
return self.front == -1
def isFull(self) -> bool:
return (self.rear + 1) % self.size == self.front
# Your MyCircularQueue object will be instantiated and called as such:
obj = MyCircularQueue(3)
assert obj.enQueue(1) is True
assert obj.enQueue(2) is True
assert obj.enQueue(3) is True
assert obj.enQueue(4) is False
assert obj.Rear() == 3
assert obj.isFull() is True
assert obj.deQueue() is True
assert obj.enQueue(4) is True
assert obj.Rear() == 4
| 3.515625 | 4 |
oscar_mws/admin.py | ButchershopCreative/django-oscar-mws | 12 | 12796303 | <reponame>ButchershopCreative/django-oscar-mws<gh_stars>10-100
from django.contrib import admin
from django.db.models import get_model
admin.site.register(get_model("oscar_mws", "FeedSubmission"))
admin.site.register(get_model("oscar_mws", "FeedReport"))
admin.site.register(get_model("oscar_mws", "FeedResult"))
admin.site.register(get_model("oscar_mws", "AmazonProfile"))
admin.site.register(get_model("oscar_mws", "ShipmentPackage"))
admin.site.register(get_model("oscar_mws", "FulfillmentOrder"))
admin.site.register(get_model("oscar_mws", "FulfillmentOrderLine"))
admin.site.register(get_model("oscar_mws", "FulfillmentShipment"))
| 1.609375 | 2 |
src/analyzer/preprocessor.py | diegowendel/TSAP | 1 | 12796304 | import re
import nltk
from string import punctuation
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
#nltk.download('rslp')
#nltk.download('stopwords')
#nltk.download('punkt')
class PreProcessor(object):
stemmer = nltk.stem.RSLPStemmer()
tokenizer = TweetTokenizer(reduce_len=True, preserve_case=False)
special_char = ['$', '%', '&', '*', '(', ')', '_', '-', '+', '=', '{', '[', '}', ']', '~', '.', ',', ';', 'º', 'ª', '°', '¹', '²', '³']
# UniLex: Método Léxico para Análise de Sentimentos Textuais sobre Conteúdo de Tweets em Português Brasileiro*
stoplist_uniLex = ['a', 'agora', 'ainda', 'alguem', 'algum', 'alguma', 'algumas', 'alguns', 'ampla', 'amplas', 'amplo', 'amplos',
'ante', 'antes', 'ao', 'aos', 'apos', 'aquela', 'aquelas', 'aquele', 'aqueles', 'aquilo', 'as', 'ate', 'atraves',
'cada', 'coisa', 'coisas', 'com', 'como', 'contra', 'contudo', 'da', 'daquele', 'daqueles', 'das', 'de', 'dela',
'delas', 'dele', 'deles', 'depois', 'dessa', 'dessas', 'desse', 'desses', 'desta', 'destas', 'deste', 'deste',
'destes', 'deve', 'devem', 'devendo', 'dever', 'devera', 'deverao', 'deveria', 'deveriam', 'devia', 'deviam',
'disse', 'disso', 'disto', 'dito', 'diz', 'dizem', 'do', 'dos', 'e', 'ela', 'elas', 'ele', 'eles', 'em',
'enquanto', 'entre', 'era', 'essa', 'essas', 'esse', 'esses', 'esta', 'estamos', 'estao', 'estas', 'estava',
'estavam', 'estavamos', 'este', 'estes', 'estou', 'eu', 'fazendo', 'fazer', 'feita', 'feitas', 'feito', 'feitos',
'foi', 'for', 'foram', 'fosse', 'fossem', 'grande', 'grandes', 'ha', 'isso', 'isto', 'ja', 'la', 'lhe', 'lhes',
'lo', 'mas', 'me', 'mesma', 'mesmas', 'mesmo', 'mesmos', 'meu', 'meus', 'minha', 'minhas', 'muita', 'muitas',
'muito', 'muitos', 'na', 'nao', 'nas', 'nem', 'nenhum', 'nessa', 'nessas', 'nesta', 'nestas', 'ninguem', 'no',
'nos', 'nossa', 'nossas', 'nosso', 'nossos', 'num', 'numa', 'nunca', 'o', 'os', 'ou', 'outra', 'outras', 'outro',
'outros', 'para', 'pela', 'pelas', 'pelo', 'pelos', 'pequena', 'pequenas', 'pequeno', 'pequenos', 'per', 'perante',
'pode', 'podendo', 'poder', 'poderia', 'poderiam', 'podia', 'podiam', 'pois', 'por', 'porem', 'porque', 'posso',
'pouca', 'poucas', 'pouco', 'poucos', 'primeiro', 'primeiros', 'propria', 'proprias', 'proprio', 'proprios',
'quais', 'qual', 'quando', 'quanto', 'quantos', 'que', 'quem', 'sao', 'se', 'seja', 'sejam', 'sem', 'sempre',
'sendo', 'sera', 'serao', 'seu', 'seus', 'si', 'sido', 'so', 'sob', 'sobre', 'sua', 'suas', 'talvez', 'tambem',
'tampouco', 'te', 'tem', 'tendo', 'tenha', 'ter', 'teu', 'teus', 'ti', 'tido', 'tinha', 'tinham', 'toda', 'todas',
'todavia', 'todo', 'todos', 'tu', 'tua', 'tuas', 'tudo', 'ultima', 'ultimas', 'ultimo', 'ultimos', 'um', 'uma',
'umas', 'uns', 'vendo', 'ver', 'vez', 'vindo', 'vir', 'vos', 'vos']
# Stopwords do nltk + stopwords do UniLex
stoplist = sorted(set(stoplist_uniLex + stopwords.words('portuguese')))
def process(self, tweet):
tweet = self.to_lower(tweet)
tweet = self.remove_links(tweet)
tweet = self.remove_mentions(tweet)
tweet = self.remove_hashtags(tweet)
tweet = self.remove_numbers(tweet)
tweet = self.replace_three_or_more(tweet)
palavras = self.tokenizer.tokenize(tweet)
palavras = self.remove_punctuation(palavras)
palavras = self.remove_stopwords(palavras)
palavras_processadas = []
for palavra in palavras:
            # Replace emoticons
if len(palavra) <= 3:
# replace good emoticons
palavra = re.sub('[:;=8][\-=^*\']?[)\]Dpb}]|[cCqd{(\[][\-=^*\']?[:;=8]', 'bom', palavra)
# replace bad emoticons
palavra = re.sub('[:;=8][\-=^*\']?[(\[<{cC]|[D>)\]}][\-=^*\']?[:;=8]', 'ruim', palavra)
# Stemming
# palavra = self.stemmer.stem(palavra)
# Remove small words
if len(palavra) <= 2:
palavra = ''
for s in self.special_char:
palavra = palavra.replace(s, '')
palavras_processadas.append(palavra)
tweet = ' '.join(palavras_processadas)
tweet = self.remove_duplicated_spaces(tweet)
return tweet
def to_lower(self, tweet):
return tweet.lower()
def remove_links(self, tweet):
# http matches literal characters and \S+ matches all non-whitespace characters (the end of the url)
return re.sub("http\S+", "", tweet)
def remove_mentions(self, tweet):
return re.sub("@\S+", "", tweet)
def remove_hashtags(self, tweet):
return re.sub("#", "", tweet)
def remove_numbers(self, tweet):
return re.sub("\d+", "", tweet)
def replace_three_or_more(self, tweet):
# pattern to look for three or more repetitions of any character, including newlines
pattern = re.compile(r"(.)\1{2,}", re.DOTALL)
return pattern.sub(r"\1\1", tweet)
def remove_duplicated_spaces(self, tweet):
tweet = tweet.strip() # Remove spaces before and after string
return re.sub(" +", " ", tweet)
def remove_stopwords(self, palavras):
return [palavra for palavra in palavras if palavra not in self.stoplist]
def remove_punctuation(self, palavras):
return [palavra for palavra in palavras if palavra not in list(punctuation)]
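
# Minimal usage sketch (the tweet text below is illustrative):
#
#   pre = PreProcessor()
#   pre.process('Adoreiii o dia :) http://t.co/x @amigo #praia 123')
#   # links, mentions, numbers and stopwords are removed, '#' is stripped,
#   # 3+ repeated letters collapse ('adoreiii' -> 'adoreii') and ':)' -> 'bom'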
| 3.109375 | 3 |
qkoubot/validators/database_validate.py | pddg/qkouserver | 0 | 12796305 | import os
from static import SQLITE_DIR_PATH, USE_MYSQL, MYSQL_USERNAME, MYSQL_PASSWORD, MYSQL_HOST, MYSQL_DATABASE_NAME
def db_path_validate():
assert os.path.exists(SQLITE_DIR_PATH), "{path} is not exists.".format(path=SQLITE_DIR_PATH)
if USE_MYSQL:
assert MYSQL_USERNAME is not None, "MYSQL_USERNAME is not given."
assert MYSQL_PASSWORD is not None, "MYSQL_PASSWORD is not given."
assert MYSQL_HOST is not None, "MYSQL_HOST is not given."
assert MYSQL_DATABASE_NAME is not None, "MYSQL_DATABASE_NAME is not given."
| 3.046875 | 3 |
data/audio/audio_downloader.py | vuthede/speech_separation | 0 | 12796306 | # Before running, make sure avspeech_train.csv and avspeech_test.csv are in catalog.
# if not, see the requirement.txt
# download and preprocess the data from AVspeech dataset
import sys
sys.path.append("../lib")
import AVHandler as avh
import pandas as pd
import multiprocessing
from multiprocessing import Process
def m_link(youtube_id):
# return the youtube actual link
link = 'https://www.youtube.com/watch?v='+youtube_id
return link
def m_audio(loc,name,cat,start_idx,end_idx):
    # download and cut each audio clip following the catalog from AVSpeech
# loc | the location for file to store
# name | name for the wav mix file
# cat | the catalog with audio link and time
# start_idx | the starting index of the audio to download and concatenate
# end_idx | the ending index of the audio to download and concatenate
for i in range(start_idx,end_idx):
#f_name = name+str(i)
        f_name = f'{name}_{cat.loc[i, "link"]}_{i}'  # audio_train_<youtube id>_<index>
link = m_link(cat.loc[i,'link'])
start_time = cat.loc[i,'start_time']
end_time = start_time + 3.0
avh.download(loc,f_name,link)
avh.cut(loc,f_name,start_time,end_time)
cat_train = pd.read_csv('catalog/avspeech_train.csv')
cat_train.columns = ['link', 'start_time', 'end_time', 'x','y']
#cat_test = pd.read_csv('catalog/avspeech_test.csv')
# create 80000-90000 audios data from 290K
avh.mkdir('audio_train')
# Multiprocess
processes = []
n_process = 10
sample_per_process = 2000
for i in range(n_process):
proc = Process(target=m_audio, kwargs={'loc':'audio_train', 'name': 'audio_train','cat':cat_train, 'start_idx':i*sample_per_process, 'end_idx':(i+1)*sample_per_process})
processes.append(proc)
proc.start()
print("Start process: ", i)
for proc in processes:
proc.join()
#m_audio('audio_train','audio_train',cat_train,80000,80500)
| 2.890625 | 3 |
comments/api/views.py | rkarthikdev/blog_api_django | 0 | 12796307 | <reponame>rkarthikdev/blog_api_django
from rest_framework import viewsets
from .serializers import CommentSerializer
from comments.models import Comment
from .permissions import IsGetOrIsAdmin
class CommentViewSet(viewsets.ModelViewSet):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
permission_classes = (IsGetOrIsAdmin,) | 1.84375 | 2 |
3 Year/SDL/Python/Assignment 3/Assignment 3.py | bhushanasati25/College | 4 | 12796308 | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv('medals_data.csv')
df[['Gold','Silver','Bronze']].plot(kind='bar',stacked=True)
plt.title('India Olympics Medal')
plt.xlabel('Years')
plt.ylabel('Medals')
n = len(df['Games'])
labels = df.Games.str.slice(0,4)
plt.xticks(np.arange(n),labels,rotation='horizontal')
plt.show() | 3.203125 | 3 |
tatoebatools/transcriptions.py | eumiro/tatoebatools | 14 | 12796309 | <filename>tatoebatools/transcriptions.py
class Transcription:
"""A sentence transcription in an auxiliary or alternative script"""
def __init__(
self, sentence_id, lang, script_name, username, transcription
):
# the id of the sentence
self._sid = sentence_id
# the language of the sentence
self._lg = lang
# the name of the script of the transcription defined according to
# the ISO 15924 standard.
self._scp = script_name
# the name of the user indicates the user who last reviewed and
# possibly modified it. A transcription without a username has not
# been marked as reviewed.
self._usr = username
# the transcription itself
self._trs = transcription
@property
def sentence_id(self):
"""Get the id of the sentence of this transcription"""
return int(self._sid)
@property
def lang(self):
"""Get the language for this transcription"""
return self._lg
@property
def script_name(self):
"""Get the name of the script in which this transcription is made"""
return self._scp
@property
def username(self):
"""Get the name of the user who have this language skill"""
return self._usr
@property
def transcription(self):
"""Get the text of this transcription"""
return self._trs
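
# Minimal usage sketch (field values are illustrative):
#
#   t = Transcription('373', 'jpn', 'Hrkt', 'alice', 'こんにちは')
#   t.sentence_id  # -> 373 (cast to int)
#   t.username     # -> 'alice' (last reviewer; unreviewed rows have no name)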
| 2.984375 | 3 |
vol3/109.py | EdisonAlgorithms/ProjectEuler | 0 | 12796310 | <reponame>EdisonAlgorithms/ProjectEuler
if __name__ == "__main__":
doubles = [2 * x for x in range(1, 21)]
doubles.append(50)
singles = [x for x in range(1, 21)]
singles.append(25)
triples = [3 * x for x in range(1, 21)]
scores = singles + doubles + triples
scores.sort()
limit = 100
ans = 0
for i in doubles:
if i < limit:
ans += 1
else:
break
for i in scores:
for j in doubles:
if i + j < limit:
ans += 1
length = len(scores)
for i in range(length):
for j in range(i, length):
if scores[i] + scores[j] >= limit:
break
for k in doubles:
if scores[i] + scores[j] + k >= limit:
break
ans += 1
print ans
| 3.328125 | 3 |
Codeforces/A_Pashmak_and_Garden.py | anubhab-code/Competitive-Programming | 0 | 12796311 | x1, y1, x2, y2 = list(map(int,input().split()))
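# Given two vertices of an axis-aligned square, print the other two or -1.
# Same x  -> the points form a vertical side: shift both right by its length.
# Same y  -> a horizontal side: shift both up by its length.
# Otherwise the points must be opposite (diagonal) corners, which requires
# |x2-x1| == |y2-y1|; the remaining corners mix the given coordinates.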
if x1 == x2 :
d = abs(y2-y1)
x3 , y3 = x2+d , y2
x4 , y4 = x1+d , y1
print("{x3} {y3} {x4} {y4}".format(x3=x3, y3=y3, x4=x4, y4=y4))
elif y1 == y2:
d = abs(x2-x1)
x3 , y3 = x2 , y2+d
x4 , y4 = x1 , y1+d
print("{x3} {y3} {x4} {y4}".format(x3=x3, y3=y3, x4=x4, y4=y4))
else:
d = abs(x2-x1)
true_dist = (x2-x1)**2 + (y2-y1)**2
apparent_dist = 2*(d**2)
if true_dist == apparent_dist:
if min(y1,y2) == y1:
dist = x2-x1
x3, y3 = x1 + dist, y1
x4, y4 = x2 - dist, y2
print("{x3} {y3} {x4} {y4}".format(x3=x3, y3=y3, x4=x4, y4=y4))
else:
dist = x1-x2
x3, y3 = x2 + dist, y2
x4, y4 = x1 - dist, y1
print("{x3} {y3} {x4} {y4}".format(x3=x3, y3=y3, x4=x4, y4=y4))
else:
print(-1) | 3.359375 | 3 |
main.py | F1shh-sec/Page-Permission-Checker | 0 | 12796312 | import requests
import urllib3
import webbrowser
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
cookies = {
}
headers = {
}
sites_200 = []
sites_403 = []
def printPage(response):
with open('test.html', "w") as output:
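        # str(bytes) leaves literal "\n", "\t" and a leading b' in the text; strip them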
badchars = ['\\n', '\\t', 'b\'']
responseContent = str(response.content).strip()
for elm in badchars:
responseContent = responseContent.replace(elm, "")
output.write(responseContent)
def test_html_code(response_code, page_url):
if response_code == 200:
print(f"{page_url} : Success : {response_code}")
sites_200.append(page_url)
elif response_code == 403:
print(f"{page_url}: Success : {response_code}")
sites_403.append(page_url)
elif response_code == 404:
print(f"{page_url}: Failed : {response_code}")
def write_report():
with open('success.txt', "w") as output:
output.write("PAGES THAT 200:\n")
for elm in sites_200:
# webbrowser.open(elm)
output.write(f"{elm}\n")
output.write("\n\nPAGES THAT 403:\n")
for elm in sites_403:
output.write(f"{elm}\n")
def main():
with open('test.txt', "r") as sites:
lines = sites.readlines()
for line in lines:
page_url = line.strip()
response = requests.get(page_url, headers=headers, cookies=cookies, verify=False)
test_html_code(response.status_code, page_url)
write_report()
if __name__ == '__main__':
main() | 3.15625 | 3 |
lifegame.py | k28/python_lifegame | 0 | 12796313 | <filename>lifegame.py
# coding: utf-8
import random
import sys, time
import copy
import os
LIVE = '*'
DEAD = ' '
MAX_WIDTH = 40
MAX_HEIGHT = 20
# Any live cell with fewer than two live neighbours dies, as if caused by underpopulation.
# Any live cell with two or three live neighbours lives on to the next generation.
# Any live cell with more than three live neighbours dies, as if by overpopulation.
# Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.
def count_live_neighbours(checkList, index, width):
count = 0
if checkList[index - width - 1] == LIVE:
count += 1
if checkList[index - width] == LIVE:
count += 1
if checkList[index - width + 1] == LIVE:
count += 1
if checkList[index - 1] == LIVE:
count += 1
if checkList[index + 1] == LIVE:
count += 1
if checkList[index + width - 1] == LIVE:
count += 1
if checkList[index + width] == LIVE:
count += 1
if checkList[index + width + 1] == LIVE:
count += 1
return count
def check_life(oldList, newList, index, width):
life_cell_count = count_live_neighbours(oldList, index, width)
if oldList[index] == LIVE:
if life_cell_count <= 1:
newList[index] = DEAD
elif life_cell_count == 2 or life_cell_count == 3:
newList[index] = LIVE
else:
newList[index] = DEAD
elif oldList[index] == DEAD:
if life_cell_count == 3:
newList[index] = LIVE
def print_life(life_list, width, height):
os.system('clear')
for index in range(width * height):
if index < width:
pass
elif index % width == 0:
pass
elif index % width == width - 1:
pass
elif index >= width * (height - 1):
pass
else:
sys.stdout.write(str(life_list[index]))
if index % width == width - 2:
print("")
width = MAX_WIDTH + 2
height = MAX_HEIGHT + 2
first_list = [DEAD] * (width * height)
for index in range(width * height):
    # The border is fixed to DEAD; the interior is initialized randomly
if index < width:
first_list[index] = DEAD
elif index % width == 0:
first_list[index] = DEAD
elif index % width == width - 1:
first_list[index] = DEAD
elif index >= width * (height - 1):
first_list[index] = DEAD
else:
first_list[index] = random.choice([LIVE, DEAD])
print_life(first_list, width, height)
old_list = first_list
while True:
time.sleep(0.5)
new_list = copy.deepcopy(old_list)
for index in range(width * height):
        # Do nothing for the border cells
if index < width:
pass
elif index % width == 0:
pass
elif index % width == width - 1:
pass
elif index >= width * (height - 1):
pass
        # Advance the generation for the interior cells
else:
check_life(old_list, new_list, index, width)
print_life(new_list, width, height)
old_list = new_list
| 3.75 | 4 |
tello_ros/src/camera_info_publisher.py | PedroS235/TelloSwarm | 0 | 12796314 | <reponame>PedroS235/TelloSwarm<gh_stars>0
#!/usr/bin/python3
import rospy
from sensor_msgs.msg import CameraInfo
from datetime import datetime
def publisher():
"""
This method contains the information to calibrate the tello camera
"""
print("Starting calibrator node")
publisher = rospy.Publisher("camera/camera_info", CameraInfo, queue_size=10)
rospy.init_node("calibrator")
rate = rospy.Rate(15)
fixed_msg = CameraInfo(
height=960,
width=720,
distortion_model="plumb_bob",
D=[-0.013335, -0.018951, 0.000913, -0.005454, 0.000000],
K=[
897.137424, 0.000000, 453.939111,
0.000000, 902.273567, 354.958881,
0.000000, 0.000000, 1.000000],
R=[1, 0, 0, 0, 1, 0, 0, 0, 1],
P=[
891.148254, 0.000000, 449.250272, 0.000000,
0.000000, 901.238647, 355.367597, 0.000000,
0.000000, 0.000000, 1.000000, 0.000000],
)
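    # K is the 3x3 intrinsic matrix [fx 0 cx; 0 fy cy; 0 0 1] and P the 3x4
    # projection matrix, both flattened row-major per sensor_msgs/CameraInfo.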
while not rospy.is_shutdown():
publisher.publish(fixed_msg)
now = datetime.now().time()
print(f"{now}: published camera calibration info", end='\r')
rate.sleep()
# -- the rest of the code will only run when executed from the terminal
if __name__ == '__main__':
publisher()
| 2.5625 | 3 |
all.py | 0xfares/darkreporter | 0 | 12796315 | <filename>all.py
import csv
import sys
from collections import defaultdict
# Lists that hold the actual data, one per report column.
RowOfDataIPAddress = []                    # IP Addresses
RowOfDataPluginID = []                     # Plugin ID
RowOfDataPluginName = []                   # Plugin Name
RowOfDataFamily = []                       # Family
RowOfDataSeverity = []                     # Severity
RowOfDataHostName = []                     # Host Name
RowOfDataProtocol = []                     # Protocol
RowOfDataPort = []                         # Port
RowOfDataIsExploitable = []                # Exploit?
RowOfDataRepository = []                   # Repository
RowOfDataMACAddress = []                   # MAC Address
RowOfDataDNSName = []                      # DNS Name
RowOfDataNetBIOSName = []                  # NetBIOS Name
RowOfDataPluginText = []                   # Plugin Text
RowOfDataFirstDiscovered = []              # First Discovered
RowOfDataLastObserved = []                 # Last Observed
RowOfDataExploitFrameworks = []            # Exploit Frameworks
RowOfDataSynopsis = []                     # Synopsis
RowOfDataDescription = []                  # Description
RowOfDataSolution = []                     # Solution
RowOfDataSeeAlso = []                      # See Also
RowOfDataCVE = []                          # CVE
RowOfDataVulnerablityPublicationDate = []  # Vulnerability Publication Date
RowOfDataExploitEase = []                  # Exploit Ease
PluginsIndexList = []                      # index of all plugin IDs
with open('nessus.csv') as MainCSVFile:
FirstRoundReader = csv.DictReader(MainCSVFile)
for IndexPluginRows in FirstRoundReader:
PluginListIndexer = IndexPluginRows['Plugin ID']
PluginsIndexList.append(PluginListIndexer)
PluginsIndexList.sort()
RemovingDublicatesFromIndexList = list(set(PluginsIndexList))
RemovingDublicatesFromIndexList.sort()
print(RemovingDublicatesFromIndexList)
IndexCounter = 0
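# For each unique plugin ID, re-scan the CSV and collect the matching rows.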
while IndexCounter < len(RemovingDublicatesFromIndexList):
with open('nessus.csv') as SecondMainFileLoading:
SecondRoundReader = csv.DictReader(SecondMainFileLoading)
for IndexAllDataRows in SecondRoundReader:
if RemovingDublicatesFromIndexList[IndexCounter] == IndexAllDataRows['Plugin ID']:
#fetching the data as rows
PluginIDRows = IndexAllDataRows['Plugin ID']
PluginNameRows = IndexAllDataRows['Name']
HostNameRows = IndexAllDataRows['Host']
ProtocolRows = IndexAllDataRows['Protocol']
PortRows = IndexAllDataRows['Port']
#Appending the results in the lists
RowOfDataPluginID.append(PluginIDRows)
RowOfDataPluginName.append(PluginNameRows)
RowOfDataHostName.append(HostNameRows)
RowOfDataProtocol.append(ProtocolRows)
RowOfDataPort.append(PortRows)
#RowOfDataPluginName.sort()
#RowOfDataHostName.sort()
AllDataPluginIDs = list(RowOfDataPluginID)
AllDataPluginNames = list(RowOfDataPluginName)
AllDataPluginNames2 = list(set(RowOfDataPluginName))
AllDataHostNames = list(RowOfDataHostName)
AllDataHostNames2 = list(set(RowOfDataHostName))
AllDataProtocols = list(RowOfDataProtocol)
AllDataPorts = list(RowOfDataPort)
IndexCounter += 1
AllData = []
for j in range(len(AllDataPluginNames2)):
print(j,AllDataPluginNames2[j])
matches = [index for index, value in enumerate(AllDataPluginNames) if value == AllDataPluginNames2[j]]
print("*************************************************************************************")
print(matches)
print("*************************************************************************************")
    # display the hostnames, protocols and ports for the rows in the matches list
for xx in range(len(matches)):
#print(AllDataPluginNames[xx])
print(xx,AllDataHostNames[matches[xx]],AllDataProtocols[matches[xx]],AllDataPorts[matches[xx]])
#print()
print("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
#AllData.append(({AllDataPluginNames[j]:[AllDataHostNames[j],AllDataProtocols[j],AllDataPorts[j]]}))
| 2.4375 | 2 |
formats/ycd/__init__.py | Adobe12327/Sollumz | 131 | 12796316 | <filename>formats/ycd/__init__.py
if "bpy" in locals():
import importlib
importlib.reload(Animation)
importlib.reload(AnimSequence)
importlib.reload(Channel)
importlib.reload(Clip)
importlib.reload(ClipDictionary)
importlib.reload(utils)
else:
from . import Animation
from . import AnimSequence
from . import Channel
from . import Clip
from . import ClipDictionary
from . import utils
import bpy | 1.695313 | 2 |
tests/test-root/conf.py | kai687/sphinxawesome-sampdirective | 1 | 12796317 | <reponame>kai687/sphinxawesome-sampdirective
"""Sphinx configuration file for testing."""
| 0.789063 | 1 |
tests/test_myst_plugins.py | noirbizarre/pelican-myst | 0 | 12796318 | <reponame>noirbizarre/pelican-myst
from __future__ import annotations
from typing import Union
import pytest
from .helpers import read_content_metadata
# from pyquery import PyQuery as pq
TASKLIST_EXPECTATIONS: tuple[tuple[Union[dict, list], str], ...] = (
([], "disabled"),
({}, "disabled"),
(["tasklist"], "default"),
({"tasklist": {}}, "default"),
({"tasklist": dict(enabled=True)}, "enabled"),
({"tasklist": dict(label=True)}, "label"),
)
@pytest.mark.parametrize("setting,key", TASKLIST_EXPECTATIONS)
def test_myst_tasklist(setting, key):
content, meta = read_content_metadata("myst/tasklist.md", MYST_PLUGINS=setting)
assert content == meta["expected"][key]
# def test_myst_admonitions():
# content, meta = read_content_metadata("myst/admonitions.md", MYST_PLUGINS=["admonitions"])
# print(content)
# html = pq(content)
# admonitions = html.find("div.admonition")
# assert admonitions.length == 8
# assert admonitions.find("p.admonition-title").length == 8
# assert html.find("div.admonition.note").length == 4
# assert html.find("div.admonition.important").length == 2
# assert html.find("div.admonition.warning").length == 1
| 2 | 2 |
main.py | banasiakmichal/portfel | 0 | 12796319 | # set keyboard mode for ios device
#from kivy.config import Config
#Config.set('kivy', 'keyboard_mode', 'dock')
from kivy.lang.builder import Builder
from kivymd.uix.bottomnavigation import MDBottomNavigation
from kivy.clock import Clock
from functools import partial
import SecondScreen
import FirstScreen
import ThirdScreen
from class_mydb import Mydb
from storage import Storage
from kivy.properties import StringProperty
from kivymd.app import MDApp
from Mdialog import GraphDialog
""" set test window and input android keyboard"""
# from kivy.core.window import Window
# Window.size = (375, 667)
# Window.softinput_mode = "resize"
kv = '''
#:import get_color_from_hex kivy.utils.get_color_from_hex
#:include FirstScreen.kv
#:include SecondScreen.kv
#:include ThirdScreen.kv
<Content>:
orientation: "vertical"
spacing: "12dp"
size_hint_y: None
width: "500dp"
height: "300dp"
BoxLayout:
id: graph
BoxLayout:
id: view
ScrollView:
MDList:
id: list
MDScreen:
Manager:
id: manager
#panel_color: get_color_from_hex("#eeeaea")
#selected_color_background: get_color_from_hex("#97ecf8")
#text_color_active: 0, 0, 0, 1
FirstScreen:
id: screen1
name: 'screen1'
text: 'Kasa'
icon: 'account-cash'
on_leave:
screen2.ids.general_view.populate_view()
screen2.ids.costs_view.populate_view()
SecondScreen:
id: screen2
name: 'screen2'
text: 'Portfel'
icon: 'format-list-bulleted-type'
ThirdScreen:
name: 'screen3'
text: 'Ustawienia'
icon: 'table-settings'
on_leave:
screen1.ids.catpro_view.populate_view()
screen2.ids.general_view.populate_view()
screen2.ids.costs_view.populate_view()
'''
class Manager(MDBottomNavigation):
pass
class Budget(MDApp):
costs_sum = StringProperty('0')
# store = ''
def __init__(self, **kwargs):
super().__init__(**kwargs)
# init DICTstorage from class Storage() in storage.py for ios device
self.storage = Storage(self.user_data_dir)
# self.storage = Storage('') local env
self.store = self.storage.store
self.db = Mydb(self.user_data_dir)
#self.db = Mydb('') local env
def build(self):
self.icon = 'logo.png'
self.theme_cls.primary_palette = "Orange"
self.theme_cls.primary_hue = "500"
return Builder.load_string(kv)
def on_start(self):
self.update_store_cat_pro('category', 'project')
self.update_all()
def on_pause(self):
self.db.conn.close()
def on_stop(self):
self.db.conn.close()
""" fetch db methods """
def update_store_cat_pro(self, *args):
for i in args:
rows = self.db.fetch_col(i)
items = [i for item in rows for i in item if i is not None]
if i == 'category':
self.store['category']['cat'] = list(dict.fromkeys(items))
else:
self.store['project']['pro'] = list(dict.fromkeys(items))
def update_procat_costs(self):
self.db.procat('project', self.store['project']['pro'])
self.db.procat('category', self.store['category']['cat'])
def update_gen_cost(self):
self.fetch_costs()
self.fetch_general_costs()
def update_all(self):
#todo: TEST: fetch from zero db ?
self.fetch_costs()
self.fetch_general_costs()
self.db.procat('project', self.store['project']['pro'])
self.db.procat('category', self.store['category']['cat'])
def fetch_costs(self):
""" all costs for pro, cat and datas source in mydb class"""
rows = self.db.fetch_col(col='cost')
self.store['costs']['RAZEM'] = f'{sum([i for item in rows for i in item if i is not None]):.2f}'
self.costs_sum = f'{(sum([i for item in rows for i in item if i is not None])):.2f}'
def fetch_general_costs(self):
""" fetch and pass into localstore all today costs """
self.fetch_items(self.db.fetch_by_date, 'dzisiaj')
""" fetch and pass into localstore from curent week """
self.fetch_items(self.db.fetch_week, 'w tym tygodniu')
""" fetch and pass into localstore all costs from - current month """
self.fetch_items(self.db.fetch_current_month, 'w tym miesiącu')
""" fetch and pass into localstore all costs from - last month """
self.fetch_items(self.db.fetch_last_mont, 'miesiąc wcześniej')
""" fetch and pass into localstore all costs from - current year """
self.fetch_items(self.db.all_year, 'w tym roku')
""" fetch and pass into local store all cost from last year """
self.fetch_items(self.db.last_year, 'w poprzednim roku')
def fetch_items(self, f, ar1):
""" fetch method"""
r_ = f()
self.store['costs'][ar1] = f'{sum([i for item in r_ for i in item]):.2f}'
return ar1
def storage(self):
#app = MDApp.get_running_app()
#ddir = app.user_data_dir
self.ddir = self.user_data_dir
print('with app:', self.ddir)
print('ddir:', self.user_data_dir + 'STORE')
# return self.user_data_dir + 'STORE'
""" section graph dialog """
def open_graph_dialog(self, text):
item = text[:(text.find(':') - 1)]
if item in self.store['category']['cat']:
r = self.db.fetch_cost_and_data('category', item)
else:
r = self.db.fetch_cost_and_data('project', item)
time = [r[i][1] for i in range(len(r))] #if r[i][0] != 0]
cost = [r[i][0] for i in range(len(r))] #if r[i][0] != 0]
"pass param as a graph attr"
GraphDialog(cost, time, item).show_graph()
Budget().run() | 2.3125 | 2 |
tests/test_htmlreflector.py | christabor/codeReflector | 3 | 12796320 | <filename>tests/test_htmlreflector.py
# -*- coding: utf-8 -*-
__author__ = """<NAME> (<EMAIL>)"""
import unittest
from code_reflector import html_reflector
class SelectorOutputTestCase(unittest.TestCase):
def setUp(self):
self.ref = html_reflector.HTMLReflector()
def test_single_class(self):
res = self.ref.process_string('.foo {}').extract().make_html(
save_as_string=True)
self.assertEqual(res, '<div class="foo"></div>')
def test_single_id(self):
res = self.ref.process_string('#foo {}').extract().make_html(
save_as_string=True)
self.assertEqual(res, '<div id="foo"></div>')
def test_pseudoselector(self):
res = self.ref.process_string('#foo:hover {}').extract().make_html(
save_as_string=True)
self.assertEqual(res, '')
def test_pseudoselector_mixed(self):
res = self.ref.process_string(
'#foo:hover {} #bar {}').extract().make_html(
save_as_string=True)
self.assertEqual(res, '<div id="bar"></div>')
def test_nested_id(self):
res = self.ref.process_string('#foo #bar #bim {}').extract().make_html(
save_as_string=True)
expected = ('<div id="foo"><div id="bar"><div id="bim">'
'</div></div></div>')
self.assertEqual(res, expected)
def test_nested_class(self):
res = self.ref.process_string('.foo .bar .bim {}').extract().make_html(
save_as_string=True)
expected = ('<div class="foo"><div class="bar"><div class="bim">'
'</div></div></div>')
self.assertEqual(res, expected)
def test_compound_class_id(self):
res = self.ref.process_string('.foo#bar {}').extract().make_html(
save_as_string=True)
expected = ('<div id="bar" class="foo"></div>')
self.assertEqual(res, expected)
def test_compound_multiclass(self):
res = self.ref.process_string('.foo.bar.bim {}').extract().make_html(
save_as_string=True)
expected = ('<div class="foo bar bim"></div>')
self.assertEqual(res, expected)
def test_compound_id_multiclass(self):
res = self.ref.process_string('#foo.bar.bim {}').extract().make_html(
save_as_string=True)
expected = ('<div id="foo" class="bar bim"></div>')
self.assertEqual(res, expected)
def test_compound_id_class(self):
res = self.ref.process_string('#foo.bar {}').extract().make_html(
save_as_string=True)
expected = ('<div id="foo" class="bar"></div>')
self.assertEqual(res, expected)
def test_nested_simple_class(self):
res = self.ref.process_string('.foo>.bar {}').extract().make_html(
save_as_string=True)
expected = ('<div class="foo"><div class="bar"></div></div>')
self.assertEqual(res, expected)
def test_nested_simple_id(self):
res = self.ref.process_string('#foo>#bar {}').extract().make_html(
save_as_string=True)
expected = ('<div id="foo"><div id="bar"></div></div>')
self.assertEqual(res, expected)
def test_nested_simple_id_spaces(self):
res = self.ref.process_string('#foo > #bar {}').extract().make_html(
save_as_string=True)
expected = ('<div id="foo"><div id="bar"></div></div>')
self.assertEqual(res, expected)
def test_nested_multiid_multiclass_tag(self):
res = self.ref.process_string(
'.foo > .bar > section#bam section.quux {}').extract().make_html(
save_as_string=True)
expected = ('<div class="foo"><div class="bar"><section id="bam">'
'<section class="quux"></section></section></div></div>')
self.assertEqual(res, expected)
def test_nested_multiid_multiclass_tag_mixedspaces(self):
res = self.ref.process_string(
'.foo > .bar>section#bam section.quux {}').extract().make_html(
save_as_string=True)
expected = ('<div class="foo"><div class="bar"><section id="bam">'
'<section class="quux"></section></section></div></div>')
self.assertEqual(res, expected)
| 2.671875 | 3 |
src/jets/data_ops/get_data_loader.py | isaachenrion/jets | 9 | 12796321 | import os
import pickle
import logging
from src.jets.data_ops.DataLoader import DataLoader
from src.jets.data_ops.Dataset import Dataset
import numpy as np
from .io import load_jets_from_pickle
w_vs_qcd = 'w-vs-qcd'
quark_gluon = 'quark-gluon'
DATASETS = {
'w':(w_vs_qcd,'antikt-kt'),
'wp':(w_vs_qcd + '/pileup','pileup'),
'pp': (quark_gluon,'pp'),
'pbpb': (quark_gluon,'pbpb'),
#'protein': ('proteins', 'casp11')
}
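# Each entry maps a short dataset key to (intermediate directory, filename
# prefix); the prefix expands to "<prefix>-train.pickle" / "<prefix>-test.pickle".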
def load_jets(data_dir, filename, do_preprocessing=False):
if 'w-vs-qcd' in data_dir:
from .w_vs_qcd import preprocess
elif 'quark-gluon' in data_dir:
from .quark_gluon import preprocess
else:
raise ValueError('Unrecognized data_dir!')
#from problem_module import preprocess, crop_dataset
#preprocessed_dir = os.path.join(data_dir, 'preprocessed')
raw_data_dir = os.path.join(data_dir, 'raw')
preprocessed_dir = os.path.join(data_dir, 'preprocessed')
path_to_preprocessed = os.path.join(preprocessed_dir, filename)
if not os.path.exists(path_to_preprocessed) or do_preprocessing:
if not os.path.exists(preprocessed_dir):
os.makedirs(preprocessed_dir)
logging.warning("Preprocessing...")
        preprocess(raw_data_dir, preprocessed_dir, filename)
logging.warning("Preprocessed the data and saved it to {}".format(path_to_preprocessed))
else:
logging.warning("Data at {} and already preprocessed".format(path_to_preprocessed))
jets = load_jets_from_pickle(path_to_preprocessed)
logging.warning("\tSuccessfully loaded data")
logging.warning("\tFound {} jets in total".format(len(jets)))
return jets
def training_and_validation_dataset(data_dir, dataset, n_train, n_valid, preprocess):
intermediate_dir, filename = DATASETS[dataset]
data_dir = os.path.join(data_dir, intermediate_dir)
jets = load_jets(data_dir,"{}-train.pickle".format(filename), preprocess)
problem = data_dir.split('/')[-1]
subproblem = filename
train_jets = jets[n_valid:n_valid + n_train] if n_train > 0 else jets[n_valid:]
train_dataset = Dataset(train_jets, problem=problem,subproblem=subproblem)
#
valid_jets = jets[:n_valid]
valid_dataset = Dataset(valid_jets, problem=problem,subproblem=subproblem)
if 'w-vs-qcd' in data_dir:
from .w_vs_qcd import crop_dataset
elif 'quark-gluon' in data_dir:
from .quark_gluon import crop_dataset
else:
raise ValueError('Unrecognized data_dir!')
valid_dataset, cropped_dataset = crop_dataset(valid_dataset)
train_dataset.extend(cropped_dataset)
train_dataset.shuffle()
##
logging.warning("Building normalizing transform from training set...")
train_dataset.transform()
valid_dataset.transform(train_dataset.tf)
# add cropped indices to training data
logging.warning("\tfinal train size = %d" % len(train_dataset))
logging.warning("\tfinal valid size = %d" % len(valid_dataset))
return train_dataset, valid_dataset
def test_dataset(data_dir, dataset, n_test, preprocess):
train_dataset, _ = training_and_validation_dataset(data_dir, dataset, -1, 27000, False)
intermediate_dir, filename = DATASETS[dataset]
data_dir = os.path.join(data_dir, intermediate_dir)
logging.warning("Loading test data...")
filename = "{}-test.pickle".format(filename)
jets = load_jets(data_dir, filename, preprocess)
jets = jets[:n_test]
dataset = Dataset(jets)
dataset.transform(train_dataset.tf)
# crop validation set and add the excluded data to the training set
if 'w-vs-qcd' in data_dir:
from .w_vs_qcd import crop_dataset
elif 'quark-gluon' in data_dir:
from .quark_gluon import crop_dataset
else:
raise ValueError('Unrecognized data_dir!')
dataset, _ = crop_dataset(dataset)
# add cropped indices to training data
logging.warning("\tfinal test size = %d" % len(dataset))
return dataset
def get_train_data_loader(data_dir, dataset, n_train, n_valid, batch_size, leaves=None,preprocess=None,**kwargs):
train_dataset, valid_dataset = training_and_validation_dataset(data_dir, dataset, n_train, n_valid, preprocess)
train_data_loader = DataLoader(train_dataset, batch_size, leaves=leaves)
valid_data_loader = DataLoader(valid_dataset, batch_size, leaves=leaves)
return train_data_loader, valid_data_loader
def get_test_data_loader(data_dir, dataset, n_test, batch_size, leaves=None,preprocess=None,**kwargs):
dataset = test_dataset(data_dir, dataset, n_test, preprocess)
test_data_loader = DataLoader(dataset, batch_size, leaves=leaves)
return test_data_loader
| 2.1875 | 2 |
download.py | jidzhang/vuejs-docs | 0 | 12796322 | <filename>download.py
#!/usr/env/bin python
# -*- coding:utf-8 -*-
import os, sys, re, urllib2
def download(path, ext):
path = os.path.expanduser(path)
for (dirname, subdir, subfile) in os.walk(path):
for f in subfile:
subf = os.path.join(dirname, f)
if subf.endswith(ext):
fix_file(subf)
        # no manual recursion needed: os.walk already visits every subdirectory
def fix_file(path):
with open(path, 'r+') as f:
data = f.read()
found = False
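        # matches ="//host/path.ext" asset references (js/css/svg) with an
        # optional http(s) scheme; group 2 is the scheme-less host+path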
pat = r'="(https?:)?//([^"<]*\.(js|css|svg))"'
itr = re.finditer(pat, data)
for match in itr:
pre = match.group(1)
if not pre:
pre = 'https:'
url = match.group(2)
parts = url.split('/')
n = len(parts)
filename = parts[n - 1]
dl_dir = 'dl/'
for i in range(n - 1):
dl_dir = dl_dir + parts[i] + '/'
if not os.path.exists(dl_dir):
os.makedirs(dl_dir)
dl_file = dl_dir + filename
downloaded = False
if not os.path.exists(dl_file):
try:
print('download from: ' + pre + '//' + url)
print('download to: ' + dl_file)
response = urllib2.urlopen(pre + '//' + url)
html = response.read()
with open(dl_file, 'w+') as df:
df.write(html)
downloaded = True
except:
print("download failed: " + pre + '//' + url)
else:
downloaded = True
if downloaded:
if re.search(pat, data) != None:
data = re.sub(pat, r'="/dl/\2"', data)
found = True
if found:
f.seek(0)
f.truncate()
f.write(data)
if __name__ == '__main__':
download('./themes', '.ejs')
download('./src/v2', '.md')
| 3.03125 | 3 |
MuJoCo/Validate_GP_Controller.py | sanjaythakur/Uncertainty-aware-Imitation-Learning-on-Multiple-Tasks-using-Bayesian-Neural-Networks | 8 | 12796323 | <gh_stars>1-10
import numpy as np
import _pickle as pickle
import gpflow
import os
import tensorflow as tf
from Load_Controllers import Load_Demonstrator
from multiple_tasks import get_task_on_MUJOCO_environment
import sys
sys.path.insert(0,'./../')
from Housekeeping import *
def validate_GP_controller(domain_name, task_identity, window_size, drift_per_time_step, moving_windows_x_size, behavior_controller, mean_x, deviation_x, mean_y, deviation_y):
if not os.path.exists(LOGS_DIRECTORY):
os.makedirs(LOGS_DIRECTORY)
#file_to_save_logs = LOGS_DIRECTORY + domain_name + '_' + str(task_identity) + '_' + str(window_size) + '_GP.pkl'
logs_for_all_tasks = {}
for task_to_validate in ALL_MUJOCO_TASK_IDENTITIES:
logs_for_a_task = {}
demonstrator_graph = tf.Graph()
with demonstrator_graph.as_default():
demonstrator_controller = Load_Demonstrator(domain_name=domain_name, task_identity=str(task_to_validate))
for validation_trial in range(NUMBER_VALIDATION_TRIALS):
all_observations = []
all_behavior_control_means = []
all_behavior_control_deviations = []
all_behavior_rewards = []
all_demonstrator_controls = []
#all_target_control_means, all_target_control_deviations = [], []
env = get_task_on_MUJOCO_environment(env_name=domain_name, task_identity=str(task_to_validate))
total_cost = total_variance = 0.
observation = env.reset()
finish = False
moving_window_x = np.zeros((1, moving_windows_x_size))
moving_window_x[0, -observation.shape[0]:] = observation
behavior_mean_control, behavior_var_control = behavior_controller.predict_y(NORMALIZE(moving_window_x, mean_x, deviation_x))
behavior_mean_control = REVERSE_NORMALIZE(behavior_mean_control, mean_y, deviation_y)
behavior_var_control = behavior_var_control * deviation_y
time_step = 0.0
while not finish:
all_observations.append(observation)
all_behavior_control_means.append(behavior_mean_control)
all_behavior_control_deviations.append(np.sqrt(behavior_var_control))
observation = np.append(observation, time_step) # add time step feature
demonstrator_control = demonstrator_controller.sess.run(demonstrator_controller.output_action_node, feed_dict={demonstrator_controller.scaled_observation_node: (observation.reshape(1,-1) - demonstrator_controller.offset) * demonstrator_controller.scale})
all_demonstrator_controls.append(demonstrator_control)
time_step += 1e-3
#all_target_control_means.append(target_mean_control)
#all_target_control_deviations.append(target_var_control)
observation, reward, finish, info = env.step(behavior_mean_control)
all_behavior_rewards.append(reward)
#target_mean_control, target_var_control = -1. * np.dot(K, observation), np.array([[0.]])
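                # slide the moving window left by one step, then append the
                # latest control, reward and observation on the right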
if not window_size == 1:
moving_window_x[0, :-drift_per_time_step] = moving_window_x[0, drift_per_time_step:]
moving_window_x[0, -drift_per_time_step:-(drift_per_time_step-behavior_mean_control.shape[1])] = behavior_mean_control[0]
moving_window_x[0, -(drift_per_time_step-behavior_mean_control.shape[1])] = reward
moving_window_x[0, -observation.shape[0]:] = observation
behavior_mean_control, behavior_var_control = behavior_controller.predict_y(NORMALIZE(moving_window_x, mean_x, deviation_x))
behavior_mean_control = REVERSE_NORMALIZE(behavior_mean_control, mean_y, deviation_y)
behavior_var_control = behavior_var_control * deviation_y
all_observations = np.array(all_observations)
all_behavior_control_means = np.concatenate(all_behavior_control_means, axis=0)
all_behavior_rewards = np.array(all_behavior_rewards)
all_behavior_control_deviations = np.concatenate(all_behavior_control_deviations, axis=0)
all_demonstrator_controls = np.array(all_demonstrator_controls)
logs_for_a_task[str(validation_trial)] = {OBSERVATIONS_LOG_KEY: all_observations, BEHAVIORAL_CONTROL_MEANS_LOG_KEY: all_behavior_control_means,
BEHAVIORAL_CONTROL_REWARDS_LOG_KEY: all_behavior_rewards, BEHAVIORAL_CONTROL_DEVIATIONS_LOG_KEY: all_behavior_control_deviations,
TARGET_CONTROL_MEANS_LOG_KEY: all_demonstrator_controls}
logs_for_all_tasks[str(task_to_validate)] = logs_for_a_task
#with open(file_to_save_logs, 'wb') as f:
# pickle.dump(logs_for_all_tasks, f, protocol=-1)
return logs_for_all_tasks | 2.0625 | 2 |
google_destinationD.py | MyColumbus/googleDestinationsScrapper | 0 | 12796324 | <gh_stars>0
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# Columbus - Personalised Travel Itineraries based on experience, country or preferred destination.
# Contact <NAME> <<EMAIL>>
#
import time
import re
import csv
import os
import logging
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
class GoogleDestination:
def __init__(self, country):
self.country = country
self.destinations = {}
self.url = 'https://www.google.com/destination'
self.driver = ''
self.current_destination = ''
def open_browser(self):
options = Options()
options.add_argument("--headless")
self.driver = webdriver.Chrome(chrome_options = options)
def search(self):
"""
Find the search input element and enter the country name.
"""
try:
self.driver.get(self.url)
            # wait for the search input element to load
search_input_present = EC.visibility_of_element_located((By.XPATH, "//div[@class='gb_Xe']//input"))
WebDriverWait(self.driver, 20).until(search_input_present)
input_elem = self.driver.find_element(By.XPATH, "//div[@class='gb_Xe']//input")
input_elem.click()
time.sleep(1) # guarantee
input_elem.send_keys(self.country, Keys.ARROW_DOWN) # write country end select first result
time.sleep(1)
            # wait for the search results menu to load
search_results_present = EC.visibility_of_element_located((By.XPATH, "//ul[@class='sbsb_b']"))
WebDriverWait(self.driver, 20).until(search_results_present)
time.sleep(1) # guarantee
input_elem.send_keys(Keys.ENTER)
destination_page_present = EC.visibility_of_element_located((By.XPATH, "//div[@class='RrdlOe']"))
WebDriverWait(self.driver, 20).until(destination_page_present)
return True
except TimeoutException:
print("1. Timeout problem please try again.")
return False
except Exception as e:
print(e)
return False
def get_destinations(self, at=0):
"""
Get urls of destinations according to each option and append to list.
"""
attempt = at
try:
            # wait for the destinations link to be clickable
destinations_elem_present = EC.element_to_be_clickable((By.XPATH, "//li[@id='DESTINATIONS']"))
WebDriverWait(self.driver, 20).until(destinations_elem_present)
            # click the destinations link (no need to keep a reference; .click() returns None)
            self.driver.find_element(By.XPATH, "//li[@id='DESTINATIONS']").find_element_by_tag_name('a').click()
            # wait for the destinations list to load
destinations_list_present = EC.visibility_of_element_located((By.XPATH, "//div[@class='BvC61c RrdlOe']"))
WebDriverWait(self.driver, 20).until(destinations_list_present)
            # branch on whether any option chips are present
options = self.driver.find_element_by_xpath("//span[@class='irTQQe']").find_elements_by_tag_name("chip-button")
if len(options) == 0:
destination_lists = self.driver.find_elements_by_xpath("//a[@class='sjglme']")
for destination in destination_lists:
destination_name = destination.find_element_by_tag_name("h2").get_attribute('innerHTML')
if destination_name not in self.destinations:
destination_url = destination.get_attribute('href')
self.destinations[destination_name] = {}
self.destinations[destination_name]['URL'] = destination_url
self.destinations[destination_name]['Options'] = None
return True
else:
for i in range(len(options)):
temp_class_of_content = self.driver.find_element_by_xpath("//div[contains(@class,'LOIWPe')]").get_attribute("class")
self.driver.find_element_by_xpath("//span[@class='irTQQe']").find_elements_by_tag_name("chip-button")[i].click()
time.sleep(1)
                    # wait until the content changes
while True:
current_class_of_content = self.driver.find_element_by_xpath("//div[contains(@class,'LOIWPe')]").get_attribute("class")
if current_class_of_content == temp_class_of_content:
time.sleep(3)
else:
break
option_name = self.driver.find_element_by_xpath("//chip-button[@aria-checked='true']").find_element_by_class_name("gws-travel-controls__chip-label").get_attribute("innerHTML")
# get destinations on selected option
destination_lists = self.driver.find_elements_by_xpath("//a[@class='sjglme']")
for destination in destination_lists:
destination_name = destination.find_element_by_tag_name("h2").get_attribute('innerHTML')
if destination_name not in self.destinations:
destination_url = destination.get_attribute('href')
self.destinations[destination_name] = {}
self.destinations[destination_name]['URL'] = destination_url
self.destinations[destination_name]['Options'] = []
self.destinations[destination_name]['Options'].append(option_name)
self.driver.find_element_by_xpath("//span[@class='irTQQe']").find_elements_by_tag_name("chip-button")[i].click()
time.sleep(1)
                    # wait until the content changes back
while True:
temp_class_of_content = self.driver.find_element_by_xpath("//div[contains(@class,'LOIWPe')]").get_attribute("class")
if current_class_of_content == temp_class_of_content:
time.sleep(3)
else:
break
return True
except StaleElementReferenceException:
# if stale exception occur, try again three times
if attempt == 4:
return False
attempt += 1
return self.get_destinations(attempt)
except TimeoutException:
print("2. Timeout problem please try again.")
return False
except Exception as e:
print(e)
return False
def get_destination_details(self, url, at=0):
attempt = at
try:
self.driver.get(url)
destination_detail_present = EC.visibility_of_element_located((By.XPATH, "//div[@class='AofZnb']"))
WebDriverWait(self.driver, 20).until(destination_detail_present)
except TimeoutException:
if attempt == 3:
print("Problem with destination ", url)
return False
attempt += 1
return self.get_destination_details(url, attempt)
soup = BeautifulSoup(self.driver.page_source, 'html.parser')
destination = soup.find('div', class_='AofZnb').text # destination name
self.current_destination = destination
months = {}
month_table = soup.find_all('table', class_='qt3FE')
if len(month_table) > 1:
months['Country'] = self.country
months['Destination'] = self.current_destination
months['Months'] = {}
tr = month_table[1].find_all('tr')
for row in tr[1:13]:
month_name = row.find('td').text
other_elems = row.find_all('td', class_='qRa1yd')
months['Months'][month_name] = {}
months['Months'][month_name]['Popularity'] = ''
months['Months'][month_name]['MinTemp'] = ''
months['Months'][month_name]['MaxTemp'] = ''
months['Months'][month_name]['Precipitation'] = ''
for elem in other_elems:
length_of_spans = len(elem.find_all('span'))
# popularity
if length_of_spans == 1:
popularity = elem.find('span')
if popularity.has_attr('aria-label'):
popularity = elem.find('span')['aria-label']
popularity = re.findall(r'\d+\.\d+|\d+', popularity)[0]
months['Months'][month_name]['Popularity'] = popularity
                # temperature
                if length_of_spans == 2:
                    temperatures = [i.text for i in elem.find_all('span')]
                    months['Months'][month_name]['MaxTemp'] = temperatures[0]
                    months['Months'][month_name]['MinTemp'] = temperatures[1].split("/")[1]
# precipitation
if length_of_spans == 0:
precipitation = elem.text
months['Months'][month_name]['Precipitation'] = precipitation
return months
else:
return False
    def get_topsights_details(self, at=0):
attempt = at
try:
# click the top sight element on menu
self.driver.find_element(By.XPATH, "//li[@id='TOP_SIGHTS']").find_element_by_tag_name('a').click()
            # wait for the destination's top sights page to load
top_sights_list_present = EC.visibility_of_element_located((By.XPATH, "//div[@class='w9GWWb']"))
WebDriverWait(self.driver, 20).until(top_sights_list_present)
top_sights = self.driver.find_element_by_tag_name('ol').find_elements_by_tag_name('li') # get all topsight element on destination
top_sights_detail = {}
top_sights_detail['Country'] = self.country
top_sights_detail['Destination'] = self.current_destination
top_sights_detail['Topsights'] = {}
for idx,top_sight in enumerate(top_sights):
                top_sight.click() # click each top sight item one by one
try:
                    # wait for the top sight content to load
top_sight_present = EC.visibility_of_element_located((By.XPATH, "//div[contains(@class,'au3Yqc')]"))
WebDriverWait(self.driver, 20).until(top_sight_present)
except TimeoutException:
continue
top_sight_html = top_sight.get_attribute('innerHTML')
# get details
soup = BeautifulSoup(top_sight_html, 'html.parser')
place_name = soup.find('h2', class_='NbdpWc').text
# rating
rating = soup.find('span', class_='rtng')
if rating:
if len(rating) == 2:
rating.style.decompose()
rating = rating.text
else:
rating = ''
# number of reviews
number_of_reviews = soup.find('span', attrs={'class':'Vfp4xe p13zmc'})
if number_of_reviews:
if len(number_of_reviews) == 2:
number_of_reviews.style.decompose()
number_of_reviews = number_of_reviews.text.strip()
else:
number_of_reviews = ''
# get the details of typical time spent
if self.driver.find_element_by_tag_name('async-local-kp').is_displayed() == True:
time_spent_html = self.driver.find_element_by_tag_name('async-local-kp').get_attribute('innerHTML')
else:
time_spent_html = self.driver.find_element_by_id('gws-trips-desktop__dest-mrgkp').get_attribute('innerHTML')
time_spent_soup = BeautifulSoup(time_spent_html,'html.parser')
time_spent = time_spent_soup.find('div', class_='UYKlhc')
if time_spent:
time_spent = time_spent.find('b').text
else:
time_spent = ''
# add details to dict
top_sights_detail['Topsights'][idx] = {}
top_sights_detail['Topsights'][idx]['Place Name'] = place_name
top_sights_detail['Topsights'][idx]['Rating'] = rating
top_sights_detail['Topsights'][idx]['Number of Reviews'] = number_of_reviews
top_sights_detail['Topsights'][idx]['Typical Time Spent'] = time_spent
                # wait for the close button to be clickable
close_present = EC.element_to_be_clickable((By.TAG_NAME, "g-fab"))
WebDriverWait(self.driver, 20).until(close_present)
self.driver.find_element_by_tag_name('g-fab').click()
time.sleep(1)
return top_sights_detail
except NoSuchElementException:
# if there are no topsight at 'destination'
return False
except Exception as e:
if attempt == 2:
print(e, " .3.")
return False
attempt += 1
            return self.get_topsights_details(attempt)
def write_month(self, data):
path = os.path.dirname(os.path.abspath(__file__))
with open(path + '/Months.csv', 'a', newline='', encoding='utf-8') as f:
writer = csv.writer(f, delimiter=',')
file_is_empty = os.stat(path + '/Months.csv').st_size == 0
if file_is_empty:
fields = ['Country', 'Destination', 'Month', 'Popularity', 'MinTemp', 'MaxTemp', 'Precipitation']
writer.writerow(fields)
#
for key, items in data['Months'].items():
writer.writerow([data['Country'],
data['Destination'],
key,
items['Popularity'],
items['MinTemp'],
items['MaxTemp'],
items['Precipitation']])
def write_top_sight(self, data):
path = os.path.dirname(os.path.abspath(__file__))
with open(path + '/ThingsToDo.csv', 'a', encoding='utf-8', newline='') as f:
writer = csv.writer(f, delimiter=',')
option_fields = {'Architecture': 0, 'Nature': 1, 'Shopping': 2, 'Fishing': 3, 'Hiking': 4, 'Outdoor Recreation': 5, 'Adventure': 6,
'Beaches': 7, 'Camping': 8, 'Caves': 9, 'Museums': 10, 'National Parks': 11, 'Art': 12, 'Desert': 13, 'Coral reef': 14, 'Skiing': 15,
'Snowboarding': 16, 'Winter sports': 17, 'Wildlife': 18, 'Penguin': 19, 'Glacier': 20, 'Ecotourism': 21, 'Snorkeling': 22, 'Koala': 23,
'Surfing': 24, 'Nature reserve': 25, 'Volcano': 26, 'Sailing': 27, 'Scuba diving': 28, 'Theaters': 29, 'Elephant': 30, 'Safari': 31,
'Jaguar': 32, 'Casinos': 33, 'Kitesurfing': 34, 'Windsurfing': 35, 'Birdwatching': 36, 'Rainforest': 37, 'Nightlife': 38,
'Whale watching': 39, 'Reindeer': 40, 'Gorilla': 41, 'Kayaking': 42, 'Polar bear': 43, 'Hot spring': 44, 'Tiger': 45, 'Yoga': 46,
'Orangutan': 47, 'Golf': 48, 'Rafting': 49, 'Autumn leaf color': 50, 'Dolphin': 51, 'Wine tasting': 52, 'Climbing': 53, 'Paragliding': 54,
'Bungee jumping': 55, 'Whale shark': 56, 'Alpine skiing': 57, 'Historic site': 58}
file_is_empty = os.stat(path + '/ThingsToDo.csv').st_size == 0
if file_is_empty:
fields = ['Country', 'Destination', 'Things to Do', 'Rating', 'Number of Review', 'Typical Time Spent']
fields.extend([key for key in option_fields])
writer.writerow(fields)
# write options
options = ['no' for key in option_fields]
destination = data['Destination']
if self.destinations[destination]['Options'] != None:
for o in self.destinations[destination]['Options']:
idx = option_fields.get(o)
if idx != None:
options[idx] = 'yes'
# write data
for key, items in data['Topsights'].items():
row = [data['Country'],
data['Destination'],
items['Place Name'],
items['Rating'],
items['Number of Reviews'],
items['Typical Time Spent']]
row += options
writer.writerow(row)
def run(self):
self.open_browser()
search = self.search()
if search == True:
get_dests = self.get_destinations()
if get_dests == True:
counter = 0
for key, item in self.destinations.items():
if counter % 20 == 0:
# re open browser for memory
self.driver.close()
self.open_browser()
counter += 1
print('{}/{}'.format(counter, len(self.destinations)))
dest_details = self.get_destination_details(item['URL'])
if dest_details != False:
self.write_month(dest_details)
                topsight_details = self.get_topsights_details()
if topsight_details != False:
self.write_top_sight(topsight_details)
country = input("Enter Country: ")
a = GoogleDestination(country)
a.run()
| 3.0625 | 3 |
app/solarvibes/login_check/views.py | Fantaso/site-app-docker | 4 | 12796325 | <filename>app/solarvibes/login_check/views.py
from flask import Blueprint, render_template, redirect, url_for, flash, request
from solarvibes import db
from flask_login import current_user
from flask_security import login_required
login_check = Blueprint(
'login_check',
__name__,
)
#############################
# INDEX
#############################
@login_check.route('/', methods=['GET'])
@login_required
def index():
user = current_user
print('entering...')
if request.method == 'GET':
# if the user has not yet been authenticated -> goto login
if user.is_anonymous:
print('you must be logged in!')
flash('you must be logged in!')
return redirect(url_for('login'))
# if the user is authenticated
if user.is_authenticated:
# if is the first time he logs in
if not user.completed_welcome or user.login_count == None:
print('1')
return redirect(url_for('welcome.index'))
# if the user already complete the welcome setup
if user.completed_welcome:
print('2')
return redirect(url_for('main.index'))
else:
print('3')
return redirect(url_for('welcome.index'))
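
# Registration sketch (hypothetical app factory; the url_prefix is an assumption,
# not taken from this repo):
# from solarvibes.login_check.views import login_check
# app.register_blueprint(login_check, url_prefix='/login-check')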
| 2.484375 | 2 |
iepy/webui/corpus/migrations/0003_remove_dont_know_option.py | francolq/iepy | 813 | 12796326 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('corpus', '0002_data_migration_dont_know_skip_merge'),
]
operations = [
migrations.AlterField(
model_name='evidencelabel',
name='label',
preserve_default=True,
field=models.CharField(
default='SK',
null=True,
max_length=2,
choices=[
('YE', 'Yes, relation is present'),
('NO', 'No relation present'),
('NS', 'Evidence is nonsense'),
('SK', 'Skipped labeling of this evidence')
]
),
),
]
| 1.65625 | 2 |
ui/response_info_ui.py | LinkedList/qTTp | 0 | 12796327 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'response_info.ui',
# licensing of 'response_info.ui' applies.
#
# Created: Sun Feb 17 10:16:18 2019
# by: pyside2-uic running on PySide2 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_ResponseInfo(object):
def setupUi(self, ResponseInfo):
ResponseInfo.setObjectName("ResponseInfo")
ResponseInfo.resize(102, 28)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(ResponseInfo.sizePolicy().hasHeightForWidth())
ResponseInfo.setSizePolicy(sizePolicy)
self.reqStatusLayout = QtWidgets.QHBoxLayout(ResponseInfo)
self.reqStatusLayout.setSpacing(5)
self.reqStatusLayout.setContentsMargins(6, 0, -1, 0)
self.reqStatusLayout.setObjectName("reqStatusLayout")
self.statusCode = QtWidgets.QLabel(ResponseInfo)
self.statusCode.setStyleSheet("")
self.statusCode.setText("")
self.statusCode.setObjectName("statusCode")
self.reqStatusLayout.addWidget(self.statusCode)
self.time = QtWidgets.QLabel(ResponseInfo)
self.time.setText("")
self.time.setMargin(5)
self.time.setObjectName("time")
self.reqStatusLayout.addWidget(self.time)
self.contentType = QtWidgets.QLabel(ResponseInfo)
self.contentType.setText("")
self.contentType.setObjectName("contentType")
self.reqStatusLayout.addWidget(self.contentType)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.reqStatusLayout.addItem(spacerItem)
self.retranslateUi(ResponseInfo)
QtCore.QMetaObject.connectSlotsByName(ResponseInfo)
def retranslateUi(self, ResponseInfo):
pass
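
# Typical use of a pyside2-uic generated class (host widget name is arbitrary):
# app = QtWidgets.QApplication([])
# widget = QtWidgets.QWidget()
# ui = Ui_ResponseInfo()
# ui.setupUi(widget)
# widget.show()
# app.exec_()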
| 1.429688 | 1 |
xfeeds/management/commands/seed.py | rubeon/django-xfeeds | 0 | 12796328 | <filename>xfeeds/management/commands/seed.py<gh_stars>0
import time
import urllib.parse
import urllib.request
import feedparser
from django.core.management.base import BaseCommand, CommandError
from xfeeds.models import Feed
from xfeeds.parser import tasks
from bs4 import BeautifulSoup as soup
from pprint import pprint
# Command to feed a bunch of content into the site
# This will go to a seeder page, and spider the first
# 100 feeds it can find
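# Example invocation (URL and limits are illustrative):
#   python manage.py seed https://example.com/ --max-links 100 --max-feeds 50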
class Command(BaseCommand):
"""
Spider a bunch o' goddam feeds
"""
all_links = []
rss_links = []
spidered = []
max_links = 500
max_feeds = 500
help = 'Add feeds starting with a seeder site'
def add_arguments(self, parser):
"""
Build argument parser
"""
parser.add_argument('url', type=str)
parser.add_argument('--max-links', '-l', dest='max_links',
type=int,
default=500,
help="Maximum number of links "
"to spider (approx.)"
)
parser.add_argument('--max-feeds', '-f', dest='max_feeds',
type=int,
default=500,
help="Maximum number of RSS"
"feeds to process "
"(approx.)"
)
def spider(self, site):
"""
grabs all the outgoing links from a site
"""
# print("Spidering", site)
if len(self.all_links) > self.max_links:
print("Skipping (max_links)")
return
try:
rurl = urllib.parse.urlparse(site)
        except Exception:
print("Couldn't parse", site)
return
self.spidered.append(site)
pprint(self.spidered)
try:
html = soup(urllib.request.urlopen(site, timeout=1).read(), 'html.parser')
        except Exception:
return
alist = html.find_all('a')
links = [link.get('href') for link in alist]
for link in links:
# print(link)
try:
purl = urllib.parse.urlparse(link)
if purl.netloc != rurl.netloc and \
purl.scheme in ['http', 'https']:
                    # append returns None, so chaining with `and` would always
                    # skip the print; do them as two statements
                    self.all_links.append(link)
                    print("added", link)
else:
continue
            except Exception:
                continue
            else:
                # printing in the else branch (not finally) avoids a NameError
                # when urlparse itself failed and `purl` was never bound
                print("{} Processed {}".format(len(self.all_links), purl.netloc))
# pprint(all_links)
# return all_links
for link in self.all_links:
if link not in self.spidered:
self.spider(link)
def handle(self, *args, **options):
start = time.time()
print(options)
self.max_links = options.get('max_links')
self.max_feeds = options.get('max_feeds')
url = options.get('url')
print("Starting with", url)
self.spider(url)
print("Spidered {} sites".format(len(self.all_links)))
for site in self.all_links:
try:
self.rss_links.extend(tasks.find_feed(site))
except Exception as e:
print("Skipping", site)
print(str(e))
finally:
print("#", len(self.rss_links))
pprint(self.rss_links)
finish = time.time()
print("Found {} RSS feeds in {} seconds"
.format(len(self.rss_links), finish - start))
for feed in self.rss_links:
tasks.url_to_feed(feed) | 2.515625 | 3 |
fru/FirewallManager/apps.py | Owen-Cummings/Firewall-GUI-Prototype | 0 | 12796329 | from django.apps import AppConfig
class FirewallmanagerConfig(AppConfig):
name = 'FirewallManager'
| 1.140625 | 1 |
scripts/cond_num_run.py | polyfem/Decoupling-Simulation-Accuracy-from-Mesh-Quality | 2 | 12796330 | <reponame>polyfem/Decoupling-Simulation-Accuracy-from-Mesh-Quality
import os
import json
import subprocess
import tempfile
if __name__ == '__main__':
polyfem_exe = "./PolyFEM_bin"
out_folder = "cond_num"
n_refs = [0, 1, 2, 3]
p_refs = [False, True]
current_folder = cwd = os.getcwd()
with open("test.json", 'r') as f:
json_data = json.load(f)
for is_bad in [True, False]:
mesh = "../data/conditioning_44000_bad.mesh" if is_bad else "../data/conditioning_44000_good.mesh"
out_f = out_folder + ('bad' if is_bad else 'good')
for ref in n_refs:
for pref in p_refs:
json_data["mesh"] = mesh
json_data["n_refs"] = ref
json_data["use_p_ref"] = pref
json_data["output"] = os.path.join(os.getcwd(), out_f, "out_" + str(ref) + ("_pref" if pref else "") + ".json")
json_data["stiffness_mat_save_path"] = os.path.join(os.getcwd(), out_f, "mat_" + str(ref) + ("_pref" if pref else "") + ".json")
with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
with open(tmp_json.name, 'w') as f:
f.write(json.dumps(json_data, indent=4))
args = [polyfem_exe, '-json', tmp_json.name, '-cmd']
subprocess.run(args)
| 2.21875 | 2 |
src/stock_logger.py | SteveZhengMe/TK-Gui-Assignment | 0 | 12796331 | <gh_stars>0
# This project is a assignment of College.
# Purpose: Practice TK and database connection
# Usage: The user can add a stock record to Sqllite database, and one can search and list the records
#
# Author: <NAME>
# Date: 2021-03-17
import tkinter as tk
from tkinter import ttk, messagebox, filedialog
from functools import partial
from datetime import datetime
import os
import sqlite3
from sqlite3 import Error
##
# Validate the input
class Validator:
# Return error message if the input is not a number (float)
# Return None if the input is valid
def isNumber(self,input):
errMsg = "Please input a number."
try:
if input=='NaN':
return errMsg
float(input)
except ValueError:
return errMsg
else:
return None
# Return error message if the input is blank
# Return None if the input is valid
def isEmpty(self, input):
errMsg = "Value required"
if input != "":
return None
else:
return errMsg
# Return error message if the input is not in a "yyyy-MM-dd" format
# Return None if the input is valid
def isDate(self, input):
errMsg = "Please input a date in yyyy-MM-dd format."
try:
datetime.strptime(input, "%Y-%m-%d")
except ValueError:
return errMsg
else:
return None
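
# Quick sanity checks for Validator (illustrative):
# v = Validator()
# assert v.isNumber("3.14") is None and v.isNumber("abc") is not None
# assert v.isDate("2021-03-17") is None
# assert v.isEmpty("") == "Value required"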
##
# One label and one combobox
class LabelDDCombo(tk.Frame):
def __init__(self, parent, labelName="Label Name", entryState="normal", packSide="left", size=(0,0), margin=(0,0),ddItems=[],*args,**kw):
super().__init__(master=parent, *args, **kw)
# Create label and pack
self.label = tk.Label(self, text=labelName, font=("Courier",9), fg="#333", anchor="e")
if size[0] != None:
self.label.config(width=size[0])
self.label.pack(side=packSide, padx=(margin[0],0), pady=margin[1])
# Create input and pack
self.inputValue = tk.StringVar()
self.input = ttk.Combobox(self, textvariable = self.inputValue, state=entryState, values=ddItems)
self.input.current(0)
if size[1] != None:
self.input.config(width=size[1])
self.input.pack(side=packSide, padx=(0,margin[0]), pady=margin[1])
# When the value is invalidate, this handler will display error message.
# The handler should be .config(text=XXX)
def setInputErrorHandler(self, handler):
self.errHandler = handler
# The validator. It will call validator class and loop
def validator(self):
validator = Validator()
for valRules in self.validatorArray:
#eval()
validationErrors = validator.isEmpty(self.getDDValue())
if validationErrors != None:
# Output the error message to "error handler"
self.errHandler.config(text=self.label["text"] + " - " + validationErrors)
return False
return True
# When focus, focus the input box
def focus(self):
self.input.focus()
# Return the input value
def getDDValue(self):
return self.inputValue.get()
def setValue(self, valueIndex):
self.input.current(valueIndex)
##
# One label and one input box
class LabelInputCombo(tk.Frame):
def __init__(self, parent, labelName="Label Name", entryState="normal", packSide="left", size=(0,0), margin=(0,0),validateArray=[],*args,**kw):
super().__init__(master=parent, *args, **kw)
# validateArray = ["isNumber", "isEmpty"], means the value needs two validation
self.validatorArray = validateArray
# Create label and pack
self.label = tk.Label(self, text=labelName, font=("Courier",9), fg="#333", anchor="e")
if size[0] != None:
self.label.config(width=size[0])
self.label.pack(side=packSide, padx=(margin[0],0), pady=margin[1])
# Create input and pack
self.inputValue = tk.StringVar()
self.input = tk.Entry(self, textvariable=self.inputValue, state=entryState)
if size[1] != None:
self.input.config(width=size[1])
self.input.pack(side=packSide, padx=(0,margin[0]), pady=margin[1])
# When the value is invalidate, this handler will display error message.
# The handler should be .config(text=XXX)
def setInputErrorHandler(self, handler):
self.errHandler = handler
# The validator. It will call validator class and loop
def validator(self):
self.errHandler.config(text="No Error")
validator = Validator()
for valRules in self.validatorArray:
            # getattr avoids eval(), which would break on inputs containing quotes
            validationErrors = getattr(validator, valRules)(self.inputValue.get())
if validationErrors != None:
# Output the error message to "error handler"
self.errHandler.config(text=self.label["text"] + " - " + validationErrors)
self.input.delete(0,"end")
return False
return True
# When focus, focus the input box
def focus(self):
self.input.focus()
# Return the input value
def getInputValue(self):
return self.inputValue.get()
def setValue(self, value):
        # A disabled Entry ignores programmatic edits, so briefly re-enable it
        if self.input["state"].lower() == "disabled":
            self.input.config(state="normal")
            self.input.delete(0,"end")
            self.input.insert(0,value)
            self.input.config(state="disabled")
        else:
            self.input.delete(0,"end")
            self.input.insert(0,value)
# Table view
class TreeViewWithScrollBar(tk.Frame):
def __init__(self, parent, columnsAttr, tableRows=5, *args,**kw):
super().__init__(master=parent, *args, **kw)
columns = list(item["colName"] for item in columnsAttr)
self.treeview = ttk.Treeview(self, height=tableRows, show="headings", columns=columns)
for aColumn in columnsAttr:
self.treeview.column(aColumn["colName"], width=aColumn["width"], anchor=aColumn["anchor"])
self.treeview.heading(aColumn["colName"], text=aColumn["colName"])
treeScroll = ttk.Scrollbar(self, orient="vertical",command=self.treeview.yview)
self.treeview.grid(row=0,column=0)
treeScroll.grid(row=0,column=1,sticky="NSEW")
self.treeview.configure(yscrollcommand=treeScroll.set)
def addValues(self,valueArray):
self.treeview.insert('','end',values=valueArray)
def clearAll(self):
self.treeview.delete(*self.treeview.get_children())
def setValues(self, tupleArray):
if tupleArray is not None:
self.clearAll()
for row in tupleArray[0]:
self.addValues(row)
def getRecordsCount(self):
return len(self.treeview.get_children())
##
# A layout to group some elements.
# Support two layouts:
# Use "h" to pack horizontally
# Use "v" to pack vertically
class LayoutFrame(tk.Frame):
def __init__(self, parent, *args, **kw):
super().__init__(master=parent, *args, **kw)
def layout(self, layout, *items):
if items != None:
for item in items:
if layout == "v":
item.pack(side='top', pady=5)
else:
item.pack(side='left', padx=5)
return self
############################# Above are the widgets; Below are the UI design ###########################
##
# "Activity Display" contains two buttons on the top: Summary and Activities
class ActivityDisplayWindow(tk.Frame):
summaryFrame = None
activitiesDataTableFrame = None
dbName = "stocks.db"
def __init__(self,parent):
self.parent = parent
self.parent.resizable(False, False)
self.windowSelfConfig()
self.createWidgets()
def windowSelfConfig(self):
self.parent.geometry('400x600+20+20')
self.parent.title("Activities Display")
self.parent.protocol("WM_DELETE_WINDOW", self.onClose)
def onClose(self):
if messagebox.askokcancel("Quit", "Do you want to quit both two windows?"):
self.parent.destroy()
def createWidgets(self):
# self.parent.rowconfigure(0,weight=1)
self.parent.columnconfigure(0,weight=1)
topButtonsArea = LayoutFrame(self.parent)
self.summaryButton = tk.Button(topButtonsArea, text="Summary", command=partial(self.switchButtonOnClick,"summary"))
self.activitiesButton = tk.Button(topButtonsArea, text="Activities", command=partial(self.switchButtonOnClick,"Activities"))
topButtonsArea.layout("h",self.summaryButton, self.activitiesButton).grid(row=0,column=0,pady=10)
self.buildSummaryPage()
def buildSummaryPage(self):
if self.summaryFrame is None:
self.summaryFrame = LayoutFrame(self.parent)
self.uniqueStockSymbols = tk.StringVar()
self.oldestTransactionSummary = LabelInputCombo(self.summaryFrame, labelName="Oldest Transaction:", entryState="disabled", size=(22,22), margin=(2,2))
self.newestTransactionSummary = LabelInputCombo(self.summaryFrame, labelName="Newest Transaction:", entryState="disabled", size=(22,22), margin=(2,2))
self.cheapestPriceSymmary = LabelInputCombo(self.summaryFrame, labelName="Cheapest Price:", entryState="disabled", size=(22,22), margin=(2,2))
self.mostExpensivePriceSummary = LabelInputCombo(self.summaryFrame, labelName="Most Expensive Price:", entryState="disabled", size=(22,22), margin=(2,2))
self.mostTradedStockSummary = LabelInputCombo(self.summaryFrame, labelName="Most Traded Stock:", entryState="disabled", size=(22,22), margin=(2,2))
self.summaryFrame.layout("v",
tk.Label(self.summaryFrame, text="", font=("Arial", 14), anchor="w"),
tk.Label(self.summaryFrame, text="Unique Stock Symbols", font=("Arial", 14), anchor="w"),
tk.Listbox(self.summaryFrame, listvariable=self.uniqueStockSymbols),
tk.Label(self.summaryFrame, text="", font=("Arial", 14), anchor="w"),
tk.Label(self.summaryFrame, text="Summary", font=("Arial", 14), anchor="w"),
self.oldestTransactionSummary,
self.newestTransactionSummary,
self.cheapestPriceSymmary,
self.mostExpensivePriceSummary,
self.mostTradedStockSummary
)
self.summaryFrame.grid(row=1,column=0)
self.updateInfo()
def buildActivitiesPage(self):
if self.activitiesDataTableFrame is None:
self.activitiesDataTableFrame = TreeViewWithScrollBar(self.parent,[
{"colName":"ID","width":10,"anchor":"center"},
{"colName":"Date","width":100,"anchor":"center"},
{"colName":"Symbol","width":80,"anchor":"center"},
{"colName":"Transation","width":70,"anchor":"center"},
{"colName":"Quantity","width":70,"anchor":"center"},
{"colName":"Price$","width":60,"anchor":"center"}],tableRows=26)
self.activitiesDataTableFrame.grid(row=1,column=0)
self.updateInfo()
# Update the data from DB
def updateInfo(self):
dataController = DataController(self.dbName)
if self.summaryFrame is not None:
summaryResults = dataController.getSummaryInfo()
if summaryResults is not None:
tradeSymbols = summaryResults[0]
self.uniqueStockSymbols.set([x[0] for x in tradeSymbols])
OldestTrade = summaryResults[1][0]
self.oldestTransactionSummary.setValue("%s %s %s" % (OldestTrade[1],OldestTrade[3],OldestTrade[2]))
newestTrade = summaryResults[2][0]
self.newestTransactionSummary.setValue("%s %s %s" % (newestTrade[1],newestTrade[3],newestTrade[2]))
cheapestTrade = summaryResults[3][0]
self.cheapestPriceSymmary.setValue("$%0.2f %s %s" % (cheapestTrade[5],cheapestTrade[3],cheapestTrade[2]))
expensiveTrade = summaryResults[4][0]
self.mostExpensivePriceSummary.setValue("$%0.2f %s %s" % (expensiveTrade[5],expensiveTrade[3],expensiveTrade[2]))
mostTrade = summaryResults[5][0]
self.mostTradedStockSummary.setValue("%s (%d Transactions)" % (mostTrade[1],mostTrade[0]))
if self.activitiesDataTableFrame is not None:
self.activitiesDataTableFrame.setValues(dataController.listTransactions())
def switchButtonOnClick(self, activity):
if activity.lower() == "summary":
if self.activitiesDataTableFrame is not None:
self.activitiesDataTableFrame.grid_forget()
self.buildSummaryPage()
elif activity.lower() == "activities":
if self.summaryFrame is not None:
self.summaryFrame.grid_forget()
self.buildActivitiesPage()
##
# "Activity Display" contains two buttons on the top: Summary and Activities
class ActivityEntryWindow(tk.Frame):
# will be overwritten in class constructor
dbName = "stocks_test.db"
def __init__(self,parent, parentWindowClass):
self.parent = parent
self.parentClass = parentWindowClass
self.dbName = parentWindowClass.dbName
self.parent.resizable(False, False)
self.windowSelfConfig()
self.createWidgets()
def windowSelfConfig(self):
self.parent.geometry('400x600+450+20')
self.parent.title("Activity Entry")
self.parent.protocol("WM_DELETE_WINDOW", self.onClose)
# Destroy parent window
def onClose(self):
if messagebox.askokcancel("Quit", "Do you want to quit both two windows?"):
self.parentClass.parent.destroy()
def createWidgets(self):
self.errorMessageDisplay = tk.Label(self.parent, text="No Error", font=("Arial", 10), fg="red", anchor="w")
self.dataInputForm().pack(side="top", pady=(20,10))
self.buttons().pack(side="top", pady=(0,20))
tk.Label(self.parent, text="All Transactions", font=("Arial", 14), anchor="w").pack(side="top")
self.allTransactions = TreeViewWithScrollBar(self.parent,[
{"colName":"ID","width":10,"anchor":"center"},
{"colName":"Date","width":100,"anchor":"center"},
{"colName":"Symbol","width":80,"anchor":"center"},
{"colName":"Transation","width":70,"anchor":"center"},
{"colName":"Quantity","width":70,"anchor":"center"},
{"colName":"Price","width":60,"anchor":"center"}],tableRows=19)
self.allTransactions.pack(side="top", pady=(10,0), fill="both")
self.errorMessageDisplay.pack(side="bottom", fill="x")
self.updateTransactions()
def dataInputForm(self):
dataInputFrame = LayoutFrame(self.parent)
self.dateInput = LabelInputCombo(dataInputFrame, labelName="Date", validateArray=["isDate", "isEmpty"], size=(5,10), packSide="top", margin=(1,1))
self.dateInput.setInputErrorHandler(self.errorMessageDisplay)
self.symbolInput = LabelInputCombo(dataInputFrame, labelName="Symbol", validateArray=["isEmpty"], size=(6,6), packSide="top", margin=(2,2))
self.symbolInput.setInputErrorHandler(self.errorMessageDisplay)
        self.transactionInput = LabelDDCombo(dataInputFrame, labelName="Transaction", ddItems=["","buy","sell"], size=(10,5),entryState="readonly",packSide="top", margin=(2,2))
        self.transactionInput.setInputErrorHandler(self.errorMessageDisplay)
self.quantityInput = LabelInputCombo(dataInputFrame, labelName="Quantity", validateArray=["isNumber", "isEmpty"], size=(8,8), packSide="top", margin=(2,2))
self.quantityInput.setInputErrorHandler(self.errorMessageDisplay)
self.priceInput = LabelInputCombo(dataInputFrame, labelName="Price", validateArray=["isNumber", "isEmpty"], size=(5,6), packSide="top", margin=(2,2))
self.priceInput.setInputErrorHandler(self.errorMessageDisplay)
dataInputFrame.layout('h',
self.dateInput,
self.symbolInput,
            self.transactionInput,
self.quantityInput,
self.priceInput
)
return dataInputFrame
def buttons(self):
buttonsFrame = LayoutFrame(self.parent)
recordButton = tk.Button(buttonsFrame, text="Record", command=self.recordOnClick)
clearButton = tk.Button(buttonsFrame, text="Clear", command=self.clearOnClick)
        searchButton = tk.Button(buttonsFrame, text="Search", command=self.searchOnClick)
exportButton = tk.Button(buttonsFrame, text="Export", command=self.exportOnClick)
buttonsFrame.layout('h', recordButton, clearButton, searchButton, exportButton)
return buttonsFrame
def updateTransactions(self):
self.allTransactions.setValues(DataController(self.dbName).listTransactions())
def generateParametersDict(self):
queryDict = {}
if self.dateInput.getInputValue() != "" and self.dateInput.validator():
queryDict["transaction_date"] = self.dateInput.getInputValue()
if self.symbolInput.getInputValue() != "" and self.symbolInput.validator():
queryDict["symbol"] = self.symbolInput.getInputValue()
        if self.transactionInput.getDDValue() != "":
            queryDict["transaction_direction"] = self.transactionInput.getDDValue()
if self.quantityInput.getInputValue() != "" and self.quantityInput.validator():
queryDict["Quantity"] = self.quantityInput.getInputValue()
if self.priceInput.getInputValue() != "" and self.priceInput.validator():
queryDict["price"] = self.priceInput.getInputValue()
return queryDict
def recordOnClick(self):
inputDict = self.generateParametersDict()
# 5 means all items are inputted
if len(inputDict) == 5:
if DataController(self.dbName).addTransaction(inputDict["transaction_date"],inputDict["symbol"],inputDict["transaction_direction"],inputDict["Quantity"],inputDict["price"]):
self.updateTransactions()
self.parentClass.updateInfo()
self.clearOnClick()
self.errorMessageDisplay.config(text="Insert Successfully")
else:
self.errorMessageDisplay.config(text="Insert Fail.")
else:
self.errorMessageDisplay.config(text="Please complete all input items")
def clearOnClick(self):
self.dateInput.setValue("")
self.symbolInput.setValue("")
        self.transactionInput.setValue(0)
self.quantityInput.setValue("")
self.priceInput.setValue("")
self.errorMessageDisplay.config(text="All inputs are cleared")
def searchOnClick(self):
self.allTransactions.setValues(DataController(self.dbName).listTransactions(self.generateParametersDict()))
self.errorMessageDisplay.config(text=" %d records returned" % self.allTransactions.getRecordsCount())
def exportOnClick(self):
destFile = filedialog.asksaveasfile(filetypes = [('Text Document', '*.txt')], defaultextension = [('Text Document', '*.txt')])
if destFile is not None:
exportResult = DataController(self.dbName).listTransactions()
if exportResult:
destFile.write("User Activity")
for record in exportResult[0]:
destFile.write("\n%d, %s, %s, %s, %d, %.2f" % record)
destFile.close()
self.errorMessageDisplay.config(text="Export Successfully")
################################# Above are UI design, below are database access code ########################
##
# Controller: Manipulate the data and return to View
class DataController:
def __init__(self, dataFile):
self.db = dataFile
if not os.path.exists(dataFile):
# Create Data
if not self.initializeDatabase(withData = True):
raise Exception("Database Initialize Error")
# get all information in one connection
def getSummaryInfo(self):
isSuccess, dataResult = self.runSql([
'select distinct symbol from stocks',
'select * from stocks order by transaction_date asc limit 1',
'select * from stocks order by transaction_date desc limit 1',
'select * from stocks order by price asc limit 1',
'select * from stocks order by price desc limit 1',
'select count(id) as trade_times, symbol from stocks group by symbol order by trade_times desc limit 1'
])
if isSuccess:
return dataResult
return None
def listTransactions(self, paramDict={}):
queryParam = []
for item, value in paramDict.items():
if type(value) is str:
queryParam.append(item + "='" + value + "'")
else:
queryParam.append(item + "=" + str(value))
where = ""
if len(queryParam) > 0:
where = "where " + " and ".join(queryParam)
# TODO: put it in debug log
#print('select * from stocks ' + where + ' order by transaction_date asc')
isSuccess, dataResult = self.runSql([
'select * from stocks ' + where + ' order by transaction_date asc'
])
if isSuccess:
return dataResult
return None
def addTransaction(self, transDate,symbol,trans,quantity,price):
isSuccess, dataResult = self.runSql(
["insert into stocks (transaction_date,symbol,transaction_direction,Quantity,price) values (?,?,?,?,?)"],
[(transDate, symbol, trans, quantity, price)]
)
return isSuccess
# Run sql, support batch
# return 1: True/False for update/delete/insert
# return 2: fetch data for select
def runSql(self, sqlStatementArray, sqlStatementParamArray=[]):
conn = None
if len(sqlStatementParamArray) > 0:
if len(sqlStatementArray) != len(sqlStatementParamArray):
return False,[]
fetchResult = []
try:
conn = sqlite3.connect(self.db)
needCommit = False
for i in range(len(sqlStatementArray)):
if len(sqlStatementParamArray) > 0:
queryResults = conn.execute(sqlStatementArray[i], sqlStatementParamArray[i])
else:
queryResults = conn.execute(sqlStatementArray[i])
if sqlStatementArray[i].strip().lower().startswith("select"):
fetchResult.append(queryResults.fetchall())
else:
needCommit = True
if needCommit:
conn.commit()
except Error as e:
# TODO: Log the error
print(e)
return False, []
else:
return True, fetchResult
finally:
if conn:
conn.close()
# Create Table and initialize Data
    # Transaction Date: yyyy-MM-dd
# Stock Symbol: MSFT
# Transaction: Buy/Sell
# Quantity: 100
    # Transaction Price: 12.34
def initializeDatabase(self, withData = False):
if self.runSql(['''CREATE TABLE stocks (
id INTEGER PRIMARY KEY AUTOINCREMENT,
transaction_date DATE,
symbol text,
transaction_direction text,
Quantity INTEGER,
price REAL
)'''])[0]:
return self.runSql(
["insert into stocks (transaction_date,symbol,transaction_direction,Quantity,price) values (?,?,?,?,?)" for x in range(10)],
[
('2020-01-01', 'AAPL', 'buy', 100, 12.3),
('2020-02-01', 'MSFT', 'buy', 80, 8.3),
('2020-03-01', 'AAPL', 'sell', 80, 10.3),
('2020-04-01', 'MSFT', 'sell', 80, 10.4),
('2020-05-01', 'AAPL', 'sell', 100, 9.3),
('2020-06-01', 'AAPL', 'buy', 100, 14.3),
('2020-07-01', 'MSFT', 'buy', 100, 16.3),
('2020-08-01', 'AAPL', 'buy', 100, 6.3),
('2020-09-01', 'MSFT', 'sell', 80, 10.3),
('2020-10-01', 'AAPL', 'sell', 80, 11.3)
]
)[0]
return False
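
# Illustrative DataController usage (the file name is arbitrary; the schema is
# created and seeded automatically on first use):
# dc = DataController('stocks.db')
# dc.addTransaction('2021-03-17', 'AAPL', 'buy', 10, 120.5)
# ok, rows = dc.runSql(['select count(*) from stocks'])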
if __name__ == "__main__":
activityDisplayWindow = tk.Tk()
displayWindowClass = ActivityDisplayWindow(activityDisplayWindow)
activityEntryWindow = tk.Toplevel(activityDisplayWindow)
ActivityEntryWindow(activityEntryWindow, displayWindowClass)
activityDisplayWindow.mainloop() | 3.953125 | 4 |
scripts/spy.py | tarvitz/dsfp | 30 | 12796332 | # -*- coding: utf-8 -*-
""" DSFP modifications spy, looks for save file modifications
.. module:: watcher
:platform: Linux, Windows, MacOS X
:synopsis: watches for dark souls save file modifications and prints
any modified data in console
.. moduleauthor:: Tarvitz <<EMAIL>>
"""
from __future__ import unicode_literals
import os
import sys
import six
import struct
import json
import argparse
from time import sleep
from datetime import datetime
from textwrap import wrap
from struct import pack, unpack
PROJECT_ROOT = os.path.pardir
sys.path.insert(0, os.path.join(PROJECT_ROOT, 'dsfp'))
from dsfp.utils import chunks
def rel(path):
return os.path.join(PROJECT_ROOT, path)
class Node(object):
def __init__(self):
self.children = []
def add(self, element):
self.children.append(element)
class Leaf(Node):
    __slots__ = ['start', 'size', 'old', 'new']
def __init__(self, start, size, old, new):
super(Leaf, self).__init__()
self.start = start
self.size = size
self.old = old
self.new = new
def add(self, element):
"""
nothing to do as this is leaf
:param element:
:return: None
"""
@staticmethod
def unpack(value, fmt='I'):
return struct.unpack(fmt, value)[0]
def __str__(self):
new = self.unpack(self.new)
old = self.unpack(self.old)
fmt = (
"0x%(addr)08x[%(saddr)10s] %(value)10s 0x%(hex)08x "
"%(follow)5s %(old)10s 0x%(old_hex)08x" % {
'addr': self.start,
'saddr': self.start,
'value': new,
'hex': new,
'old': old,
'old_hex': old,
'follow': '<-'
}
)
return fmt
def __repr__(self):
return "<Leaf: 0x%08x>" % self.start
def _wrap(source, parts):
"""
wrap source to list of equal parts python 3+ only
:param str source: source to wrap
:param int parts: N equal parts
:rtype: list[str]
:return: list of str with N or equal length
"""
return list(chunks(source, parts))
def text_wrap(source, parts):
"""
wrap source to list of equal parts
:param str source: source to wrap
:param int parts: N equal parts
:rtype: list[str]
:return: list of str with N or equal length
"""
if six.PY2:
return wrap(source, parts)
return _wrap(source, parts)
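
# e.g. text_wrap(b'abcdefgh', 4) -> two 4-byte words (the exact element type
# depends on dsfp.utils.chunks / textwrap.wrap, which process_diff handles)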
class NewDiff(object):
"""
"""
def __init__(self, new_stream, old_stream, watchers):
self.new_stream = new_stream
self.old_stream = old_stream
self.watchers = watchers
def read_stream(self, stream, block):
"""
read stream withing given block
:param stream: stream to read
:type stream: six.BytesIO
:param dict block: start offset, size to read
:rtype: str
:return: raw data
"""
start = int(block['start'], 16)
size = int(block['size'], 16)
stream.seek(start)
return stream.read(size)
def process_diff(self, word_size=4):
"""
processes diff
:param int word_size: word size for diff processing
:rtype: list[Leaf]
:return: diffs
"""
nodes = []
for table in self.watchers:
for block in table.get('WATCH', []):
old_data = self.read_stream(self.old_stream, block)
new_data = self.read_stream(self.new_stream, block)
for idx, (old, new) in enumerate(
zip(text_wrap(old_data, word_size),
text_wrap(new_data, word_size))
):
size = int(block['size'], 16) + idx * word_size
start = int(block['start'], 16) + idx * word_size
if old == new:
continue
#: todo decide what's version of python would be
#: more prioritized as textwrap.wrap does not work with
#: bytestring and iterate through it coverts chars back to
#: int there's only one option convert/pack them back in
#: python 3+ which could give performance drop downs.
processed_old = old
processed_new = new
if isinstance(old, list) and isinstance(new, list):
processed_old = pack('B' * word_size, *old)
processed_new = pack('B' * word_size, *new)
nodes.append(
Leaf(start, size, processed_old, processed_new)
)
return nodes
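
# Shape of a watch table consumed by NewDiff / --watch-table (offsets are
# illustrative hex strings; read_stream() parses them with int(x, 16)):
# {"WATCH": [{"start": "0x2c0", "size": "0x40"}]}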
class Spy(object):
""" Changes spy
:param str filename: path inspected filename
:keyword int slot: character slot
:keyword dict skip_table: skip some data which is represented in table
stored in dict
:keyword bool use_curses: use curses interface instead of standard cli
:keyword int start_offset: start inspections with given offset
:keyword int start_offset: end inspections with given offset
"""
def __init__(self, filename, watchers=None):
self.filename = filename
self.watchers = watchers
def read(self):
fo = open(self.filename, 'rb')
return six.BytesIO(fo.read())
@staticmethod
def log(out):
"""
log into the main window
:keyword bool refresh: True if should be refreshed
"""
print(out)
def run(self):
modified = 0
old_stat = os.lstat(self.filename)
old_stream = self.read()
while 1:
sleep(1)
stat = os.lstat(self.filename)
if stat.st_mtime == old_stat.st_mtime:
continue
now = datetime.now()
print("modified: %s [%s]" % (modified, now.strftime('%H:%M:%S')))
old_stat = stat
new_stream = self.read()
diff = NewDiff(old_stream=old_stream,
new_stream=new_stream,
watchers=self.watchers)
for node in diff.process_diff():
print(node)
modified += 1
def get_default_file_name():
"""
running on windows it would get default draks0005.sl2 file location
:rtype: str
:return: draks0005.sl2 file location
"""
prefix = os.path.join(
os.getenv('HOME'), 'Documents/NBGI/DarkSouls/'
)
path = ''
default_file = 'draks0005.sl2'
if sys.version_info[:2] >= (3, 5):
path = next(x for x in os.scandir(prefix) if x.is_dir()).path
else:
for root, directory, files in os.walk(prefix):
for filename in files:
if filename == default_file:
path = os.path.join(prefix, root)
break
return os.path.join(path, default_file)
def main(ns):
filename = ns.filename or get_default_file_name()
watchers = []
if ns.watch_table:
for stream in ns.watch_table:
watchers.append(json.loads(stream.read()))
watcher = Spy(filename=filename, watchers=watchers)
try:
watcher.run()
except KeyboardInterrupt:
print("\nCatch Ctrl+C, exiting ..")
finally:
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Prints changes for darksouls save file.'
)
parser.add_argument('-f', '--filename', metavar='draks0005.sl2',
type=str, dest='filename',
help='save file', required=False)
parser.add_argument('-w', '--watch-table',
dest='watch_table',
metavar='table.json,table2.json',
nargs='+',
type=argparse.FileType('r'),
help=(
'use data inside of json file for choosing what to'
' diff check inside of block with given offsets'),
required=True)
arguments = parser.parse_args(sys.argv[1:])
main(arguments)
| 2.515625 | 3 |
Curso-Em-Video-Python/2Exercicios/090_Dicionarios_em_python.py | pedrohd21/Cursos-Feitos | 0 | 12796333 | <gh_stars>0
'''aluno = {}
aluno['nome'] = str(input('What is your name: '))
aluno['media'] = float(input('What is your average: '))
print(f'Name is {aluno["nome"]}\nAverage is {aluno["media"]:.2f}')
if aluno['media'] >= 7:
    aluno['situação'] = 'Approved'
elif aluno['media'] >= 5:
    aluno['situação'] = 'Make-up exam'
else:
    aluno['situação'] = 'Failed'
print(f'The status of student {aluno["nome"]} is {aluno["situação"]}')'''
print('Solved by Guanabara')
aluno = dict()
aluno['nome'] = str(input('Name: '))
aluno['media'] = float(input(f'Average of {aluno["nome"]}: '))
if aluno['media'] >= 7:
    aluno['situação'] = 'Approved'
elif 5 <= aluno['media'] < 7:
    aluno['situação'] = 'Make-up exam'
else:
    aluno['situação'] = 'Failed'
print('-=' * 30)
print(aluno)
for k, v in aluno.items():
    print(f'{k} is {v}') | 3.828125 | 4
tests/python/test.py | pylbert/firfilt | 0 | 12796334 | #! /usr/bin/env python
def main(filename_raw_in, filename_filtered_out):
import fir
import tempfile
filt = fir.Filter_LP(51, 1000, 7)
    fdout = tempfile.NamedTemporaryFile(mode='w', delete=False)  # text mode: we write float strings
with open(filename_raw_in) as f:
for line in f:
try:
raw = float(line.strip())
fdout.write('%0.08f\n' % filt.do_sample(raw))
            except ValueError:
                # skip lines that are not plain numbers
                pass
fdout.flush()
# Compare the generated file with the golden filtered file
import filecmp
if not filecmp.cmp(filename_filtered_out, fdout.name):
        print('Files not equal:', filename_filtered_out, fdout.name)
exit(-1)
# Delete the file on success
try:
import os
os.remove(fdout.name)
except OSError:
pass
if __name__ == '__main__':
import sys
if len(sys.argv) != 4:
        print('usage: %s /path/to/fir_module /path/to/filter_in_file /path/to/filter_out_golden' % sys.argv[0])
exit(-1)
# Add the module file path to path
sys.path.insert(0, sys.argv[1])
main(sys.argv[2], sys.argv[3])
| 2.59375 | 3 |
foreignc/setup.py | thebridge0491/intro_py | 0 | 12796335 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os, sys, pkgutil, json, glob
from distutils.command.clean import clean as CleanCommand
from setuptools import setup, find_packages, Command
#from setuptools import Extension # for Swig extension
from builtins import open, dict
PROJECT = 'intro_py.foreignc'
HERE = os.path.abspath(os.path.dirname(__file__))
sys.path.extend([os.path.join(HERE, '..')])
## for cffi, swig
#if 'java' in sys.platform.lower():
# raise Exception('This package can not be used with Jython.')
## for jna
#if 'java' not in sys.platform.lower():
# raise Exception('This package can only be used with Jython.')
## jip install <groupId>:<artifactId>:<version> --> javalib/*.jar
## java -jar ivy.jar -dependency <groupId> <artifactId> '[<version>,)' -types jar -retrieve 'javalib/[artifact]-[revision](-[classifier]).[ext]'
#sys.path.extend(glob.glob('javalib/*.jar'))
def disable_commands(*blacklist):
bad_cmds = [arg for cmd in blacklist for arg in sys.argv if cmd in arg]
if [] != bad_cmds:
print('Command(s) {0} have been disabled; exiting'.format(bad_cmds))
raise SystemExit(2)
disable_commands('register', 'upload')
def _matches_filepatterns(filepats, paths):
import fnmatch
matches_pats = [os.path.join(root, file1) for path in paths
for root, dirs, files in os.walk(path) for filepat in filepats
for file1 in fnmatch.filter(dirs + files, filepat)]
return matches_pats
def _remove_pathlist(pathlist):
import shutil
for path in pathlist:
if os.path.exists(path) and os.path.isdir(path):
shutil.rmtree(path)
elif os.path.exists(path):
os.remove(path)
class Clean0(CleanCommand):
description = CleanCommand.description + ' (modified)'
def run(self):
import shutil
CleanCommand.run(self)
if 1 != self.all:
return
_remove_pathlist(_matches_filepatterns(['build', 'dist', '*.egg*',
'.cache', '__pycache__', '.hypothesis', 'htmlcov', '.tox', '*.so',
'*.pyc', '*.pyo', '*~', '.coverage*', '*.log', '*.class'], ['.']))
class Test0(Command):
## nose2 cmd description
#description = 'run nose2 [DEBUG=1] (* addon *)'
description = 'run unittest discover [DEBUG=1] (* addon *)'
user_options = [('opts=', 'o', 'Test options (default: -s {0})'.format(
'/'.join(PROJECT.split('.')[:-1])))]
def initialize_options(self):
self.cwd, self.opts = None, ''
def finalize_options(self):
self.cwd = os.getcwd()
def run(self):
import subprocess
assert os.getcwd() == self.cwd, 'Must be in pkg root: {0}'.format(
self.cwd)
## use nose2
#errno = subprocess.call('{0} -m nose2 {1}'.format(
# sys.executable, self.opts), shell = True)
errno = subprocess.call('{0} -m unittest discover {1}'.format(
sys.executable, self.opts), shell = True)
raise SystemExit(errno)
## for ffi_lib
#PREFIX = os.environ.get('PREFIX', '/usr/local')
#os.environ['LD_LIBRARY_PATH'] = ':'.join([
# os.environ.get('LD_LIBRARY_PATH', '.'), '{}/lib'.format(PREFIX)])
#os.environ['LDFLAGS'] = ' '.join([
# os.environ.get('LDFLAGS', '-Lbuild/lib'), '-L{}/lib'.format(PREFIX)])
#os.environ['CPPFLAGS'] = ' '.join([
# os.environ.get('CPPFLAGS', '-Ibuild/include'),
# '-I{}/include'.format(PREFIX)])
## for Swig extension
#extension_mod = Extension(name='{0}._classic_c'.format(PROJECT),
# # sources=['{0}/classic_c_wrap.c'.format('build')],
# sources=['{0}/classic_c.i'.format(PROJECT.replace('.', '/'))],
# include_dirs=['.', PROJECT.replace('.', '/'), '{}/include'.format(PREFIX)],
# library_dirs=os.environ.get('LD_LIBRARY_PATH', 'build/lib').split(':'),
# libraries=[PROJECT],
# runtime_library_dirs=['$ORIGIN/', '{}/lib'.format(PREFIX)],
# extra_compile_args=os.environ.get('CPPFLAGS', '-Ibuild/include').split(' '),
# extra_link_args=os.environ.get('LDFLAGS', '-Lbuild/lib').split(' '),
# swig_opts=['-modern', '-I.']
# )
cmds_addon = {}
if '1' == os.environ.get('DEBUG', '0').lower():
sys.executable = '{0} -m coverage run'.format(sys.executable)
# setuptools add-on cmds
try:
import setup_addcmds
cmds_addon.update(setup_addcmds.cmdclass)
except ImportError as exc:
print(repr(exc))
with open('README.rst') as f_in:
readme = f_in.read()
with open('HISTORY.rst') as f_in:
history = f_in.read()
json_bytes = pkgutil.get_data(PROJECT, 'resources/pkginfo.json')
pkginfo = json.loads(json_bytes.decode(encoding='utf-8')) if json_bytes is not None else {}
licenseclassifiers = {
"Apache-2.0": "License :: OSI Approved :: Apache Software License",
"MIT": "License :: OSI Approved :: MIT License",
"BSD-3-Clause": "License :: OSI Approved :: BSD License",
"GPL-3.0+": "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"ISC": "License :: OSI Approved :: ISC License (ISCL)",
"Unlicense": "License :: Public Domain"
}
setup(
long_description=readme + '\n\n' + history,
classifiers=[
"Natural Language :: English",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
licenseclassifiers.get('Apache-2.0', "License :: OSI Approved :: Apache Software License"),
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: Jython",
"Topic :: Software Development"
],
#package_dir={'': '.'},
#packages=find_packages(include=[PROJECT, '{0}.tests'.format(PROJECT.replace('.', '/'))]),
packages=find_packages(),
# py_modules=[splitext(basename(path))[0] for path in glob.glob('{0}/*.py'.format('/'.join(PROJECT.split('.')[:-1])))],
#data_files=[('', ['{0}/tests/__main__.py'.format(PROJECT.replace('.', '/'))])], # DON'T USE
#package_data={'': ['{0}/tests/__main__.py'.format(PROJECT.replace('.', '/'))]}, # DON'T USE
#test_suite='{0}.tests'.format(PROJECT),
## for cffi
#cffi_modules=['{0}/classic_build.py:ffibuilder'.format(
# PROJECT.replace('.', '/'))],
## for Swig extension
#ext_modules=[extension_mod],
cmdclass=dict(dict({'clean': Clean0, 'test': Test0}).items()
## setuptools add-on cmds
| cmds_addon.items()
),
**pkginfo
)
| 1.921875 | 2 |
tests/test_cpu.py | mirecl/pprof | 4 | 12796336 | <gh_stars>1-10
import atexit
import inspect
import os.path
import webbrowser
from tempfile import gettempdir
import pytest
from pprof import cpu
from pprof.cpu import show_html
empty_report = '<html><head><meta charset="UTF-8"/></head></html>'
def f(a, b):
# fake func
res = a + b
return res
@pytest.mark.run(order=-1)
def test_cpu(mocker):
report = cpu.get_report()
assert report == empty_report
mocker.patch.object(atexit, "register", lambda x: x)
cpu.auto_report()
mocker.patch.object(webbrowser, "open", lambda x: x)
cpu.open_report()
path = f"{gettempdir()}/cpu_profile.html"
assert os.path.exists(path) is True
wrapper_f = cpu(f)
assert wrapper_f(1, 1) == 2
@pytest.mark.run(order=0)
@pytest.mark.parametrize(
"test_input,expected",
[
([(18, 1, 0), (19, 1, 300_000)], "300.0ms"),
([(18, 1, 0), (19, 1, 0)], "."),
([(18, 1, 1_000_001), (19, 1, 0)], "1.00s"),
],
)
def test_show_html(mocker, test_input, expected):
source = inspect.getsource(f).split("\n")
mocker.patch.object(inspect, "getblock", lambda x: source)
report = show_html({(__file__, 16, "f"): test_input})
    assert expected in report
| 2.296875 | 2 |
apitests/conftest.py | hjalves/playlists | 0 | 12796337 | # This file is loaded by py.test to discover API tests
import pytest
from apitest import APITest
from loader import yaml_load
def pytest_collect_file(parent, path):
if path.ext == ".yaml" and path.basename.startswith("test"):
return APITestFile(path, parent)
class APITestFile(pytest.File):
def collect(self):
doc = yaml_load(self.fspath.open())
if doc:
config = doc.get('config', {})
for test in doc.get('tests', []):
yield APITestItem(test['name'], self, test, config)
class APITestItem(pytest.Item):
def __init__(self, name, parent, api_test, api_config):
super(APITestItem, self).__init__(name, parent)
self.api_test = api_test
self.api_config = api_config
def runtest(self):
test = APITest(self.api_test, self.api_config)
test.runtest()
def reportinfo(self):
return self.fspath, 0, "API Test: %s" % self.name
class YamlException(Exception):
""" custom exception for error reporting. """
| 2.421875 | 2 |
sherpa_client/models/segment_contexts.py | kairntech/sherpa-client | 0 | 12796338 | <reponame>kairntech/sherpa-client
from typing import Any, Dict, Type, TypeVar
import attr
from ..models.segment_context import SegmentContext
T = TypeVar("T", bound="SegmentContexts")
@attr.s(auto_attribs=True)
class SegmentContexts:
""" """
after: SegmentContext
before: SegmentContext
def to_dict(self) -> Dict[str, Any]:
after = self.after.to_dict()
before = self.before.to_dict()
field_dict: Dict[str, Any] = {}
field_dict.update(
{
"after": after,
"before": before,
}
)
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
after = SegmentContext.from_dict(d.pop("after"))
before = SegmentContext.from_dict(d.pop("before"))
segment_contexts = cls(
after=after,
before=before,
)
return segment_contexts
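
# A minimal round-trip sketch; the SegmentContext payloads below are assumptions:
#
#   data = {"after": {...}, "before": {...}}
#   contexts = SegmentContexts.from_dict(data)
#   assert contexts.to_dict() == data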
| 2 | 2 |
katas/kyu_5/The_Hashtag_Generator.py | dusadamey/CodeWars | 0 | 12796339 | # https://www.codewars.com/kata/52449b062fb80683ec000024/
'''
Instructions :
The marketing team is spending way too much time typing in hashtags.
Let's help them with our own Hashtag Generator!
Here's the deal:
It must start with a hashtag (#).
All words must have their first letter capitalized.
If the final result is longer than 140 chars it must return false.
If the input or the result is an empty string it must return false.
Examples
" Hello there thanks for trying my Kata" => "#HelloThereThanksForTryingMyKata"
" Hello World " => "#HelloWorld"
"" => false
'''
def generate_hashtag(s):
    result = '#' + ''.join(word.capitalize() for word in s.split())
    # Empty input/result or a result longer than 140 chars is invalid per the kata
    # (the length check must apply to the result, not the raw input).
    return result if 1 < len(result) <= 140 else False
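
# Quick sanity checks taken from the kata examples above
if __name__ == '__main__':
    assert generate_hashtag(' Hello there thanks for trying my Kata') == '#HelloThereThanksForTryingMyKata'
    assert generate_hashtag(' Hello World ') == '#HelloWorld'
    assert generate_hashtag('') is False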
| 4.1875 | 4 |
My Resources/f3.py | PradeepDuraisamy/Python | 0 | 12796340 | a = 10
b = 5
print(a+b)
| 2.90625 | 3 |
libs.py | BlackGameNeon/first-jump | 0 | 12796341 | #Import the libraries
#Pygame
import pygame
pygame.init()
#os to access files
import os
#Inits
#import win
| 1.515625 | 2 |
python/StartupUI/ThumbnailExportUI.py | borsarinicola/NFTS_nuk_env | 5 | 12796342 | # Thumbnail Exporter Task UI
# Thumbnail image export task which can be used via the Export dialog via Shot, Clip or Sequence Processor
# To install copy the ThumbnailExportTask.py and ThumbnailExportTaskUI.py to your <HIERO_PATH>/Python/Startup directory.
# Keyword tokens exist for:
# {frametype} - Position where the thumbnail was taken from (first/middle/last/custom)
# {srcframe} - The frame number of the original source clip file used for thumbnail
# {dstframe} - The destination frame (timeline time) number used for the thumbnail
# <NAME>, v1.0, 13/10/13
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
import hiero.ui
import ThumbnailExportTask
class ThumbnailExportUI(hiero.ui.TaskUIBase):
kFirstFrame = "First"
kMiddleFrame = "Middle"
kLastFrame = "Last"
kCustomFrame = "Custom"
def __init__(self, preset):
"""Initialize"""
hiero.ui.TaskUIBase.__init__(self, ThumbnailExportTask.ThumbnailExportTask, preset, "Thumbnail Exporter")
    def formatComboBoxChanged(self):
        # Slot to handle changes to the thumbnail file format combo box
        value = self._formatComboBox.currentText()
        self._preset.properties()["format"] = str(value)
    def customOffsetTextChanged(self):
        # Slot to handle changes to the custom frame offset text field
        value = self._customFrameLineEdit.text()
        self._preset.properties()["customFrameOffset"] = str(value)
    def widthTextChanged(self):
        # Slot to handle changes to the thumbnail width text field
        value = self._widthBox.text()
        self._preset.properties()["width"] = str(value)
    def heightTextChanged(self):
        # Slot to handle changes to the thumbnail height text field
        value = self._heightBox.text()
        self._preset.properties()["height"] = str(value)
    def frameTypeComboBoxChanged(self, index):
        # Slot to handle changes to the frame type combo box; the custom offset
        # field is only enabled when the "Custom" frame type is selected
        value = self._frameTypeComboBox.currentText()
        if str(value) == self.kCustomFrame:
            self._customFrameLineEdit.setEnabled(True)
            self._preset.properties()["customFrameOffset"] = str(self._customFrameLineEdit.text())
        else:
            self._customFrameLineEdit.setEnabled(False)
        self._preset.properties()["frameType"] = str(value)
def thumbSizeComboBoxChanged(self, index):
        # Slot to handle changes to the thumbnail size combo box; toggles the width/height fields
value = self._thumbSizeComboBox.currentText()
if value == "Default":
self._widthBox.setEnabled(False)
self._wLabel.setEnabled(False)
self._heightBox.setEnabled(False)
self._hLabel.setEnabled(False)
elif value == "To Box":
self._widthBox.setEnabled(True)
self._heightBox.setEnabled(True)
self._wLabel.setEnabled(True)
self._hLabel.setEnabled(True)
elif value == "Scaled to Width":
self._widthBox.setEnabled(True)
self._wLabel.setEnabled(True)
self._heightBox.setEnabled(False)
self._hLabel.setEnabled(False)
elif value == "Scaled to Height":
self._widthBox.setEnabled(False)
self._wLabel.setEnabled(False)
self._heightBox.setEnabled(True)
self._hLabel.setEnabled(True)
self._preset.properties()["thumbSize"] = unicode(value)
def populateUI(self, widget, exportTemplate):
layout = QFormLayout()
layout.setContentsMargins(9, 0, 9, 0)
widget.setLayout(layout)
# Thumb frame type layout
thumbFrameLayout = QHBoxLayout()
self._frameTypeComboBox = QComboBox()
self._frameTypeComboBox.setToolTip("Specify the frame from which to pick the thumbnail.\nCustom allows you to specify a custom frame offset, relative from the first frame.")
thumbFrameTypes = (self.kFirstFrame, self.kMiddleFrame, self.kLastFrame, self.kCustomFrame)
for index, item in zip(range(0,len(thumbFrameTypes)), thumbFrameTypes):
self._frameTypeComboBox.addItem(item)
if item == str(self._preset.properties()["frameType"]):
self._frameTypeComboBox.setCurrentIndex(index)
self._frameTypeComboBox.setMaximumWidth(80)
self._customFrameLineEdit = QLineEdit()
self._customFrameLineEdit.setEnabled(False)
self._customFrameLineEdit.setToolTip("This is the frame offset from the first frame of the shot/sequence")
self._customFrameLineEdit.setValidator(QIntValidator())
self._customFrameLineEdit.setMaximumWidth(80);
self._customFrameLineEdit.setText(str(self._preset.properties()["customFrameOffset"]))
thumbFrameLayout.addWidget(self._frameTypeComboBox, Qt.AlignLeft)
thumbFrameLayout.addWidget(self._customFrameLineEdit, Qt.AlignLeft)
#thumbFrameLayout.addStretch()
# QImage save format type
self._formatComboBox = QComboBox()
thumbFrameTypes = ("png", "jpg", "tiff", "bmp")
for index, item in zip(range(0,len(thumbFrameTypes)), thumbFrameTypes):
self._formatComboBox.addItem(item)
if item == str(self._preset.properties()["format"]):
self._formatComboBox.setCurrentIndex(index)
self._formatComboBox.currentIndexChanged.connect(self.formatComboBoxChanged)
# QImage save height
# Thumb frame type layout
thumbSizeLayout = QHBoxLayout()
self._thumbSizeComboBox = QComboBox()
self._thumbSizeComboBox.setMaximumWidth(115)
self._thumbSizeComboBox.setToolTip("This determines the size of the thumbnail.\nLeave as Default to use Hiero's internal thumbnail size or specify a box or width/height scaling in pixels.")
thumbSizeTypes = ("Default","To Box", "Scaled to Width", "Scaled to Height")
for index, item in zip(range(0,len(thumbSizeTypes)), thumbSizeTypes):
self._thumbSizeComboBox.addItem(item)
if item == str(self._preset.properties()["thumbSize"]):
self._thumbSizeComboBox.setCurrentIndex(index)
thumbSizeLayout.addWidget(self._thumbSizeComboBox)
self._wLabel = QLabel('w:')
self._wLabel.setFixedWidth(12)
thumbSizeLayout.addWidget(self._wLabel,Qt.AlignLeft)
self._widthBox = QLineEdit()
self._widthBox.setToolTip("Thumbnail width in pixels")
self._widthBox.setEnabled(False)
self._widthBox.setValidator(QIntValidator())
self._widthBox.setMaximumWidth(40)
self._widthBox.setText(str(self._preset.properties()["width"]))
self._widthBox.textChanged.connect(self.widthTextChanged)
thumbSizeLayout.addWidget(self._widthBox,Qt.AlignLeft)
self._hLabel = QLabel('h:')
self._hLabel.setFixedWidth(12)
thumbSizeLayout.addWidget(self._hLabel,Qt.AlignLeft)
self._heightBox = QLineEdit()
self._heightBox.setToolTip("Thumbnail height in pixels")
self._heightBox.setEnabled(False)
self._heightBox.setValidator(QIntValidator())
self._heightBox.setMaximumWidth(40)
self._heightBox.setText(str(self._preset.properties()["height"]))
self._heightBox.textChanged.connect(self.heightTextChanged)
thumbSizeLayout.addWidget(self._heightBox,Qt.AlignLeft)
self._thumbSizeComboBox.currentIndexChanged.connect(self.thumbSizeComboBoxChanged)
self.thumbSizeComboBoxChanged(0)
self._frameTypeComboBox.currentIndexChanged.connect(self.frameTypeComboBoxChanged)
self.frameTypeComboBoxChanged(0) # Trigger to make it set the enabled state correctly
self._customFrameLineEdit.textChanged.connect(self.customOffsetTextChanged)
layout.addRow("Frame Type:",thumbFrameLayout)
layout.addRow("Size:",thumbSizeLayout)
layout.addRow("File Type:",self._formatComboBox)
hiero.ui.taskUIRegistry.registerTaskUI(ThumbnailExportTask.ThumbnailExportPreset, ThumbnailExportUI)
| 2.203125 | 2 |
embeddings.py | ramanshgrover/A-Modern-Approach-To-Image-Captioning | 0 | 12796343 | '''
This code was written by following the following tutorial:
Link: https://medium.com/@martinpella/how-to-use-pre-trained-word-embeddings-in-pytorch-71ca59249f76
This script processes and generates GloVe embeddings
'''
# coding: utf-8
import pickle
from preprocess import Vocabulary
import numpy as np
import bcolz
words = []
idx = 0
word2idx = {}
vectors = bcolz.carray(np.zeros(1), rootdir='glove.6B/6B.300.dat', mode='w')
with open('glove.6B/glove.6B.300d.txt', 'rb') as f:
for l in f:
line = l.decode().split()
word = line[0]
words.append(word)
word2idx[word] = idx
idx += 1
        vect = np.array(line[1:]).astype(float)  # the np.float alias was removed from NumPy
vectors.append(vect)
vectors = bcolz.carray(vectors[1:].reshape((400000, 300)), rootdir='glove.6B/6B.300.dat', mode='w')
vectors.flush()
pickle.dump(words, open('glove.6B/6B.300_words.pkl', 'wb'))
pickle.dump(word2idx, open('glove.6B/6B.300_idx.pkl', 'wb'))
with open('data/vocab.pkl', 'rb') as f:
vocab = pickle.load(f)
print('Loading vocab...')
vectors = bcolz.open('glove.6B/6B.300.dat')[:]
words = pickle.load(open('glove.6B/6B.300_words.pkl', 'rb'))
word2idx = pickle.load(open('glove.6B/6B.300_idx.pkl', 'rb'))
print('glove is loaded...')
glove = {w: vectors[word2idx[w]] for w in words}
matrix_len = len(vocab)
weights_matrix = np.zeros((matrix_len, 300))
words_found = 0
for i, word in enumerate(vocab.idx2word):
try:
weights_matrix[i] = glove[word]
words_found += 1
except KeyError:
weights_matrix[i] = np.random.normal(scale=0.6, size=(300, ))
pickle.dump(weights_matrix, open('glove.6B/glove_words.pkl', 'wb'), protocol=2)
print('weights_matrix is created')
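
# A minimal sketch of consuming the dumped weights in PyTorch (the subject of the
# tutorial linked above); assumes torch is installed:
#
#   import torch
#   import torch.nn as nn
#   weights = pickle.load(open('glove.6B/glove_words.pkl', 'rb'))
#   embedding = nn.Embedding.from_pretrained(torch.tensor(weights, dtype=torch.float))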
| 2.953125 | 3 |
pystematic/standard_plugin/standard_plugin.py | evalldor/pystematic | 1 | 12796344 | <reponame>evalldor/pystematic
import datetime
import functools
import itertools
import logging
import multiprocessing
import multiprocessing.connection
import pathlib
import random
import string
import pystematic
import pystematic.core as core
import wrapt
from rich.console import Console
from rich.markup import escape
from rich.theme import Theme
from .. import parametric
from . import yaml_wrapper as yaml
logger = logging.getLogger('pystematic.standard')
class StandardPlugin:
def __init__(self, app) -> None:
self.api_object = StandardApi()
app.on_experiment_created(self.experiment_created, priority=10)
app.on_before_experiment(self.api_object._before_experiment, priority=10)
app.on_after_experiment(self.api_object._after_experiment, priority=10)
self.extend_api(app.get_api_object())
def experiment_created(self, experiment):
for param in standard_params:
experiment.add_parameter(param)
return experiment
def extend_api(self, api_object):
for name in dir(self.api_object):
if not name.startswith("_"):
setattr(api_object, name, getattr(self.api_object, name))
def _create_log_dir_name(output_dir, experiment_name):
current_time = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
chars = string.digits + string.ascii_lowercase # + string.ascii_uppercase
suffix = "".join(random.SystemRandom().choice(chars) for _ in range(6))
directory = pathlib.Path(output_dir).resolve().joinpath(experiment_name).joinpath(f"{current_time}-{suffix}")
return directory
def _get_log_file_name(output_dir, local_rank):
if local_rank == 0:
return output_dir.joinpath("log.txt")
return output_dir.joinpath(f"log.rank-{local_rank}.txt")
class StandardLogHandler(logging.Handler):
def __init__(self, file_path, no_style=False):
super().__init__()
theme = Theme({
'debug': 'magenta',
'info': 'blue',
'warning': 'yellow',
'error': 'red',
'rank': 'green',
'name': 'green'
}, inherit=False)
if no_style:
theme = Theme({}, inherit=False)
self.console_output = Console(theme=theme)
self.file_handle = file_path.open("a")
self.file_output = Console(file=self.file_handle)
def handle(self, record):
level_str = escape(f"[{record.levelname}]")
level = f"[{record.levelname.lower()}]{level_str}[/{record.levelname.lower()}]"
msg = escape(f"{record.getMessage()}")
name = "[name]" + escape(f'[{record.name}]') + "[/name]"
time_str = datetime.datetime.fromtimestamp(record.created).strftime('%Y-%m-%d %H:%M:%S')
if pystematic.local_rank() > 0 or pystematic.subprocess_counter > 0:
rank = "[rank]" + escape(f"[RANK {pystematic.local_rank()}]") + "[/rank]"
self.console_output.print(f"{level} {rank} {name} {msg}")
self.file_output.print(f"[{time_str}] {level} {rank} {name} {msg}")
else:
self.console_output.print(f"{level} {name} {msg}")
self.file_output.print(f"[{time_str}] {level} {name} {msg}")
if record.exc_info:
self.console_output.print_exception(show_locals=True, suppress=[core])
self.file_output.print_exception(show_locals=True, suppress=[core])
def close(self):
self.file_handle.close()
class StandardApi:
def __init__(self) -> None:
self.current_experiment : core.Experiment = wrapt.ObjectProxy(None)
self.params: dict = wrapt.ObjectProxy(None)
self.output_dir: pathlib.Path = wrapt.ObjectProxy(None)
self.params_file: pathlib.Path = wrapt.ObjectProxy(None)
self.random_gen: random.Random = wrapt.ObjectProxy(None)
self.subprocess_counter: int = wrapt.ObjectProxy(None)
self._log_handler = None
def _before_experiment(self, experiment, params):
self.subprocess_counter.__wrapped__ = 0
self.current_experiment.__wrapped__ = experiment
self.params.__wrapped__ = params
self.random_gen.__wrapped__ = random.Random(params["random_seed"])
if self.params["debug"]:
log_level = "DEBUG"
else:
log_level = "INFO"
if params["subprocess"]:
assert params["local_rank"] > 0
self.output_dir.__wrapped__ = pathlib.Path(params["subprocess"]).parent
self.params_file.__wrapped__ = pathlib.Path(params["subprocess"])
self._log_handler = StandardLogHandler(_get_log_file_name(self.output_dir, params["local_rank"]))
logging.basicConfig(level=log_level, handlers=[self._log_handler], force=True)
logger.debug(f"Initializing subprocess...")
else:
self.output_dir.__wrapped__ = _create_log_dir_name(params["output_dir"], experiment.name)
self.params_file.__wrapped__ = self.output_dir.joinpath("parameters.yaml")
self.output_dir.mkdir(parents=True, exist_ok=False)
self._log_handler = StandardLogHandler(_get_log_file_name(self.output_dir, params["local_rank"]))
logging.basicConfig(level=log_level, handlers=[self._log_handler], force=True)
logger.debug(f"Writing parameters file to '{self.params_file}'.")
with self.params_file.open("w") as f:
yaml.dump(params, f)
def _after_experiment(self, error=None):
end_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if error is not None:
logger.error(f"Experiment ended at {end_time} with an error: {error}", exc_info=error)
else:
logger.info(f"Experiment ended at {end_time}.")
self._log_handler.close()
procs = multiprocessing.active_children()
for proc in procs:
try:
proc.kill()
except Exception:
pass
for proc in procs:
try:
proc.join()
except Exception:
pass
def new_seed(self, nbits=32) -> int:
"""Use this function to generate random numbers seeded by the experiment
parameter ``random_seed``. Expected use is to seed your own random number
generators.
Args:
nbits (int, optional): The number of bits to use to represent the
generated number. Defaults to 32.
Returns:
int: A random number seeded by the experiment parameter ``random_seed``.
"""
return self.random_gen.getrandbits(nbits)
def launch_subprocess(self, **additional_params) -> multiprocessing.Process:
"""Launches a subprocess. The subprocess will be instructed to execute
the main function of the currently running experiment, and have the same
output directory and parameters as the current process.
Args:
**additional_params: Any additional parameters that should be
passed to the subprocess. Params given here takes precedence
over the parameters copied from the current experiment.
.. warning::
The subprocess will be initialized with the same random
seed as the current process. If this is not what you want, you
should pass a new seed to this function in the ``random_seed`` parameter.
E.g.:
.. code-block:: python
pystematic.launch_subprocess(random_seed=pystematic.new_seed())
"""
if self.is_subprocess():
raise AssertionError("A subprocess cannot launch further subprocesses.")
subprocess_params = {name: value for name, value in self.params.items()}
for name, value in additional_params.items():
subprocess_params[name] = value
self.subprocess_counter += 1
subprocess_params["subprocess"] = str(self.params_file)
subprocess_params["local_rank"] = int(self.subprocess_counter)
logger.debug(f"Launching subprocess with arguments '{' '.join(subprocess_params)}'.")
return self.current_experiment.run_in_new_process(subprocess_params)
def run_parameter_sweep(self, experiment, list_of_params, max_num_processes=1) -> None:
"""Runs an experiment multiple times with a set of different params. At most
:obj:`max_num_processes` concurrent processes will be used. This call will block until
all experiments have been run.
Args:
experiment (Experiment): The experiment to run.
list_of_params (list of dict): A list of parameter dictionaries. Each corresponding to
one run of the experiment. See :func:`pystematic.param_matrix` for a convenient way
of generating such a list.
max_num_processes (int, optional): The maximum number of concurrent processes to use
for running the experiments. Defaults to 1.
"""
pool = ProcessQueue(max_num_processes)
pool.run_and_wait_for_completion(experiment, list_of_params)
def is_subprocess(self) -> bool:
"""Returns true if this process is a subprocess. I.e. it has been
launched by a call to :func:`launch_subprocess` in a parent process.
Returns:
bool: Whether or not the current process is a subprocess.
"""
return self.params["subprocess"] is not None
def local_rank(self):
"""Returns the local rank of the current process. The master process
will always have rank 0, and every subprocess launched with
:func:`pystematic.launch_subprocess` will be assigned a new local rank
from an incrementing integer counter starting at 1.
Returns:
int: The local rank of the current process.
"""
return self.params["local_rank"]
def param_matrix(self, **param_values):
"""This function can be used to build parameter combinations to use when
        running parameter sweeps. It takes an arbitrary number of keyword
arguments, where each argument is a parameter and a list of all values
that you want to try for that parameter. It then builds a list of
parameter dictionaries such that all combinations of parameter values
appear once in the list. The output of this function can be passed
directly to :func:`pystematic.run_parameter_sweep`.
For example:
.. code-block:: python
import pystematic as ps
param_list = ps.param_matrix(
int_param=[1, 2],
str_param=["hello", "world"]
)
assert param_list == [
{
"int_param": 1,
"str_param": "hello"
},
{
"int_param": 1,
"str_param": "world"
},
{
"int_param": 2,
"str_param": "hello"
},
{
"int_param": 2,
"str_param": "world"
}
]
Args:
**param_values: A mapping from parameter name to a list of values to try
for that parameter. If a value is not a list or tuple, it is assumed to be constant
(its value will be the same in all combinations).
Returns:
list of dicts: A list of parameter combinations created by taking the cartesian
product of all values in the input.
"""
# Make sure all values are lists
for key, value in param_values.items():
if not isinstance(value, (list, tuple)):
param_values[key] = [value]
keys = param_values.keys()
param_combinations = []
for instance in itertools.product(*param_values.values()):
param_combinations.append(dict(zip(keys, instance)))
return param_combinations
class ParamsFileBehaviour(parametric.DefaultParameterBehaviour):
def on_value(self, param, value: pathlib.Path, result_dict: dict):
super().on_value(param, value, result_dict)
if value is not None:
if not value.exists():
raise ValueError(f"File does not exist: '{value}'.")
blacklisted_config_ops = []
for param in result_dict.get_params():
if hasattr(param, "allow_from_file") and not param.allow_from_file:
blacklisted_config_ops.append(param.name)
with value.open("r") as f:
params_from_file = yaml.load(f)
for key, value in params_from_file.items():
if key not in blacklisted_config_ops:
result_dict.set_value_by_name(key, value)
standard_params = [
core.Parameter(
name="output_dir",
default="./output",
help="Parent directory to store all run-logs in. Will be created if it "
"does not exist.",
type=str
),
core.Parameter(
name="debug",
default=False,
help="Sets debug flag on/off. Configures the python logging mechanism to "
"print all DEBUG messages.",
type=bool,
is_flag=True
),
core.Parameter(
name="params_file",
type=pathlib.Path,
help="Read experiment parameters from a yaml file, such as the one "
"dumped in the output dir from an eariler run. When this option is "
"set from the command line, any other options supplied after this one "
"will override the ones loaded from the file.",
behaviour=ParamsFileBehaviour(),
allow_from_file=False
),
core.Parameter(
name="random_seed",
default=functools.partial(random.getrandbits, 32),
help="The value to seed the master random number generator with.",
type=int,
default_help="<randomly generated>"
),
core.Parameter(
name="subprocess",
default=None,
help="Internally used to indicate that this process is a subprocess. "
"DO NOT USE MANUALLY.",
type=pathlib.Path,
allow_from_file=False,
hidden=True
),
core.Parameter(
name="local_rank",
type=int,
default=0,
help="For multiprocessing, gives the local rank for this process. "
"This parameter is set automatically by the framework, and should not "
"be used manually.",
allow_from_file=False,
hidden=True,
),
]
class ProcessQueue:
def __init__(self, num_processes):
self._mp_context = multiprocessing.get_context('spawn')
self._num_processes = num_processes
self._live_processes = []
def _wait(self):
sentinels = [proc.sentinel for proc in self._live_processes]
finished_sentinels = multiprocessing.connection.wait(sentinels)
completed_procs = []
for proc in self._live_processes:
if proc.sentinel in finished_sentinels:
completed_procs.append(proc)
for proc in completed_procs:
self._live_processes.remove(proc)
def run_and_wait_for_completion(self, experiment, list_of_params):
for params in list_of_params:
while len(self._live_processes) >= self._num_processes:
self._wait()
proc = experiment.run_in_new_process(params)
self._live_processes.append(proc)
while len(self._live_processes) > 0:
self._wait()
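
# A minimal usage sketch combining param_matrix and run_parameter_sweep; the
# experiment object is an assumption:
#
#   import pystematic as ps
#   param_list = ps.param_matrix(int_param=[1, 2], str_param=["hello", "world"])
#   ps.run_parameter_sweep(my_experiment, param_list, max_num_processes=2)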
| 2.078125 | 2 |
scisalt/matplotlib/setup_axes.py | joelfrederico/mytools | 1 | 12796345 | <reponame>joelfrederico/mytools
import os as _os
_on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'
if not _on_rtd:
import numpy as _np
from .setup_figure import setup_figure as _setup_figure
def setup_axes(rows=1, cols=1, figsize=(8, 6), expand=True, tight_layout=None, **kwargs):
"""
Sets up a figure of size *figsize* with a number of rows (*rows*) and columns (*cols*). \*\*kwargs passed through to :meth:`matplotlib.figure.Figure.add_subplot`.
.. versionadded:: 1.2
Parameters
----------
rows : int
Number of rows to create.
cols : int
Number of columns to create.
figsize : tuple
Size of figure to create.
    expand : bool
        Scale `figsize` by the number of columns and rows, so each subplot keeps the requested size.
    tight_layout : dict or str, optional
        Tight-layout options passed to the figure; the string "pdf" applies a preset rect that leaves room for a suptitle.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
The figure.
axes : :class:`numpy.ndarray`
An array of all of the axes. (Unless there's only one axis, in which case it returns an object instance :class:`matplotlib.axis.Axis`.)
"""
if expand:
figsize = (figsize[0]*cols, figsize[1]*rows)
figargs = {}
if isinstance(tight_layout, dict):
figargs["tight_layout"] = tight_layout
elif tight_layout == "pdf":
figargs["tight_layout"] = {"rect": (0, 0, 1, 0.95)}
dpi = kwargs.pop('dpi', None)
fig, gs = _setup_figure(rows=rows, cols=cols, figsize=figsize, dpi=dpi, **figargs)
axes = _np.empty(shape=(rows, cols), dtype=object)
for i in range(rows):
for j in range(cols):
axes[i, j] = fig.add_subplot(gs[i, j], **kwargs)
if axes.shape == (1, 1):
return fig, axes[0, 0]
else:
return fig, axes
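
# A minimal usage sketch; the plotted data is arbitrary:
#
#   fig, axes = setup_axes(rows=2, cols=3, figsize=(4, 3))
#   axes[0, 0].plot([0, 1], [0, 1])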
| 2.53125 | 3 |
Python/dbinfo.py | mfunduc/dbcomp | 0 | 12796346 | import sys
import sqlite3
from tableinfo import TableInfo
class DbInfo(object):
def __init__(self, name):
self.name = name
self.conn = sqlite3.connect(name)
self.tables = {}
self.conn.text_factory = lambda x: str(x, 'utf-8', 'ignore')
cursor = self.conn.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type =\'table\' AND name NOT LIKE \'sqlite_%\'")
rows = cursor.fetchall()
if len(rows) > 0:
tableNames = []
for row in rows:
tableNames.append(row[0])
for tableName in tableNames:
self.tables[tableName] = TableInfo(self.conn, tableName)
def compareTables(self, tableName, numColumns, db):
cursor = self.conn.cursor()
sql = "SELECT * FROM '" + tableName + "'"
cursor.execute(sql)
        rows = cursor.fetchall()
        if len(rows) == 0:
            # Nothing to compare: treat two empty tables with matching schemas as
            # equal (assumes row counts were already matched by TableInfo.compare).
            return True, 0
        cursor2 = db.conn.cursor()
        cursor2.execute(sql)
        for rowNum, row in enumerate(rows):
            row2 = cursor2.fetchone()
            if (row is None) or (row2 is None):
                return False, rowNum
            for col in range(numColumns):
                if row[col] != row2[col]:
                    return False, rowNum
        return True, 0
def compare(self, db):
matches = True
matched = []
onlyOne = []
onlyTwo = []
for tableName, tableInfo in self.tables.items():
tableInfo2 = db.tables.get(tableName)
if tableInfo2 is not None:
if tableInfo.compare(tableInfo2):
if tableInfo.numRows < 1000:
dataMatched, rowNum = self.compareTables(tableName, len(tableInfo.columns), db)
if not dataMatched:
matches = False
sys.stdout.write('Different Data for Table: {} in row {}\n'.format(tableName, rowNum + 1))
else:
matched.append(tableInfo)
else:
matched.append(tableInfo)
else:
matches = False
sys.stdout.write('Different Table: {} {} {} but in {} {}\n'.format(tableName, self.name, tableInfo.toStr(False),
db.name, tableInfo2.toStr(False)))
else:
matches = False
onlyOne.append(tableName)
for tableName, tableInfo in db.tables.items():
if tableName not in self.tables:
matches = False
onlyTwo.append(tableName)
if len(matched) > 0:
sys.stdout.write("*************** {} matched tables ****************\n".format(len(matched)))
for table in matched:
sys.stdout.write("Table: {}\n".format(table.toStr(True)))
if len(onlyOne) > 0:
sys.stdout.write("*************** {} tables only in {} ****************\n".format(len(onlyOne), self.name))
for table in onlyOne:
sys.stdout.write("Table: {}\n".format(table))
if len(onlyTwo) > 0:
sys.stdout.write("*************** {} tables only in {} ****************\n".format(len(onlyTwo), db.name))
for table in onlyTwo:
sys.stdout.write("Table: {}\n".format(table))
return matches
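
# A minimal usage sketch; the database file names are assumptions:
#
#   db1 = DbInfo('before.sqlite')
#   db2 = DbInfo('after.sqlite')
#   if db1.compare(db2):
#       sys.stdout.write('Databases match\n')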
| 3.375 | 3 |
setup.py | portugueslab/arrayqueues | 27 | 12796347 | <filename>setup.py
# Use setuptools' setup so install_requires/extras_require are honoured
from setuptools import find_namespace_packages, setup
with open("requirements_dev.txt") as f:
requirements_dev = f.read().splitlines()
with open("requirements.txt") as f:
requirements = f.read().splitlines()
with open("README.md") as f:
long_description = f.read()
setup(
name="arrayqueues",
version="1.3.0",
author="<NAME> @portugueslab",
author_email="<EMAIL>",
license="MIT",
packages=find_namespace_packages(exclude=("docs", "tests*")),
install_requires=requirements,
extras_require=dict(dev=requirements_dev),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Multimedia :: Video",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords="multiprocessing queues arrays",
description="Multiprocessing queues for numpy arrays using shared memory",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/portugueslab/arrayqueues",
)
| 1.289063 | 1 |
FindHighestAltitude.py | vanigupta20024/Programming-Challenges | 14 | 12796348 | <reponame>vanigupta20024/Programming-Challenges<filename>FindHighestAltitude.py
'''
There is a biker going on a road trip. The road trip consists of n + 1 points at different altitudes.
The biker starts his trip on point 0 with altitude equal 0.
You are given an integer array gain of length n where gain[i] is the net gain in altitude between points i
and i + 1 for all (0 <= i < n). Return the highest altitude of a point.
Example 1:
Input: gain = [-5,1,5,0,-7]
Output: 1
Explanation: The altitudes are [0,-5,-4,1,1,-6]. The highest is 1.
'''
from typing import List

class Solution:
    def largestAltitude(self, gain: List[int]) -> int:
        # Turn the gains into a running (prefix) sum: gain[i] becomes the
        # altitude reached after step i + 1.
        for i in range(1, len(gain)):
            gain[i] += gain[i - 1]
        # The trip starts at altitude 0, so the answer is never negative.
        return max(0, max(gain))
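
# Quick check with the example from the docstring above
if __name__ == '__main__':
    assert Solution().largestAltitude([-5, 1, 5, 0, -7]) == 1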
| 4.21875 | 4 |
cargo/logic/binary.py | jaredlunde/cargo-orm | 3 | 12796349 | """
`Binary Logic and Operations`
--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--
2016 <NAME> © The MIT License (MIT)
http://github.com/jaredlunde
"""
import psycopg2.extensions
from cargo.expressions import *
__all__ = ('BinaryLogic',)
class BinaryLogic(BaseLogic):
__slots__ = tuple()
CONCAT_OP = '||'
def _cast_bytes(self, string):
if isinstance(string, bytes):
return psycopg2.extensions.Binary(string)
return string
def concat(self, string, **kwargs):
""" String concatenation
-> (:class:Expression)
"""
string = self._cast_bytes(string)
return Expression(self, self.CONCAT_OP, string, **kwargs)
def octet_length(self, **kwargs):
""" Number of bytes in binary string
-> (:class:Function)
"""
return Function('octet_length', self, **kwargs)
def overlay(self, substring, from_, for_=None, **kwargs):
""" Replace @substring
-> (:class:Function)
"""
substring = self._cast_bytes(substring)
exps = [self,
Expression(self.empty,
'placing',
Expression(substring, 'from', from_))]
if for_:
exps.append(Expression(self.empty, 'for', for_))
return Function('overlay', Clause("", *exps), **kwargs)
def position(self, substring):
""" Location of specified @substring
-> (:class:Function)
"""
substring = self._cast_bytes(substring)
return Function('position', Expression(substring, 'in', self))
def substring(self, from_=None, for_=None, **kwargs):
""" Extracts substring from @from_ to @for_
-> (:class:Function)
"""
exps = []
if from_ is not None:
exps.append(Expression(self.empty, 'from', from_))
if for_ is not None:
exps.append(Expression(self.empty, 'for', for_))
return Function('substring', Clause("", self, *exps), **kwargs)
def trim(self, bytes_, both=False, **kwargs):
""" Remove the longest string containing only the bytes in @bytes_
from the start and end of the string
-> (:class:Expression)
"""
bytes_ = self._cast_bytes(bytes_)
exp = Expression(bytes_, 'from', self)
if both:
exp = Clause('both', exp)
return Function('trim', exp, **kwargs)
def encode(self, format, **kwargs):
""" Encode binary data into a textual representation. Supported
formats are: base64, hex, escape. escape converts zero bytes and
            high-bit-set bytes to octal sequences (\\nnn) and doubles
backslashes.
-> (:class:Function)
"""
return F.encode(self, format, **kwargs)
def decode(self, format, **kwargs):
""" Decode binary data from textual representation in string. Options
for format are same as in encode.
-> (:class:Function)
"""
return F.decode(self, format, **kwargs)
def get_bit(self, offset, **kwargs):
""" Extract bit from @string
-> (:class:Function)
"""
return F.get_bit(self, offset, **kwargs)
def get_byte(self, offset, **kwargs):
""" Extract byte from @string
-> (:class:Function)
"""
return F.get_byte(self, offset, **kwargs)
def set_bit(self, offset, new_value, **kwargs):
""" Set bit in @string
-> (:class:Function)
"""
return F.set_bit(self, offset, new_value, **kwargs)
def set_byte(self, offset, new_value, **kwargs):
""" Set byte in @string
-> (:class:Function)
"""
return F.set_byte(self, offset, new_value, **kwargs)
def length(self, **kwargs):
""" Length of binary @string
-> (:class:Function)
"""
return F.length(self, **kwargs)
def md5(self, **kwargs):
""" Calculates the MD5 hash of @string, returning the result in
hexadecimal.
-> (:class:Function)
"""
return F.md5(self, **kwargs)
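
# A minimal, hypothetical sketch of how a field that mixes in BinaryLogic might
# be used; `some_binary_field` is an assumption:
#
#   some_binary_field.substring(from_=1, for_=8)  # -> Function('substring', ...)
#   some_binary_field.md5()                       # -> Function('md5', ...)
#   some_binary_field.concat(b'\x00\x01')         # -> Expression with the '||' operator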
| 2.84375 | 3 |
examples/YinMo/add_album_images.py | zbx911/CHRLINE | 0 | 12796350 | <reponame>zbx911/CHRLINE
# -*- coding: utf-8 -*-
from CHRLINE import *
cl = CHRLINE()
# Group ID: str
groupId = None
# Album ID: str, u can get it from getAlbums or create a new album.
albumId = None
# Image's path: list<str>
images = []
for i in images:
print(f"--> Try to Upload {i}")
oid = cl.updateImageToAlbum(groupId, albumId, i)
print(f"<-- OID: {oid}")
cl.addImageToAlbum(groupId, albumId, oid)
print(f"Done.")
| 2.296875 | 2 |