id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses: 1 value)
---|---|---|
9765797
|
<gh_stars>1-10
"""mp_notice
Revision ID: cf266bf19ef3
Revises: <PASSWORD>
Create Date: 2019-05-04 22:42:02.845137
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cf266bf19ef3'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('likecomment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('comment_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.String(length=32), nullable=True),
sa.Column('time', sa.DATETIME(), nullable=True),
sa.ForeignKeyConstraint(['comment_id'], ['article1.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['role1.uuid'], ),
sa.PrimaryKeyConstraint('id')
)
op.add_column('role1', sa.Column('last_reply_read_time', sa.DATETIME(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('role1', 'last_reply_read_time')
op.drop_table('likecomment')
# ### end Alembic commands ###
|
StarcoderdataPython
|
191726
|
# Generated by Django 3.1.8 on 2021-04-14 06:57
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('listings', '0003_auto_20210412_0924'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('listing', models.CharField(max_length=255)),
('name', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
('phone', models.CharField(max_length=50)),
('message', models.TextField(blank=True)),
('contact_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('listing_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='listings.listing')),
('user_id', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
],
),
]
|
StarcoderdataPython
|
8073188
|
<gh_stars>1-10
import bisect
import collections
from datetime import timedelta
Update = collections.namedtuple("Update", ["timestamp", "value"])
class TimeSeries:
def __init__(self):
"""An object that manages TimeSeries that include a timestamp and value.
Attributes:
series (Update): The chronological record of updates to the instance.
"""
self.series = []
def __getitem__(self, index):
return self.series[index]
def update(self, timestamp, value):
"""Updates the TimeSeries instance's series with a new entry.
Args:
timestamp (datetime.datetime): The timestamp of the update.
            value (int): The value of the update.
"""
bisect.insort_left(self.series, Update(timestamp, value))
def get_closing_price(self, on_date):
"""Returns a given dates closing price.
This is the stock's last price from an update on the date or the closing price for the previous day if an
update has not occurred.
Args:
on_date (datetime.datetime): The on_date being checked for a closing price.
Raises:
ValueError: If stock has not had any updates.
Returns:
            The closing price if it exists; otherwise recurses via self.get_closing_price(previous day).
"""
if self.series:
date_history = [update for update in self.series if update.timestamp.date() == on_date.date()]
return date_history[-1].value if date_history else self.get_closing_price(on_date - timedelta(days=1))
else:
raise ValueError("stock has not had any updates")
def has_sufficient_update_history(self, on_date, num_of_days):
"""Checks for sufficient update history data from a given date backwards with a given number of days.
Args:
on_date (datetime.datetime): The date on which the cross over signal is to be checked.
num_of_days (int): The number of days of history.
Returns:
True if there is sufficient data, False if not.
"""
earliest_date = on_date.date() - timedelta(days=num_of_days)
return earliest_date < self.series[0].timestamp.date()
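# Minimal usage sketch (not part of the original module; dates and prices are illustrative):
if __name__ == "__main__":
    from datetime import datetime

    prices = TimeSeries()
    prices.update(datetime(2019, 5, 3, 16, 0), 100)
    prices.update(datetime(2019, 5, 4, 16, 0), 102)
    # 2019-05-05 has no update, so the previous day's last value is returned.
    print(prices.get_closing_price(datetime(2019, 5, 5, 12, 0)))  # -> 102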
|
StarcoderdataPython
|
9702002
|
'''
Collection of localised thin multipole maps.
For formulae see e.g. SIXTRACK:
SixTrack Physics Manual
<NAME> and <NAME>
August 18, 2015
or, likewise,
A Symplectic Six-Dimensional Thin-Lens Formalism for Tracking
<NAME>, <NAME>
April 5, 1995
@authors: <NAME>
@date: 23/03/2016
'''
from math import factorial
from PyHEADTAIL.general.element import Element
class ThinQuadrupole(Element):
'''Thin quadrupolar map.'''
def __init__(self, k1l, *args, **kwargs):
'''Arguments:
- k1l: normalised strength times the length of the
quadrupole magnet [1/m]
'''
self.kL = k1l
def track(self, beam):
beam.xp -= self.kL * beam.x
beam.yp += self.kL * beam.y
class ThinSkewQuadrupole(Element):
'''Thin skew quadrupolar map.'''
def __init__(self, k1sl, *args, **kwargs):
'''Arguments:
- k1sl: normalised strength times the length of the
skew quadrupole magnet [1/m]
'''
self.kL = k1sl
def track(self, beam):
beam.xp += self.kL * beam.y
beam.yp += self.kL * beam.x
class ThinSextupole(Element):
'''Thin sextupolar map.'''
def __init__(self, k2l, *args, **kwargs):
'''Arguments:
- k2l: normalised strength times the length of the
sextupole magnet [1/m^2]
'''
self.kL = k2l
def track(self, beam):
beam.xp -= 0.5 * self.kL * (beam.x*beam.x - beam.y*beam.y)
beam.yp += self.kL * beam.x * beam.y
class ThinOctupole(Element):
'''Thin octupolar map.'''
def __init__(self, k3l, *args, **kwargs):
'''Arguments:
- k3l: normalised strength times the length of the
octupole magnet [1/m^3]
'''
self.kL = k3l
self.kL6 = k3l / 6.
def track(self, beam):
beam.xp -= self.kL6 * (beam.x*beam.x*beam.x - 3*beam.x*beam.y*beam.y)
beam.yp -= self.kL6 * (beam.y*beam.y*beam.y - 3*beam.x*beam.x*beam.y)
class ThinMultipole(Element):
'''Implements the Horner scheme to efficiently calculate the
polynomials for any order multipole maps.
'''
def __init__(self, knl, ksl=[], *args, **kwargs):
        '''MAD style counting of normal and skew multipole strengths:
[dipolar, quadrupolar, sextupolar, octupolar, ...] components.
Arguments:
- knl: list of normalised normal strengths times the length
of the multipole magnet [1/m^order] in ascending
order
Optional arguments:
- ksl: list of normalised skew strengths times the length
of the multipole magnet [1/m^order] in ascending
order
N.B.: If knl and ksl have different lengths, zeros are appended
until they are equally long.
'''
newlen = max(len(knl), len(ksl))
knl = list(knl) + [0] * (newlen - len(knl))
ksl = list(ksl) + [0] * (newlen - len(ksl))
self.knl = knl
self.ksl = ksl
def track(self, beam):
dxp, dyp = self.ctaylor(beam.x, beam.y, self.knl, self.ksl)
beam.xp -= dxp
beam.yp += dyp
@staticmethod
def ctaylor(x, y, kn, ks):
'''Efficient Horner scheme.'''
dpx = kn[-1]
dpy = ks[-1]
nn = list(range(1, len(kn) + 1))
for n, kkn, kks in list(zip(nn, kn, ks))[-2::-1]:
dpxi = (dpx*x - dpy*y) / float(n)
dpyi = (dpx*y + dpy*x) / float(n)
dpx = kkn + dpxi
dpy = kks + dpyi
return dpx, dpy
@staticmethod
def ztaylor(x, y, kn, ks):
'''Same as ctaylor but using complex numbers, slower but more
readable -- added for the sake of clarity.
'''
z = (x + 1j*y)
res = 0
for n, (kkn, kks) in enumerate(zip(kn, ks)):
res += (kkn + 1j*kks) * z**n / factorial(n)
return res.real, res.imag
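# Consistency sketch (not part of the original module): for scalar coordinates the Horner
# form ctaylor and the complex-number ztaylor should agree; the strengths below are made up.
if __name__ == "__main__":
    knl_demo, ksl_demo = [0.1, 0.5, -0.2], [0.0, 0.3, 0.1]
    dx_c, dy_c = ThinMultipole.ctaylor(0.01, -0.02, knl_demo, ksl_demo)
    dx_z, dy_z = ThinMultipole.ztaylor(0.01, -0.02, knl_demo, ksl_demo)
    assert abs(dx_c - dx_z) < 1e-12 and abs(dy_c - dy_z) < 1e-12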
|
StarcoderdataPython
|
5060059
|
<gh_stars>1-10
from util import time_it
@time_it
def linear_search(numbers_list, number_to_find):
for index, element in enumerate(numbers_list):
if element == number_to_find:
return index
return -1
if __name__ == '__main__':
numbers_list = [12, 15, 17, 19, 21, 24, 45, 67]
number_to_find = 21
index = linear_search(numbers_list, number_to_find)
print(f"Number found at index {index} using linear search")
|
StarcoderdataPython
|
9704362
|
import pytest
import sqlite3
import os
@pytest.fixture(scope='module')
def sqlite_connection():
    # TODO: set up test instance of db then destroy after
dbname = 'data/test_data.db'
# TODO: check if db does not exist before continuing
db = sqlite3.connect(dbname)
sql_create_photos_table = """ CREATE TABLE IF NOT EXISTS `photos` (
`objectID` TEXT NOT NULL UNIQUE,
`url` TEXT,
`thumb` TEXT,
`title` TEXT,
`desc` TEXT,
`taken` TEXT,
`CRC` TEXT,
PRIMARY KEY(`objectID`)
); """
sql_create_photo_record = """ INSERT INTO `photos` (
`objectID`,
`url`,
`thumb`,
`title`,
`desc`,
`taken`,
`CRC`)
VALUES (
"96de6a68-4fc2-411c-91a3-52ae37879481",
"https://s3-ap-southeast-2.amazonaws.com/flaskgallery-photos/600.png",
"https://s3-ap-southeast-2.amazonaws.com/flaskgallery-photos/300.png",
"Photo #1",
"This is a description of the photograph",
"2018-01-01",
"0x784DD132"
); """
c = db.cursor()
c.execute(sql_create_photos_table)
db.commit()
c.execute(sql_create_photo_record)
db.commit()
db.close()
yield dbname
os.remove('data/test_data.db')
@pytest.fixture(scope='module')
def jsonfile_connection():
dbname = 'data/test_data.json'
return dbname
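# Hypothetical example test using the sqlite_connection fixture above (not part of the
# original module); like the fixture, it assumes the data/ directory exists.
def test_photos_table_has_seed_row(sqlite_connection):
    conn = sqlite3.connect(sqlite_connection)
    count = conn.execute("SELECT COUNT(*) FROM photos").fetchone()[0]
    conn.close()
    assert count == 1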
|
StarcoderdataPython
|
1603982
|
from encargoapi import app
from encargoapi.auth import auth
from encargoapi.config import db
from encargoapi.user.model import User
from flask import (
abort,
g,
jsonify,
request,
url_for,
)
@app.route('/api/v1.0/users', methods = ['GET'])
def user():
return jsonify({'users': 'ok'})
@app.route('/api/v1.0/users', methods = ['POST'])
def create_user():
username = request.json.get('username')
password = request.json.get('password')
if username is None or password is None:
abort(400) # missing arguments
if User.query.filter_by(username = username).first() is not None:
abort(400) # existing user
user = User(username = username)
user.hash_password(password)
db.session.add(user)
db.session.commit()
return jsonify(
{
'id':user.id,
'username':user.username,
}
), 201, {'Location': url_for('get_user', user = user, _external = True)}
@app.route('/api/v1.0/resource')
@auth.login_required
def get_resource():
return jsonify({ 'data': 'Hello, {}!'.format(g.user.username) })
@app.route('/api/v1.0/user', methods=['GET'])
@auth.login_required
def get_user():
return jsonify({
'user': {
'id': g.user.id,
'username': g.user.username,
}
})
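# Hedged usage sketch (not part of the original module): assumes the API is served locally
# on port 5000; the username and password values are placeholders.
if __name__ == "__main__":
    import requests

    resp = requests.post(
        "http://localhost:5000/api/v1.0/users",
        json={"username": "alice", "password": "s3cret"},
    )
    print(resp.status_code, resp.json())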
|
StarcoderdataPython
|
8128434
|
<gh_stars>1-10
# coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="deconvolution_filter_properties.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
import pprint
import re
import six
from asposeimagingcloud.models.filter_properties_base import FilterPropertiesBase
class DeconvolutionFilterProperties(FilterPropertiesBase):
"""Deconvolution Filter Options, abstract class
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'snr': 'float',
'brightness': 'float',
'grayscale': 'bool',
'is_partial_loaded': 'bool'
}
attribute_map = {
'snr': 'Snr',
'brightness': 'Brightness',
'grayscale': 'Grayscale',
'is_partial_loaded': 'IsPartialLoaded'
}
def __init__(self, snr=None, brightness=None, grayscale=None, is_partial_loaded=None):
"""DeconvolutionFilterProperties - a model defined in Swagger"""
super(DeconvolutionFilterProperties, self).__init__()
self._snr = None
self._brightness = None
self._grayscale = None
self._is_partial_loaded = None
if snr is not None:
self.snr = snr
if brightness is not None:
self.brightness = brightness
if grayscale is not None:
self.grayscale = grayscale
if is_partial_loaded is not None:
self.is_partial_loaded = is_partial_loaded
@property
def snr(self):
"""Gets the snr of this DeconvolutionFilterProperties.
Gets or sets the SNR(signal-to-noise ratio) recommended range 0.002 - 0.009, default value = 0.007
:return: The snr of this DeconvolutionFilterProperties.
:rtype: float
"""
return self._snr
@snr.setter
def snr(self, snr):
"""Sets the snr of this DeconvolutionFilterProperties.
Gets or sets the SNR(signal-to-noise ratio) recommended range 0.002 - 0.009, default value = 0.007
:param snr: The snr of this DeconvolutionFilterProperties.
:type: float
"""
if snr is None:
raise ValueError("Invalid value for `snr`, must not be `None`")
self._snr = snr
@property
def brightness(self):
"""Gets the brightness of this DeconvolutionFilterProperties.
Gets or sets the brightness. recommended range 1 - 1.5 default value = 1.15
:return: The brightness of this DeconvolutionFilterProperties.
:rtype: float
"""
return self._brightness
@brightness.setter
def brightness(self, brightness):
"""Sets the brightness of this DeconvolutionFilterProperties.
Gets or sets the brightness. recommended range 1 - 1.5 default value = 1.15
:param brightness: The brightness of this DeconvolutionFilterProperties.
:type: float
"""
if brightness is None:
raise ValueError("Invalid value for `brightness`, must not be `None`")
self._brightness = brightness
@property
def grayscale(self):
"""Gets the grayscale of this DeconvolutionFilterProperties.
Gets or sets a value indicating whether this DeconvolutionFilterProperties is grayscale. Return grayscale mode or RGB mode.
:return: The grayscale of this DeconvolutionFilterProperties.
:rtype: bool
"""
return self._grayscale
@grayscale.setter
def grayscale(self, grayscale):
"""Sets the grayscale of this DeconvolutionFilterProperties.
Gets or sets a value indicating whether this DeconvolutionFilterProperties is grayscale. Return grayscale mode or RGB mode.
:param grayscale: The grayscale of this DeconvolutionFilterProperties.
:type: bool
"""
if grayscale is None:
raise ValueError("Invalid value for `grayscale`, must not be `None`")
self._grayscale = grayscale
@property
def is_partial_loaded(self):
"""Gets the is_partial_loaded of this DeconvolutionFilterProperties.
Gets a value indicating whether this instance is partial loaded.
:return: The is_partial_loaded of this DeconvolutionFilterProperties.
:rtype: bool
"""
return self._is_partial_loaded
@is_partial_loaded.setter
def is_partial_loaded(self, is_partial_loaded):
"""Sets the is_partial_loaded of this DeconvolutionFilterProperties.
Gets a value indicating whether this instance is partial loaded.
:param is_partial_loaded: The is_partial_loaded of this DeconvolutionFilterProperties.
:type: bool
"""
if is_partial_loaded is None:
raise ValueError("Invalid value for `is_partial_loaded`, must not be `None`")
self._is_partial_loaded = is_partial_loaded
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeconvolutionFilterProperties):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
StarcoderdataPython
|
1858009
|
<reponame>featureoverload/upgrade-marshmallow
from marshmallow import Schema
from marshmallow import fields
def NoneEmptyString(**kwargs): # noqa
return fields.String(**kwargs)
class FooSchema(Schema):
name = NoneEmptyString(title='foo name', description="foo name")
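# Minimal usage sketch (not part of the original module; the payload is illustrative):
if __name__ == "__main__":
    # With marshmallow 3 this prints {'name': 'example'}.
    print(FooSchema().dump({"name": "example"}))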
|
StarcoderdataPython
|
1928729
|
<reponame>josephquang97/pymemapi
import sqlite3
from PyMemAPI import __version__
from PyMemAPI import Memrise, SQLite, Course
from PyMemAPI.exception import LoginError, InvalidSeperateElement, AddBulkError, AddLevelError, InputOutOfRange, LanguageError
import unittest
from pytest import MonkeyPatch
# Test version
def test_version():
assert __version__ == "0.1.0"
# Test Memrise features
CLIENT = Memrise()
COURSE: Course
class TestMemrise(unittest.TestCase):
def setUp(self):
self.monkeypatch = MonkeyPatch()
def test_login_fail(self):
with self.assertRaises(LoginError):
CLIENT.login("testingerror", "nopassword")
def test_select_course(self):
user = {"Enter username: ":"dummy_user", "Enter password: ":"<PASSWORD>"}
# self.monkeypatch.setattr("builtins.input", lambda msg: user[msg])
# self.monkeypatch.setattr("getpass.getpass", lambda msg: user[msg])
success = CLIENT.login("dummy_user","testing2022")
if success is True:
responses = {"Make your choice: ": "1"}
self.monkeypatch.setattr("builtins.input", lambda msg : responses[msg])
global COURSE
COURSE = CLIENT.select_course()
COURSE.delete_all_level()
assert COURSE.name == "Testing Course"
else:
assert success
# Unit test for Course
class TestCourse(unittest.TestCase):
def test_addlevel_with_bulk(self):
global COURSE
success = COURSE.add_level_with_bulk("Test Level", "Hello\tXinChao", "\t")
self.assertEqual(success, True)
def test_delete_level(self):
global COURSE
level_id, headers = COURSE.add_level()
success = COURSE.delete_level(level_id)
self.assertEqual(success, True)
def test_move_level(self):
global COURSE
success = COURSE.move_level(1,2)
self.assertEqual(1,1)
def test_update_external_language(self):
global COURSE
COURSE._update_audio_external("en")
self.assertEqual(1,1)
# Test the Exceptions
class TestException(unittest.TestCase):
def setUp(self):
self.monkeypatch = MonkeyPatch()
    # When the separator element is different from "tab" or "comma".
def test_InvalidSeperateElement(self):
global COURSE
with self.assertRaises(InvalidSeperateElement):
success = COURSE.add_level_with_bulk("Test Level", "Hello\tXinChao", "a")
    # When the user requests unsupported languages to generate audio -> Handled
    # This test case will fail on Linux
def test_LanguageError(self):
global COURSE
responses = {
"Choose the voice number 1: ": "1",
"Enter the number of voices you wish: ": "1",
}
self.monkeypatch.setattr("builtins.input", lambda msg : responses[msg])
COURSE.update_audio("unvalid language")
# Raise Exception for Coverage
def test_AddLevelException(self):
with self.assertRaises(AddLevelError):
raise AddLevelError(id="1",message="Test")
# Raise Exception for Coverage
def test_AddBulkException(self):
with self.assertRaises(AddBulkError):
raise AddBulkError(id="1",message="Test")
def test_InputOutOfRangeException(self):
with self.assertRaises(InputOutOfRange):
responses = {"Make your choice: ": "99"}
self.monkeypatch.setattr("builtins.input", lambda msg : responses[msg])
CLIENT.select_course()
def test_TypeError(self):
global COURSE
success = COURSE.add_level_with_bulk("Test Level", "Hello\tXinChao", "\t")
level = (COURSE.levels())[0]
word = (level.get_words())[0]
with self.assertRaises(TypeError):
word.upload_audio(1)
# Test SQLite
# This test case is for Windows
def test_sync_database(db_conn,cmd):
cur: sqlite3.Cursor = db_conn.cursor()
cur.executescript(cmd)
cur.close()
db_conn.commit()
global COURSE
COURSE.sync_database("./course/course.db")
level = (COURSE.levels())[-1]
assert (level.name=="I can't say for sure")
def test_remove_audio():
global COURSE
level = (COURSE.levels())[-1]
words = level.get_words()
for word in words:
word.remove_audio()
word.upload_audio("./audio/audio.mp3")
with open("./audio/audio.mp3","rb") as fp:
audio = fp.read()
word.upload_audio(audio)
assert (1==1)
class TestSQLite(unittest.TestCase):
def test_SQLite_topic_to_bulk(self):
with self.assertRaises(Exception):
db = SQLite("./course/course.db")
db.update_ipas()
db.update_trans(src="en",dest="vi")
db.topic_to_bulk(1,external=True)
db.conn.close()
def test_SQLite_topic_to_bulk2(self):
db = SQLite("./course/course.db")
bulk = db.topic_to_bulk(1,external=True,language="en")
self.assertIsInstance(bulk,str)
def test_ExceptionLanguageError(self):
with self.assertRaises(LanguageError):
db = SQLite("./course/course.db")
db.topic_to_bulk(1,external=True,language="fr")
db.conn.close()
|
StarcoderdataPython
|
8104775
|
# Date Imports
from datetime import date
# AGAGD Models Imports
import agagd_core.models as agagd_models
# AGAGD Django Tables Imports
from agagd_core.tables.beta import (
GamesTable,
PlayersInformationTable,
PlayersOpponentTable,
PlayersTournamentTable,
)
# Django Imports
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.shortcuts import render
# Django Tables 2 Imports
from django_tables2 import RequestConfig
def players_profile(request, player_id):
player = agagd_models.Member.objects.get(member_id=player_id)
player_games = agagd_models.Game.objects.filter(
Q(pin_player_1__exact=player_id) | Q(pin_player_2__exact=player_id)
).order_by("-game_date")
player_rating = agagd_models.Players.objects.filter(
Q(pin_player__exact=player_id)
).values("pin_player", "rating", "sigma")
# compute additional tables for opponents & tournament info. here
# TODO: refactor this into something nicer.
opponent_data = {}
tourney_data = {}
for game in player_games:
try:
t_dat = tourney_data.get(game.tournament_code.pk, {})
t_dat["tournament"] = game.tournament_code
t_dat["won"] = t_dat.get("won", 0)
t_dat["lost"] = t_dat.get("lost", 0)
# Set default game_date to None
game_date = None
# Check for 0000-00-00 dates
if game.game_date != u"0000-00-00":
game_date = game.game_date
t_dat["date"] = t_dat.get("date", game_date)
op = game.player_other_than(player)
opp_dat = opponent_data.get(op, {})
opp_dat["opponent"] = op
opp_dat["total"] = opp_dat.get("total", 0) + 1
opp_dat["won"] = opp_dat.get("won", 0)
opp_dat["lost"] = opp_dat.get("lost", 0)
if game.won_by(player):
opp_dat["won"] += 1
t_dat["won"] += 1
else:
opp_dat["lost"] += 1
t_dat["lost"] += 1
opponent_data[op] = opp_dat
tourney_data[game.tournament_code.pk] = t_dat
except ObjectDoesNotExist:
print("failing game_id: %s" % game.pk)
opp_table = PlayersOpponentTable(opponent_data.values())
RequestConfig(request, paginate={"per_page": 10}).configure(opp_table)
t_table = PlayersTournamentTable(
tourney_data.values(),
sorted(
tourney_data.values(),
key=lambda d: d.get("date", date.today()) or date.today(),
reverse=True,
),
prefix="ts_played",
)
RequestConfig(request, paginate={"per_page": 10}).configure(t_table)
player_games_table = GamesTable(
player_games.values(
"game_date",
"handicap",
"pin_player_1",
"pin_player_2",
"tournament_code",
"result",
)
)
player_information_dict = player.__dict__
player_information_dict["rating"] = player_rating[0]["rating"]
players_information_table = PlayersInformationTable([player_information_dict])
return render(
request,
"beta.player_profile.html",
{
"page_title": "Player Profile | {}".format(player.full_name),
"player": player,
"player_rating": player_rating[0],
"player_games_table": player_games_table,
"players_information_table": players_information_table,
"player_opponents_table": opp_table,
"player_tournaments_table": t_table,
},
)
|
StarcoderdataPython
|
3473737
|
from typing import List, Optional
from django.shortcuts import reverse
from iamheadless_publisher_site.pydantic_models import BaseItemContentsPydanticModel, BaseItemDataPydanticModel, BaseItemPydanticModel
from .conf import settings
from .urls import urlpatterns
class FlatPageContentPydanticModel(BaseItemContentsPydanticModel):
title: str
slug: str
language: str
content: Optional[str]
seo_keywords: Optional[str]
seo_description: Optional[str]
class FlatPageDataPydanticModel(BaseItemDataPydanticModel):
contents: List[FlatPageContentPydanticModel]
class FlatPagePydanticModel(BaseItemPydanticModel):
_content_model = FlatPageContentPydanticModel
_data_model = FlatPageDataPydanticModel
_display_name_plural = 'flat pages'
_display_name_singular = 'flat page'
_item_type = 'flat_page'
_searchable = True
_browsable = True
_urlpatterns = urlpatterns
data: FlatPageDataPydanticModel
def get_item_url(self, language):
data = self.get_display_data(language)
slug = self._content_model.get_slug(data)
return reverse(
settings.URLNAME_FLATPAGE,
kwargs={
'language': language,
'slug': slug,
}
)
@property
def CONTENTS(self):
return self.dict()['data']['contents']
|
StarcoderdataPython
|
179327
|
<gh_stars>1-10
from . import (control, demo, demographics, epi, interventions, timings,
matrices)
|
StarcoderdataPython
|
5056501
|
"""
Some helper functions for workspace stuff
"""
import logging
import re
import biokbase
import biokbase.workspace
from biokbase.workspace import client as WorkspaceClient
g_log = logging.getLogger(__name__)
# regex for parsing out workspace_id and object_id from
# a "ws.{workspace}.{object}" string
ws_regex = re.compile('^ws\.(?P<wsid>\d+)\.obj\.(?P<objid>\d+)')
# regex for parsing out a user_id from a token
user_id_regex = re.compile('^un=(?P<user_id>\w+)\|')
# Exception for a malformed workspace ID see regex above
class BadWorkspaceID(Exception):
pass
# Exception for a workspace object not found see regex above
class BadWorkspaceID(Exception):
pass
class PermissionsError(WorkspaceClient.ServerError):
"""Raised if user does not have permission to
access the workspace.
"""
@staticmethod
def is_permissions_error(err):
"""Try to guess if the error string is a permission-denied error
for the narrative (i.e. the workspace the narrative is in).
"""
pat = re.compile("\s*[Uu]ser \w+ may not \w+ workspace.*")
return pat.match(err) is not None
def __init__(self, name=None, code=None, message=None, **kw):
WorkspaceClient.ServerError.__init__(self, name, code,
message, **kw)
# List of fields returned by the list_workspace_objects function
list_ws_obj_fields = ['id','type','moddate','instance','command',
'lastmodifier','owner','workspace','ref','chsum',
'metadata','objid']
# The list_workspace_objects method has been deprecated, the
# list_objects method is the current primary method for fetching
# objects, and has a different field list
list_objects_fields = ['objid', 'name', 'type', 'save_date', 'ver', 'saved_by',
'wsid', 'workspace', 'chsum', 'size', 'meta']
obj_field = dict(zip(list_objects_fields,range(len(list_objects_fields))))
# object type for a project tag object
ws_tag_type = 'KBaseNarrative.Metadata'
# object type for a project tag object
ws_narrative_type = 'KBaseNarrative.Narrative'
# object name for project tag
ws_tag = {'project' : '_project'}
def get_wsobj_meta(wsclient, objtype=ws_narrative_type, ws_id=None):
"""
Takes an initialized workspace client. Defaults to searching for
Narrative types in any workspace that the token has at least read access to.
If the ws field is specified then it will return the workspace metadata
for only the workspace specified
Returns a dictionary of object descriptions - the key is a workspace id of
the form "ws.{workspace_id}.obj.{object_id}" and the values are dictionaries
keyed on the list_ws_obj_field list above.
Raises: PermissionsError, if access is denied
"""
try:
if ws_id is None:
res = wsclient.list_objects({'type' : objtype,
'includeMetadata' : 1})
else:
res = wsclient.list_objects({'type' : objtype,
'includeMetadata' : 1,
'ids' : [ws_id] })
except WorkspaceClient.ServerError, err:
if PermissionsError.is_permissions_error(err.message):
raise PermissionsError(name=err.name, code=err.code,
message=err.message, data=err.data)
my_narratives = {}
for obj in res:
my_narratives["ws.%s.obj.%s" % (obj[obj_field['wsid']],obj[obj_field['objid']])] = dict(zip(list_objects_fields,obj))
return my_narratives
def get_wsid(wsclient, workspace):
"""
When given a workspace name, returns the numeric ws_id
"""
try:
ws_meta = wsclient.get_workspace_info({'workspace' : workspace});
except WorkspaceClient.ServerError, e:
if e.message.find('not found') >= 0 or e.message.find('No workspace with name') >= 0:
return(None)
else:
raise e
return( ws_meta[0])
def alter_workspace_metadata(wsclient, ref, new_metadata={}, ws_id=None):
"""
    Wrapper for the workspace alter_workspace_metadata call.
    Takes an initialized workspace client and either a numeric ws_id or a
    reference of the form "ws.{ws_id}.obj.{object id}", and applies
    new_metadata to that workspace's metadata.
"""
if ws_id is None and ref is not None:
match = ws_regex.match(ref)
if not match:
raise BadWorkspaceID("%s does not match workspace ID format ws.{workspace id}.obj.{object id}" % ws_id)
ws_id = match.group(1)
elif ws_id is None and ref is None:
raise BadWorkspaceID("No workspace id or object reference given!")
wsclient.alter_workspace_metadata({'wsi':{'id':ws_id}, 'new':new_metadata})
def get_wsobj(wsclient, ws_id, objtype=None):
"""
This is just a wrapper for the workspace get_objects call.
Takes an initialized workspace client and a workspace ID
of the form "ws.{ws_id}.obj.{object id}" and returns the following:
{
'data' : {actual data contained in the object},
'metadata' : { a dictionary version of the object metadata },
... all the fields that are normally returned in a ws ObjectData type
}
if type is not specified then an extra lookup for object metadata
is required, this can be shortcut by passing in the object type
"""
match = ws_regex.match( ws_id)
if not match:
raise BadWorkspaceID("%s does not match workspace ID format ws.{workspace id}.obj.{object id}" % ws_id)
ws = match.group(1)
objid = match.group(2)
objs = wsclient.get_objects([dict( wsid=ws, objid=objid)])
if len(objs) < 1:
raise BadWorkspaceID( "%s could not be found" % ws_id)
elif len(objs) > 1:
raise BadWorkspaceID( "%s non-unique! Weird!!!" % ws_id)
res=objs[0]
res['metadata'] = dict(zip(list_objects_fields,objs[0]['info']))
return res
def delete_wsobj(wsclient, wsid, objid):
"""
Given a workspace client, and numeric workspace id and object id, delete it
returns true on success, false otherwise
"""
try:
wsclient.delete_objects( [{ 'wsid' : wsid,
'objid' : objid }] )
except WorkspaceClient.ServerError, e:
raise e
# return False
return True
# Write an object to the workspace, takes the workspace id, an object of the
# type workspace.ObjectSaveData
# typedef structure {
# type_string type;
# UnspecifiedObject data;
# obj_name name;
# obj_id objid;
# usermeta meta;
# list<ProvenanceAction> provenance;
# boolean hidden;
# } ObjectSaveData;
def rename_wsobj(wsclient, identity, new_name):
"""
Given an object's identity, change that object's name.
"""
try:
obj_info = wsclient.rename_object({ 'obj' : identity,
'new_name' : new_name })
except WorkspaceClient.ServerError, e:
raise e
return dict(zip(list_objects_fields, obj_info))
def put_wsobj(wsclient, ws_id, obj):
try:
ws_meta = wsclient.save_objects({ 'id' : ws_id,
'objects' : [obj] })
except:
raise
return dict(zip(list_objects_fields,ws_meta[0]))
# Tag a workspace as a project, if there is an error, let it propagate up
def check_project_tag(wsclient, ws_id):
try:
tag = wsclient.get_object_info( [{ 'wsid' : ws_id,
'name' : ws_tag['project'] }],
0);
except WorkspaceClient.ServerError, e:
# If it is a not found error, create it, otherwise reraise
if e.message.find('not found') >= 0 or e.message.find('No object with name') >= 0:
obj_save_data = { 'name' : ws_tag['project'],
'type' :ws_tag_type,
'data' : { 'description' : 'Tag! You\'re a project!'},
'meta' : {},
'provenance' : [],
'hidden' : 1}
ws_meta = wsclient.save_objects( { 'id' : ws_id,
'objects' : [obj_save_data]});
else:
raise e
return True
def get_user_id(wsclient):
"""Grab the userid from the token in the wsclient object
This is a pretty brittle way to do things, and will need to be
changed, eventually.
"""
try:
token = wsclient._headers.get('AUTHORIZATION', None)
if token is None:
g_log.error("auth.error No 'AUTHORIZATION' key found "
"in client headers: '{}'"
.format(wsclient._headers))
return None
match = user_id_regex.match(token)
if match:
return match.group(1)
else:
return None
except Exception, e:
g_log.error("Cannot get userid: {}".format(e))
raise e
def check_homews(wsclient, user_id = None):
"""
Helper routine to make sure that the user's home workspace is built. Putting it here
so that when/if it changes we only have a single place to change things.
Takes a wsclient, and if it is authenticated, extracts the user_id from the token
and will check for the existence of the home workspace and
create it if necessary. Will pass along any exceptions. Will also make sure that
it is tagged with a workspace_meta object named "_project"
returns the workspace name and workspace id as a tuple
Note that parsing the token from the wsclient object is brittle and should be changed!
"""
if user_id is None:
user_id = get_user_id(wsclient)
try:
homews = "%s:home" % user_id
workspace_identity = { 'workspace' : homews }
ws_meta = wsclient.get_workspace_info( workspace_identity)
except WorkspaceClient.ServerError, e:
# If it is a not found error, create it, otherwise reraise
if e.message.find('not found') >= 0 or e.message.find('No workspace with name') >= 0:
ws_meta = wsclient.create_workspace({ 'workspace' : homews,
'globalread' : 'n',
'description' : 'User home workspace'})
elif e.message.find('deleted') >= 0:
wsclient.undelete_workspace( { 'workspace' : homews})
ws_meta = wsclient.get_workspace_info( workspace_identity)
else:
raise e
if ws_meta:
# check_project_tag(wsclient, ws_meta[0])
# return the textual name and the numeric ws_id
return ws_meta[1],ws_meta[0]
else:
raise Exception('Unable to find or create or undelete home workspace: %s' % homews)
|
StarcoderdataPython
|
12802768
|
import copy
from typing import Optional, Collection, Any, Dict, Tuple
from causalpy.bayesian_graphs.scm import (
SCM,
NoiseGenerator,
Assignment,
IdentityAssignment,
MaxAssignment,
SignSqrtAssignment,
SinAssignment,
)
import networkx as nx
import pandas as pd
import numpy as np
class SumAssignment(Assignment):
def __init__(self, *assignments, offset: float = 0.0):
super().__init__()
self.assignment = assignments
self.offset = offset
self.coefficients = np.ones(len(assignments))
def __call__(self, noise, *args, **kwargs):
args = self.parse_call_input(*args, **kwargs)
return noise + self.coefficients @ args + self.offset
def __len__(self):
return len(self.coefficients)
def function_str(self, variable_names: Optional[Collection[str]] = None):
rep = "N"
var_strs = [
f"{assignment.function_str([var])}"
for assignment, var in zip(self.assignment, variable_names[1:])
]
if var_strs:
rep += f" + {' + '.join(var_strs)}"
return rep
class ProductAssignment(Assignment):
def __init__(self, *assignments, offset: float = 0.0):
super().__init__()
self.assignment = assignments
self.offset = offset
self.coefficients = np.ones(len(assignments))
def __call__(self, noise, *args, **kwargs):
args = self.parse_call_input(*args, **kwargs)
out = noise + self.offset
if args:
out += np.prod(args, axis=0)
return out
def __len__(self):
return len(self.coefficients)
def function_str(self, variable_names: Optional[Collection[str]] = None):
rep = "N"
var_strs = [
f"{assignment.function_str([var])}"
for assignment, var in zip(self.assignment, variable_names[1:])
]
if var_strs:
rep += f" + {' * '.join(var_strs)}"
return rep
class HeinzeData:
_possible_values_ = dict(
sample_size=[100, 200, 500, 2000, 5000],
target=[f"X_{i}" for i in range(6)],
noise_df=[2, 3, 5, 10, 20, 50, 100],
multiplicative=[True, False],
shift=[True, False],
meanshift=[0, 0.1, 0.2, 0.5, 1, 2, 5, 10],
strength=[0, 0.1, 0.2, 0.5, 1, 2, 5, 10],
mechanism=[
IdentityAssignment,
MaxAssignment,
SignSqrtAssignment,
SinAssignment,
],
interventions=["all", "rand", "close"],
)
def __init__(
self, config: Optional[Dict[str, Any]] = None, seed: Optional[int] = None
):
self.seed = seed
self.rng = np.random.default_rng(self.seed)
self.config = (
self.draw_config() if config is None else self.verify_config(config)
)
self.scm = self.get_scm()
self.intervention_values = dict()
def verify_config(self, config: Dict[str, Any]):
for key in config.keys():
if config[key] not in self._possible_values_[key]:
raise ValueError(
f"Value '{config[key]}' of key '{key}' not within range of allowed values."
)
return config
def draw_config(self):
rng = np.random.default_rng(self.seed)
poss_vals = self._possible_values_
config = dict()
# uniform draws
for param in self._possible_values_.keys():
config[param] = rng.choice(poss_vals[param])
return config
def get_scm(self, noise_seed: Optional[int] = None):
config = self.config
assignment_map = dict()
def get_seed(i):
if noise_seed is not None:
return noise_seed + i
return None
binary_op_assignment = (
ProductAssignment if config["multiplicative"] else SumAssignment
)
mechanism = config["mechanism"]
coeffs = dict()
coeffs[(0, 1)] = 1
coeffs[(0, 2)] = 1
coeffs[(1, 2)] = -1
coeffs[(2, 3)] = -1
coeffs[(3, 5)] = -1
coeffs[(2, 5)] = 1
coeffs[(4, 5)] = 1
df = config["noise_df"]
assignment_map["X_0"] = (
[],
binary_op_assignment(),
NoiseGenerator("standard_t", df=df, seed=get_seed(0)),
)
assignment_map["X_1"] = (
["X_0"],
binary_op_assignment(mechanism(coefficient=coeffs[0, 1])),
NoiseGenerator("standard_t", df=df, seed=get_seed(1)),
)
assignment_map["X_2"] = (
["X_0", "X_1"],
binary_op_assignment(
mechanism(coefficient=coeffs[0, 2]), mechanism(coefficient=coeffs[1, 2])
),
NoiseGenerator("standard_t", df=df, seed=get_seed(2)),
)
assignment_map["X_3"] = (
["X_2"],
binary_op_assignment(mechanism(coefficient=coeffs[2, 3])),
NoiseGenerator("standard_t", df=df, seed=get_seed(3)),
)
assignment_map["X_4"] = (
[],
binary_op_assignment(),
NoiseGenerator("standard_t", df=df, seed=get_seed(4)),
)
assignment_map["X_5"] = (
["X_3", "X_2", "X_4"],
binary_op_assignment(
mechanism(coefficient=coeffs[3, 5]),
mechanism(coefficient=coeffs[2, 5]),
mechanism(coefficient=coeffs[4, 5]),
),
NoiseGenerator("standard_t", df=df, seed=get_seed(5)),
)
return SCM(assignment_map)
def set_intervention_values(self, intervention_number: int = 1):
try:
return self.intervention_values[intervention_number]
except KeyError:
config = self.config
interv_setting = config["interventions"]
target = config["target"]
meanshift = config["meanshift"]
scale = config["strength"]
if interv_setting == "all":
variables = [var for var in self.scm.graph.nodes if var != target]
values = (
self.rng.standard_t(size=len(variables), df=config["noise_df"])
* scale
+ meanshift
)
elif interv_setting == "rand":
parents = list(self.scm[target][0])
descendants = list(
nx.algorithms.dag.descendants(self.scm.graph, target)
)
parent = [self.rng.choice(parents)] if parents else []
descendant = [self.rng.choice(descendants)] if descendants else []
variables = parent + descendant
values = (
self.rng.standard_t(size=len(variables), df=config["noise_df"])
* scale
+ meanshift
)
else:
parents = list(self.scm[target][0])
children = list(self.scm.graph.successors(target))
parent = [self.rng.choice(parents)] if parents else []
child = [self.rng.choice(children)] if children else []
variables = parent + child
values = (
self.rng.standard_t(size=len(variables), df=config["noise_df"])
* scale
+ meanshift
)
self.intervention_values[intervention_number] = variables, values
def set_intervention(self, intervention_number: int):
variables, values = self.intervention_values[intervention_number]
if self.config["shift"]:
new_assignments = {
var: {"assignment": copy.deepcopy(self.scm[var][1]["assignment"])}
for var in variables
}
for (var, items), value in zip(new_assignments.items(), values):
items["assignment"].coefficient = value
self.scm.intervention(interventions=new_assignments)
else:
self.scm.do_intervention(variables, values)
def sample(self) -> Tuple[pd.DataFrame, str, np.ndarray]:
self.set_intervention_values(1)
self.set_intervention_values(2)
sample_size = self.config["sample_size"]
obs = [self.scm.sample(sample_size)]
envs = [0] * sample_size
vars = sorted(self.scm.get_variables())
for i in range(1, 3):
self.set_intervention(i)
obs.append(self.scm.sample(sample_size)[vars])
self.scm.undo_intervention()
envs += [i] * sample_size
obs = pd.concat(obs, sort=True)
envs = np.array(envs)
return obs, self.config["target"], envs
|
StarcoderdataPython
|
1916449
|
from unittest import result
from pytesseract import Output
import pytesseract
import cv2
image = cv2.imread("images_0.jpg")
x = 0
y = 0
w = 1080
h = 800
image = image[y: y+h,x: x+w]
rgb = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(rgb,output_type=Output.DICT,lang='chi_tra',config='--psm 11')
print(results['text'])
|
StarcoderdataPython
|
3495041
|
class CAPostalCode:
__slots__ = ['postal_code', 'city', 'place_names', 'province']
def __init__(
self,
postal_code,
city,
place_names,
province
):
self.postal_code = postal_code
self.city = city
self.place_names = place_names
self.province = province
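# Minimal usage sketch (not part of the original module; values are illustrative):
if __name__ == "__main__":
    code = CAPostalCode("K1A 0B1", "Ottawa", ["Ottawa (Parliament Hill)"], "ON")
    print(code.postal_code, code.city, code.province)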
|
StarcoderdataPython
|
1820263
|
<filename>aioupbit/v1/__init__.py
from __future__ import annotations
from .aiohttp_client import *
from .client import *
from .constants import *
from .values import *
RestClient = AioHTTPRestClient
|
StarcoderdataPython
|
8092003
|
<gh_stars>10-100
import hoomd
# Initialize the simulation.
device = hoomd.device.CPU()
sim = hoomd.Simulation(device=device)
sim.create_state_from_gsd(filename='random.gsd')
# Set the operations for a Lennard-Jones particle simulation.
integrator = hoomd.md.Integrator(dt=0.005)
cell = hoomd.md.nlist.Cell()
lj = hoomd.md.pair.LJ(nlist=cell)
lj.params[('A', 'A')] = dict(epsilon=1, sigma=1)
lj.r_cut[('A', 'A')] = 2.5
integrator.forces.append(lj)
nvt = hoomd.md.methods.NVT(kT=1.5, filter=hoomd.filter.All(), tau=1.0)
integrator.methods.append(nvt)
sim.operations.integrator = integrator
# Instantiate a ThermodynamicQuantities object to compute kinetic energy.
thermodynamic_properties = hoomd.md.compute.ThermodynamicQuantities(
filter=hoomd.filter.All())
sim.operations.computes.append(thermodynamic_properties)
# Run the simulation.
sim.run(1000)
# Access the system kinetic energy on all ranks.
kinetic_energy = thermodynamic_properties.kinetic_energy
# Print the kinetic energy only on rank 0.
if device.communicator.rank == 0:
print(kinetic_energy)
|
StarcoderdataPython
|
313159
|
#! /usr/bin/env python
"""
Run this either with
$ python -m paderbox.utils.strip_solution name_template.ipynb name_solution.ipynb
or directly with
$ paderbox.strip_solution name_template.ipynb name_solution.ipynb
"""
import re
from pathlib import Path
import fire
import nbformat
CODE_MAGIC_WORD = '# REPLACE'
LATEX_MAGIC_WORD = '% REPLACE'
def code_replace(source, cell_type='code'):
"""
Args:
source: solution code
cell_type: 'code', 'markdown' or 'raw'
Returns:
template code
>>> code_replace('return 1. / (1 + np.exp(-x)) # REPLACE return ???')
'return ???'
>>> print(code_replace(
... '''b = 1
... a = 1. / (1 + np.exp(-x)) # REPLACE
... return 1. / (1 + np.exp(-x)) # REPLACE return ???'''
... ))
b = 1
return ???
>>> print(code_replace(
... '''b = 1
... a = 1. / (1 + np.exp(-x)) # REPLACE a =
... return 1. / (1 + np.exp(-x)) # REPLACE return ???'''
... ))
b = 1
a =
return ???
"""
if cell_type in ['code', 'markdown']:
MAGIC_WORD = {
'code': CODE_MAGIC_WORD,
'markdown': LATEX_MAGIC_WORD,
}[cell_type]
if MAGIC_WORD in source:
new_source_lines = []
for line in source.split('\n'):
if MAGIC_WORD in line:
solution, template = line.split(MAGIC_WORD)
# Remove leading whitespaces
template = template.lstrip(' ')
if template == '':
continue
whitespace = re.search('( *)', line).group(1)
new_source_lines.append(whitespace + template.lstrip(' '))
else:
new_source_lines.append(line)
source = '\n'.join(new_source_lines)
elif cell_type in ['raw']:
pass
else:
raise TypeError(cell_type, source)
return source
def nb_replace(old_path, new_path, force=False, strip_output=False):
"""Remove the solution from a Jupyter notebook.
python -m paderbox.utils.strip_solution _solution.ipynb _template.ipynb
python -m paderbox.utils.strip_solution _solution.ipynb _template.ipynb --force
python -m paderbox.utils.strip_solution _solution.ipynb _template.ipynb --strip-output
python -m paderbox.utils.strip_solution _solution.ipynb _template.ipynb --force --strip-output
Args:
old_path: The input notebook with the ending `_solution.ipynb`.
new_path: The output notebook with the ending `_template.ipynb`.
        force: If enabled, allow overwriting an existing output notebook.
strip_output: Whether to use nbstripout.strip_output to remove output
cells from the notebook.
For example, assume the following line is in a notebook:
x = 10 # REPLACE x = ???
The result will then be:
x = ???
The following example
y = 42 # REPLACE y = # TODO
q = 328 # REPLACE
z = y * q # REPLACE
    will result in (without a replacement the line will be deleted)
y = # TODO
In Markdown cells this code will search for the string `% REPLACE`
instead of `# REPLACE`.
Suggestion:
Generate 4 files for the students:
The solution and the generated template, where this function
removed the solution. For both files you could generate an HTML
        preview, so they can view them without a Jupyter server.
The solution should contain executed cells, while the template
shouldn't.
Suggestion to produce all 4 files:
$ name=<solutionNotebookPrefix>
$ python -m paderbox.utils.strip_solution ${name}_solution.ipynb build/${name}_template.ipynb --force --strip-output
$ jupyter nbconvert --to html build/${name}_template.ipynb
$ cat ${name}_solution.ipynb | nbstripout > build/${name}_solution.ipynb
$ jupyter nbconvert --execute --allow-errors --to html build/${name}_solution.ipynb
"""
old_path = Path(old_path)
new_path = Path(new_path)
assert old_path != new_path, (old_path, new_path)
assert old_path.is_file(), f'{old_path} is not a file.'
assert old_path.name.endswith('_solution.ipynb'), old_path
assert new_path.name.endswith('_template.ipynb'), new_path
if not force and new_path.is_file():
raise FileExistsError(f'{new_path} already exists.')
nb = nbformat.read(str(old_path), nbformat.NO_CONVERT)
replacements = 0
for _, cell in enumerate(nb['cells']):
cell_source = code_replace(cell['source'], cell['cell_type'])
if cell_source != cell['source']:
replacements += 1
cell['source'] = cell_source
if strip_output:
import nbstripout
nb = nbstripout.strip_output(nb, keep_count=False, keep_output=False)
print(f'Replaced {replacements} lines of code.')
nbformat.write(nb, str(new_path), nbformat.NO_CONVERT)
def entry_point():
"""Used by Fire library to create a source script."""
fire.Fire(nb_replace)
if __name__ == '__main__':
fire.Fire(nb_replace)
|
StarcoderdataPython
|
4920451
|
<reponame>kagemeka/atcoder-submissions
import sys
import typing
import numba as nb
import numpy as np
@nb.njit
def enumerate_fx() -> np.ndarray:
a = np.array([1])
for _ in range(12):
b = []
for x in a:
for i in range(10):
b.append(x * i)
a = np.unique(np.array(b))
return a
@nb.njit
def f(x: int) -> int:
p = 1
while x:
x, r = divmod(x, 10)
p *= r
return p
@nb.njit((nb.i8, nb.i8), cache=True)
def solve(n: int, b: int) -> typing.NoReturn:
cands = enumerate_fx() + b
cnt = 0
for x in cands:
cnt += 1 <= x <= n and x - f(x) == b
print(cnt)
def main() -> typing.NoReturn:
n, b = map(int, input().split())
solve(n, b)
main()
|
StarcoderdataPython
|
1928464
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 <NAME> <EMAIL>
#
import logging
log = logging.getLogger('view')
from PyQt4 import QtGui, QtCore, QtOpenGL
from PyQt4.Qt import Qt
from .. import model
LINE_SIZE = 512
PAGE_SIZE = 4096
# LINE_SIZE=512*4
# PAGE_SIZE=4096*16
class MemoryMappingScene(QtGui.QGraphicsScene):
'''
Binds a MemoryHandler mapping to a QGraphicsScene
'''
def __init__(self, mapping, parent=None):
QtGui.QGraphicsScene.__init__(self, parent)
self.mapping = mapping
class MemoryMappingView(QtGui.QGraphicsView):
'''
We need to define our own QGraphicsView to play with.
zoom-able QGraphicsView.
from http://www.qtcentre.org/wiki/index.php?title=QGraphicsView:_Smooth_Panning_and_Zooming
'''
# Holds the current centerpoint for the view, used for panning and zooming
CurrentCenterPoint = QtCore.QPointF()
# From panning the view
LastPanPoint = QtCore.QPoint()
def __init__(self, parent=None):
QtGui.QGraphicsView.__init__(self, parent)
self.setRenderHints(
QtGui.QPainter.Antialiasing | QtGui.QPainter.SmoothPixmapTransform)
# opengl ? !
# self.setViewport(QtOpenGL.QGLWidget(QtOpenGL.QGLFormat(QtOpenGL.QGL.SampleBuffers)))
# self.setCursor(Qt.OpenHandCursor)
self.setCursor(Qt.ArrowCursor)
self.SetCenter(
QtCore.QPointF(
0.0,
0.0)) # A modified version of centerOn(), handles special cases
def loadMapping(self, mapping):
# Set-up the scene
scene = MemoryMappingScene(mapping, parent=self)
self.setScene(scene)
self.mapping = mapping
# Set-up the view
if mapping:
# Populate the scene
# self._debugFill(scene)
self.drawPages(mapping)
self.setSceneRect(0, 0, LINE_SIZE, (len(mapping) // LINE_SIZE) + 1)
# draw a square around
self.scene().addRect(
0,
0,
LINE_SIZE,
(len(mapping) // LINE_SIZE) + 1,
QtGui.QPen(
Qt.SolidLine))
log.debug(
'set sceneRect to %d,%d' %
(LINE_SIZE, (len(mapping) // LINE_SIZE) + 1))
else:
self.setSceneRect(0, 0, LINE_SIZE, LINE_SIZE)
self.SetCenter(
QtCore.QPointF(
0.0,
0.0)) # A modified version of centerOn(), handles special cases
return
def drawPages(self, mapping):
        ''' draw a page delimiter every PAGE_SIZE '''
pageSize = PAGE_SIZE
# 15 is the mapping's size//PAGE_SIZE
for y in xrange(
PAGE_SIZE // LINE_SIZE, (len(mapping) // LINE_SIZE) - 1, PAGE_SIZE // LINE_SIZE):
self.scene().addLine(0, y, LINE_SIZE, y, QtGui.QPen(Qt.DotLine))
def _debugFill(self, scene):
for x in xrange(0, LINE_SIZE, 25):
for y in xrange(0, LINE_SIZE, 25):
if (x % 100 == 0)and (y % 100 == 0):
scene.addRect(x, y, 2, 2)
pointString = QtCore.QString()
stream = QtCore.QTextStream(pointString)
stream << "(" << x << "," << y << ")"
item = scene.addText(pointString)
item.setPos(x, y)
else:
scene.addRect(x, y, 1, 1)
def GetScene(self):
return self.scene()
def GetCenter(self):
return self.CurrentCenterPoint
'''
* Sets the current centerpoint. Also updates the scene's center point.
* Unlike centerOn, which has no way of getting the floating point center
* back, SetCenter() stores the center point. It also handles the special
* sidebar case. This function will claim the centerPoint to sceneRec ie.
* the centerPoint must be within the sceneRec.
'''
# Set the current centerpoint in the
def SetCenter(self, centerPoint):
# Get the rectangle of the visible area in scene coords
visibleArea = self.mapToScene(self.rect()).boundingRect()
# Get the scene area
sceneBounds = self.sceneRect()
boundX = visibleArea.width() / 2.0
boundY = visibleArea.height() / 2.0
boundWidth = sceneBounds.width() - 2.0 * boundX
boundHeight = sceneBounds.height() - 2.0 * boundY
# The max boundary that the centerPoint can be to
bounds = QtCore.QRectF(boundX, boundY, boundWidth, boundHeight)
if (bounds.contains(centerPoint)):
# We are within the bounds
self.CurrentCenterPoint = centerPoint
else:
# We need to clamp or use the center of the screen
if(visibleArea.contains(sceneBounds)):
# Use the center of scene ie. we can see the whole scene
self.CurrentCenterPoint = sceneBounds.center()
else:
self.CurrentCenterPoint = centerPoint
# We need to clamp the center. The centerPoint is too large
if(centerPoint.x() > bounds.x() + bounds.width()):
self.CurrentCenterPoint.setX(bounds.x() + bounds.width())
elif (centerPoint.x() < bounds.x()):
self.CurrentCenterPoint.setX(bounds.x())
if(centerPoint.y() > bounds.y() + bounds.height()):
self.CurrentCenterPoint.setY(bounds.y() + bounds.height())
elif (centerPoint.y() < bounds.y()):
self.CurrentCenterPoint.setY(bounds.y())
# Update the scrollbars
self.centerOn(self.CurrentCenterPoint)
return
'''
* Handles when the mouse button is pressed
'''
def mousePressEvent(self, event):
        ''' todo
        weird: when pointers and null words are displayed, the pointer can no longer be selected;
        the click lands on the item group of the null words.
        '''
# For panning the view
self.LastPanPoint = event.pos()
self.setCursor(Qt.ClosedHandCursor)
item = self.itemAt(event.pos())
log.debug('Mouse press on ' + str(item))
if item is None:
return
item.setSelected(True)
pitem = item.parentItem()
if pitem is None:
# no parent item, that must be lonely....
if self.mapping:
# read mapping value
addr = event.pos().y() * LINE_SIZE + event.pos().x()
value = self.mapping.read_word(self.mapping.start + addr)
log.debug('@0x%x: 0x%x' % (self.mapping.start + addr, value))
else:
# parent item, check for haystack types
log.debug('Mouse press on parent item ' + str(pitem))
if hasattr(pitem, 'value') and model.isRegistered(pitem.value):
log.debug('showing info for %s' % (pitem))
# update info view
self.parent().showInfo(pitem)
elif hasattr(pitem, 'onSelect'):
# print status for pointers and nulls
log.debug('running parent onSelect')
pitem.onSelect()
elif hasattr(item, 'onSelect'):
log.debug('running item onSelect')
pitem.onSelect()
else:
log.debug('%s has no onSelect method' % item)
return
'''
* Handles when the mouse button is released
'''
def mouseReleaseEvent(self, event):
# self.setCursor(Qt.OpenHandCursor)
self.setCursor(Qt.ArrowCursor)
self.LastPanPoint = QtCore.QPoint()
return
'''
*Handles the mouse move event
'''
def mouseMoveEvent(self, event):
if (not self.LastPanPoint.isNull()):
# Get how much we panned
delta = self.mapToScene(
self.LastPanPoint) - self.mapToScene(event.pos())
self.LastPanPoint = event.pos()
# Update the center ie. do the pan
self.SetCenter(self.GetCenter() + delta)
return
'''
* Zoom the view in and out.
'''
def wheelEvent(self, event):
# Get the position of the mouse before scaling, in scene coords
pointBeforeScale = QtCore.QPointF(self.mapToScene(event.pos()))
# Get the original screen centerpoint
# CurrentCenterPoint; //(visRect.center());
screenCenter = self.GetCenter()
# Scale the view ie. do the zoom
scaleFactor = 1.15 # How fast we zoom
if(event.delta() > 0):
# Zoom in
self.scale(scaleFactor, scaleFactor)
else:
# Zooming out
self.scale(1.0 / scaleFactor, 1.0 / scaleFactor)
# Get the position after scaling, in scene coords
pointAfterScale = QtCore.QPointF(self.mapToScene(event.pos()))
# Get the offset of how the screen moved
offset = pointBeforeScale - pointAfterScale
# Adjust to the new center for correct zooming
newCenter = screenCenter + offset
self.SetCenter(newCenter) # QPointF
return
'''
* Need to update the center so there is no jolt in the
* interaction after resizing the widget.
'''
def resizeEvent(self, event):
# Get the rectangle of the visible area in scene coords
visibleArea = self.mapToScene(self.rect()).boundingRect()
self.SetCenter(visibleArea.center())
# Call the subclass resize so the scrollbars are updated correctly
super(QtGui.QGraphicsView, self).resizeEvent(event)
return
|
StarcoderdataPython
|
3431912
|
<gh_stars>1-10
import os
import requests
import json
import threading
import copy
import common
from habitica import Habitica
from datetime import datetime, timezone, timedelta
import leancloud
from leancloud import LeanCloudError
from lc.api import LC
# Provides basic operations for 不背单词 (Bubei Danci)
class GitHub(object):
"""docstring for GitHub"""
    # Lock for the singleton pattern
_instance_lock = threading.Lock()
def __init__(self):
env_list = ['GITHUB_USERNAME', 'GITHUB_SECRET']
all_right, no_env = common.check_env(env_list)
if not all_right:
raise Exception("未设置必要环境变量 %s" % no_env)
if not hasattr(self, 'habit_name'):
self.habit_name = LC().get_habit_name_by_project_name("GitHub")
if not hasattr(self, 'habit_id'):
self.habit_id = Habitica().get_habitica_habit_id_by_name(self.habit_name)
if not hasattr(self, 'secret'):
self.secret = os.getenv("GITHUB_SECRET")
if not hasattr(self, 'username'):
self.username = os.getenv("GITHUB_USERNAME")
if not hasattr(self, 'headers'):
self.headers= {
"Accept" : "application/vnd.github.v3+json"
}
    # Singleton pattern implementation
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '_instance'):
with GitHub._instance_lock:
if not hasattr(cls, '_instance'):
GitHub._instance = super().__new__(cls)
return GitHub._instance
    # Read today's GitHub data and parse out the usable push records
def read_github_data(self, github_json):
res = []
for item in github_json:
if item['type'] != 'PushEvent':
continue
data = {
'pushId' : item['payload']['push_id'],
'date' : common.get_china_now(),
'repo' : item['repo']['name'],
'size' : item['payload']['size'],
'commits' : item['payload']['commits'],
}
res.append(data)
return res
    # Get recent GitHub data (30 entries by default)
def get_github_data(self):
url = "https://api.github.com/users/PriateXYF/events"
github_res = requests.get(url, auth=(self.username, self.secret), headers=self.headers)
github_json = json.loads(github_res.text)
return github_json
    # Get the most recent GitHub records stored in LeanCloud (50 by default)
def get_latest_lc_data(self, limit = 50):
# now = datetime.utcnow().replace(tzinfo=timezone.utc).astimezone(timezone(timedelta(hours=0))) - timedelta(days=days)
query = leancloud.Query('GitHub')
query.descending('date')
# query.greater_than_or_equal_to('date', now)
query.limit(limit)
lc_list = None
try:
lc_list = query.find()
except LeanCloudError as e:
if e.code == 101:
return []
data_list = []
for item in lc_list:
data = {
'id' : item.id,
'pushId' : item.get('pushId'),
                'date' : item.get('date'),
'repo' : item.get('repo'),
'size' : item.get('size'),
'commits' : item.get('commits'),
'oldUser' : item.get('oldUser'),
'newUser' : item.get('newUser'),
'diffUser' : item.get('diffUser'),
'info' : item.get('info'),
}
data_list.append(data)
return data_list
    # Get LeanCloud data (paginated, 10 records per page)
def get_lc_data(self, page = 0):
query = leancloud.Query('GitHub')
query.descending('date')
query.limit(10)
query.skip(page * 10)
lc_list = None
try:
lc_list = query.find()
except LeanCloudError as e:
if e.code == 101:
return []
data_list = []
for item in lc_list:
data = {
'id' : item.id,
'pushId' : item.get('pushId'),
'date' : item.get('date'),
'repo' : item.get('repo'),
'size' : item.get('size'),
'commits' : item.get('commits'),
'oldUser' : item.get('oldUser'),
'newUser' : item.get('newUser'),
'diffUser' : item.get('diffUser'),
'info' : item.get('info'),
}
data_list.append(data)
return data_list
def set_lc_data(self, github_data, OldUser, NewUser, DiffUser):
now = common.get_china_now()
TodayGitHub = leancloud.Object.extend('GitHub')
today_github = TodayGitHub()
today_github.set('pushId', github_data['pushId'])
today_github.set('repo', github_data['repo'])
today_github.set('size', github_data['size'])
today_github.set('commits', github_data['commits'])
today_github.set('oldUser', OldUser.toJSON())
today_github.set('newUser', NewUser.toJSON())
today_github.set('diffUser', DiffUser.toJSON())
today_github.set('info', DiffUser.get_diff_info())
today_github.set('date', now)
today_github.save()
    # Get today's GitHub entries that are not yet stored in LeanCloud
def get_today_lc_diff(self):
github_data_list = self.get_github_data()
github_data_list = self.read_github_data(github_data_list)
latest_lc_data_list = self.get_latest_lc_data()
res_list = []
# print(GitHub_data)
for github_data in github_data_list:
flag = False
for lc_data in latest_lc_data_list:
                # Skip if the record already exists in LeanCloud
if lc_data['pushId'] == github_data['pushId']:
flag = True
break
if flag:
continue
res_list.append(github_data)
return res_list
    # Export the day's data to Habitica
def habitica_daily_export(self):
today_diff_list = self.get_today_lc_diff()
hc = Habitica()
OldUser = hc.get_habitica_user()
NewUser = copy.copy(OldUser)
TrueOldUser = copy.copy(OldUser)
res = "你还没有新的GiHub提交哦 ! Coding Now!"
if len(today_diff_list) != 0:
for github_data in today_diff_list:
for temp in github_data['commits']:
NewUser = hc.do_habitica_habit_by_id(self.habit_id)
DiffUser = NewUser - OldUser
self.set_lc_data(github_data, OldUser, NewUser, DiffUser)
OldUser = NewUser
DiffUser = NewUser - TrueOldUser
common.send_push_plus("你的 GitHub 已导入 Habitica !", DiffUser.get_diff_info())
res = DiffUser.get_diff_info()
return res, len(today_diff_list)
|
StarcoderdataPython
|
5034126
|
from atgql.pyutils.did_you_mean import did_you_mean
def test_does_accept_an_empty_list():
assert did_you_mean([]) == ''
def test_handles_single_suggestion():
assert did_you_mean(['A']) == ' Did you mean "A"?'
def test_handles_two_suggestions():
assert did_you_mean(['A', 'B']) == ' Did you mean "A" or "B"?'
def test_handles_multiple_suggestions():
assert did_you_mean(['A', 'B', 'C']) == ' Did you mean "A", "B", or "C"?'
def test_limits_to_five_suggestions():
assert (
did_you_mean(['A', 'B', 'C', 'D', 'E', 'F']) == ' Did you mean "A", "B", "C", "D", or "E"?'
)
def test_adds_sub_message():
assert did_you_mean('the letter', ['A']) == ' Did you mean the letter "A"?'
|
StarcoderdataPython
|
4934171
|
#!/usr/bin/env python
import torch
class RobotModel(object):
def __init__(self, dofs, nlinks, wksp_dim, state_dim, sphere_radii = [], batch_size=1, num_traj_states=1, use_cuda=False):
self.use_cuda = torch.cuda.is_available() if use_cuda else False
self.device = torch.device('cuda') if self.use_cuda else torch.device('cpu')
        self.dofs = dofs
        self.nlinks = nlinks
        self.wksp_dim = wksp_dim
        self.state_dim = state_dim
        self.sphere_radii = sphere_radii
self.batch_size = batch_size
self.num_traj_states = num_traj_states
def forward_kinematics(self, pose_config, vel_config=None):
        raise NotImplementedError
def get_sphere_centers(self, pose_config):
        raise NotImplementedError
def get_sphere_radii(self):
return self.sphere_radii
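# A minimal sketch of a concrete subclass (a hypothetical 1-DOF planar arm of unit
# length, not part of the original project), showing how the base class is meant to
# be specialized by overriding forward_kinematics:
class PlanarArm(RobotModel):
    def __init__(self, **kwargs):
        super(PlanarArm, self).__init__(dofs=1, nlinks=1, wksp_dim=2, state_dim=1, **kwargs)
    def forward_kinematics(self, pose_config, vel_config=None):
        # pose_config: tensor of joint angles with shape (..., 1)
        x = torch.cos(pose_config)
        y = torch.sin(pose_config)
        # end-effector position in the plane, shape (..., 2)
        return torch.cat([x, y], dim=-1)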
|
StarcoderdataPython
|
5190488
|
<reponame>marcottelab/NuevoTx<gh_stars>0
#!/usr/bin/env python3
import gzip
import sys
#filename_fa = 'Karsenia_koreana.prot.select.2021_07.fa'
#filename_bp = 'Karsenia_koreana.prot.select.2021_07.MODtree_ens100_2021_05.dmnd_bp_tbl6.gz'
#sp_code = 'KARKO'
filename_fa = sys.argv[1]
filename_bp = sys.argv[2]
sp_code = sys.argv[3]
q_best = dict()
t_best = dict()
gencode_best = dict()
final_list = dict()
seq_list = dict()
f_fa = open(filename_fa, 'r')
if filename_fa.endswith('.gz'):
f_fa = gzip.open(filename_fa, 'rt')
for line in f_fa:
if line.startswith('>'):
tmp_h = line.strip().lstrip('>')
tmp_h_id = tmp_h.split()[0]
seq_list[tmp_h_id] = {'h': tmp_h, 'seq': ''}
else:
seq_list[tmp_h_id]['seq'] += line.strip()
f_fa.close()
f_bp = open(filename_bp, 'rt')
if filename_bp.endswith('.gz'):
f_bp = gzip.open(filename_bp, 'rt')
for line in f_bp:
tokens = line.strip().split("\t")
q_id = tokens[0]
t_id = tokens[1]
tmp_bits = float(tokens[-1])
if q_id not in q_best:
q_best[q_id] = {'t_id': t_id, 'bits': tmp_bits}
elif q_best[q_id]['bits'] < tmp_bits:
q_best[q_id] = {'t_id': t_id, 'bits': tmp_bits}
if t_id not in t_best:
t_best[t_id] = {'q_id': q_id, 'bits': tmp_bits}
elif t_best[t_id]['bits'] < tmp_bits:
t_best[t_id] = {'q_id': q_id, 'bits': tmp_bits}
if t_id.find('-GENCODE') >= 0:
if t_id not in gencode_best:
gencode_best[t_id] = {'q_id': q_id, 'bits': tmp_bits}
elif gencode_best[t_id]['bits'] < tmp_bits:
gencode_best[t_id] = {'q_id': q_id, 'bits': tmp_bits}
f_bp.close()
def is_good_name(tmp_name):
if tmp_name.upper() == 'NOTAVAIL':
return False
if tmp_name.find('SI_DKEY') >= 0:
return False
if sum(c.isdigit() for c in tmp_name) > 4:
if tmp_name.startswith('ZNF') or tmp_name.startswith('SLC'):
return True
else:
#print("Wrong name", tmp_name)
return False
return True
for tmp_q_id, tmp_q in q_best.items():
tmp_t = tmp_q['t_id']
family_id = tmp_t.split('|')[0]
family_class = tmp_t.split('|')[-1]
gene_name = tmp_t.split('|')[2].upper()
if not is_good_name(gene_name):
gene_name = 'NotAvail'
if family_class in ['IG', 'OR']:
final_list[tmp_q_id] = '%s|%s|%s|%s|%s' % (family_id, sp_code, gene_name, tmp_q_id, family_class)
elif family_class.startswith('single_'):
final_list[tmp_q_id] = '%s|%s|%s|%s|%s' % (family_id, sp_code, gene_name, tmp_q_id, family_class)
elif family_class == 'noname':
final_list[tmp_q_id] = '%s|%s|%s|%s|%s' % (family_id, sp_code, gene_name, tmp_q_id, family_class)
sys.stderr.write('Prefilter: %d \n' % len(final_list))
name_best = dict()
name_list = dict()
for tmp_t_id, tmp_t in gencode_best.items():
tmp_name = tmp_t_id.split('|')[2].upper()
tmp_q_id = tmp_t['q_id']
tmp_bits = tmp_t['bits']
if tmp_q_id in final_list:
continue
if is_good_name(tmp_name):
if tmp_name not in name_best:
name_list[tmp_name] = []
name_best[tmp_name] = {'q_id': tmp_q_id, 't_id': tmp_t_id, 'bits': tmp_bits}
if name_best[tmp_name]['bits'] < tmp_bits:
name_best[tmp_name] = {'q_id': tmp_q_id, 't_id': tmp_t_id, 'bits': tmp_bits}
for tmp_name in name_best.keys():
tmp_q_id = name_best[tmp_name]['q_id']
tmp_t_id = name_best[tmp_name]['t_id']
family_id = tmp_t_id.split('|')[0]
family_class = tmp_t_id.split('|')[-1]
final_list[tmp_q_id] = '%s|%s|%s|%s|%s' % (family_id, sp_code, tmp_name, tmp_q_id, family_class)
sys.stderr.write('Prefilter+Named: %d \n' % len(final_list))
final_family_list = [x.split('|')[0] for x in final_list.values()]
for tmp_q_id, tmp_q in q_best.items():
tmp_t_id = tmp_q['t_id']
family_id = tmp_q['t_id'].split('|')[0]
family_class = tmp_q['t_id'].split('|')[-1]
if family_id not in final_family_list:
tmp_name = tmp_t_id.split('|')[2].upper()
if not is_good_name(tmp_name):
tmp_name = 'NotAvail'
final_list[tmp_q_id] = '%s|%s|%s|%s|%s' % (family_id, sp_code, tmp_name, tmp_q_id, family_class)
sys.stderr.write('Prefilter+Named+Draft: %d \n' % len(final_list))
last_idx = 0
print("#tx_ID\tprot_ID\tfamily_ID\tfamily_class\tgene_name\torig_ID\tExtra")
for tmp_old_h, tmp_new_h in final_list.items():
last_idx += 1
tx_id = 'AB%sT%07d' % (sp_code, last_idx)
prot_id = 'AB%sP%07d' % (sp_code, last_idx)
new_tokens = tmp_new_h.split('|')
family_id = new_tokens[0]
gene_name = new_tokens[2]
old_id = new_tokens[3]
family_class = new_tokens[4]
tmp_extra = seq_list[tmp_old_h]['h'].replace(tmp_old_h, '')
print("%s\t%s\t%s\t%s\t%s\t%s\t%s" % (tx_id, prot_id, family_id, family_class, gene_name, tmp_old_h, tmp_extra))
#tmp_seq = seq_list[tmp_old_h]['seq']
#print(">%s %s\n%s" % (tmp_new_h, tmp_extra, tmp_seq))
|
StarcoderdataPython
|
1893925
|
from ctypes import sizeof
# https://www.csie.ntu.edu.tw/~b03902082/codebrowser/pbrt/include/asm-generic/ioctl.h.html
# and
# https://stackoverflow.com/questions/20500947/what-is-the-equivalent-of-the-c-ior-function-in-python
_IOC_NRBITS = 8
_IOC_TYPEBITS = 8
_IOC_SIZEBITS = 14
_IOC_DIRBITS = 2
_IOC_NRSHIFT = 0
_IOC_TYPESHIFT = _IOC_NRSHIFT + _IOC_NRBITS
_IOC_SIZESHIFT = _IOC_TYPESHIFT + _IOC_TYPEBITS
_IOC_DIRSHIFT = _IOC_SIZESHIFT + _IOC_SIZEBITS
_IOC_NONE = 0
_IOC_WRITE = 1
_IOC_READ = 2
def _IOC(direction, type_, nr, size):
return (
(direction << _IOC_DIRSHIFT)
| (ord(type_) << _IOC_TYPESHIFT)
| (nr << _IOC_NRSHIFT)
| (size << _IOC_SIZESHIFT)
)
def _IO(g, n):
return _IOC(_IOC_NONE, g, n, 0)
def _IOR(g, n, t):
return _IOC(_IOC_READ, g, n, sizeof(t))
def _IOW(g, n, t):
return _IOC(_IOC_WRITE, g, n, sizeof(t))
def _IOWR(g, n, t):
return _IOC(_IOC_READ | _IOC_WRITE, g, n, sizeof(t))
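# A minimal usage sketch (the type letter 'M' and command numbers are hypothetical,
# not tied to any real driver): request numbers are built the same way as the C
# _IO/_IOR/_IOW macros and could then be passed to fcntl.ioctl().
if __name__ == '__main__':
    from ctypes import c_int
    PING = _IO('M', 0x00)                 # no data transfer
    GET_VALUE = _IOR('M', 0x01, c_int)    # driver -> userspace, 4-byte payload
    SET_VALUE = _IOW('M', 0x02, c_int)    # userspace -> driver, 4-byte payload
    print(hex(PING), hex(GET_VALUE), hex(SET_VALUE))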
|
StarcoderdataPython
|
4990382
|
<reponame>DaerusX/jupyterlab-data-visualization<gh_stars>1-10
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
from datavalidator.handlers.statistics_handler import StatisticsHandler
from datavalidator.handlers.convert_handler import ConvertHandler
def data_visualize(dataframes):
proto = StatisticsHandler().ProtoFromDataFrames(dataframes)
ConvertHandler().data_convert(proto)
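# A minimal usage sketch. The exact record structure ProtoFromDataFrames expects is
# an assumption here (a facets-style list of named DataFrame entries):
#   import pandas as pd
#   df = pd.DataFrame({'age': [21, 35, 42], 'income': [1200, 3400, 2800]})
#   data_visualize([{'name': 'train', 'table': df}])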
|
StarcoderdataPython
|
9722543
|
# Copyright 2016, <NAME>, mailto:<EMAIL>
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
def simple_comparisons(x, y):
if 'a' <= x <= y <= 'z':
print("One")
if 'a' <= x <= 'z':
print("Two")
if 'a' <= x > 'z':
print("Three")
print("Simple comparisons:")
simple_comparisons('c', 'd')
def side_effect():
print("<side_effect>")
return 7
def side_effect_comparisons():
print("Should have side effect:")
print(1 < side_effect() < 9)
print("Should not have side effect due to short circuit:")
print(3 < 2 < side_effect() < 9)
print("Check for expected side effects only:")
side_effect_comparisons()
def function_torture_is():
a = (1, 2, 3)
for x in a:
for y in a:
for z in a:
print(x, y, z, ':', x is y is z, x is not y is not z)
function_torture_is()
print("Check if lambda can have expression chains:", end = "")
def function_lambda_with_chain():
a = (1, 2, 3)
x = lambda x : x[0] < x[1] < x[2]
print("lambda result is", x(a))
function_lambda_with_chain()
print("Check if generators can have expression chains:", end = "")
def generator_function_with_chain():
x = (1, 2, 3)
yield x[0] < x[1] < x[2]
print(list(generator_function_with_chain()))
print("Check if list contractions can have expression chains:", end = "")
def contraction_with_chain():
return [ x[0] < x[1] < x[2] for x in [(1, 2, 3) ] ]
print(contraction_with_chain())
print("Check if generator expressions can have expression chains:", end = "")
def genexpr_with_chain():
return ( x[0] < x[1] < x[2] for x in [(1, 2, 3) ] )
print(list(genexpr_with_chain()))
print("Check if class bodies can have expression chains:", end = "")
class class_with_chain:
x = (1, 2, 3)
print(x[0] < x[1] < x[2])
x = (1, 2, 3)
print(x[0] < x[1] < x[2])
class CustomOps(int):
def __lt__(self, other):
print("enter <", self, other)
return True
def __gt__(self, other):
print("enter >", self, other)
return False
print("Custom ops, to enforce chain eval order and short circuit:", end = "")
print(CustomOps(7) < CustomOps(8) > CustomOps(6))
print("Custom ops, doing short circuit:", end = "")
print(CustomOps(8) > CustomOps(7) < CustomOps(6))
def inOperatorChain():
print("In operator chains:")
print(3 in [3,4] in [[3,4]])
print(3 in [3,4] not in [[3,4]])
if 3 in [3,4] in [[3,4]]:
print("Yes")
else:
print("No")
if 3 in [3,4] not in [[3,4]]:
print("Yes")
else:
print("No")
inOperatorChain()
# Make sure the values are called and order is correct:
class A(object):
def __init__(self, name, value):
self.name = name
self.value = value
def __repr__(self):
return "<Value %s %d>" % (self.name, self.value)
def __lt__(self, other):
print("less than called for:", self, other, self.value, other.value, self.value < other.value)
if self.value < other.value:
print("good")
return 7
else:
print("bad")
return 0
a = A('a',1)
b = A('b',2)
c = A('c',0)
print(a < b < c)
print('*' * 80)
a = A('a',2)
b = A('b',1)
c = A('c',0)
print(a < b < c)
|
StarcoderdataPython
|
8057569
|
import logging
import time
import requests
import six.moves.urllib.parse as urlparse
from .. import SSS_VERSION
from .. import SSS_FORMAT
from .. import ACTION_PREFIX
from .. import client
from ..common import constants
from ..common import exceptions
from ..common import serializer
from ..common import utils
from ..i18n import _
from datetime import datetime
_logger = logging.getLogger(__name__)
def exception_handler_v10(status_code, error_content):
"""Exception handler for API v1.0 client.
This routine generates the appropriate SSS exception according to
the contents of the response body.
:param status_code: HTTP error status code
:param error_content: deserialized body of error response
"""
error_dict = None
if isinstance(error_content, dict):
error_dict = error_content.get('SSSError')
# Find real error type
bad_err_error_flag = False
if error_dict:
        # If the SSS key is found, it should contain
        # 'message' and 'type' keys
try:
error_type = error_dict['type']
error_message = error_dict['message']
if error_dict['detail']:
error_message += "\n" + error_dict['detail']
except Exception:
bad_err_error_flag = True
if not bad_err_error_flag:
# If corresponding exception is defined, use it.
client_exc = getattr(exceptions, '%sClient' % error_type, None)
# Otherwise look up per status-code client exception
if not client_exc:
client_exc = exceptions.HTTP_EXCEPTION_MAP.get(status_code)
if client_exc:
raise client_exc(message=error_message,
status_code=status_code)
else:
raise exceptions.SSSClientException(
status_code=status_code, message=error_message)
else:
raise exceptions.SSSClientException(status_code=status_code,
message=error_dict)
else:
message = None
if isinstance(error_content, dict):
message = error_content.get('message')
if message:
raise exceptions.SSSClientException(status_code=status_code,
message=message)
        # If we end up here, the exception was not an SSS error
msg = "%s-%s" % (status_code, error_content)
raise exceptions.SSSClientException(status_code=status_code,
message=msg)
class APIParamsCall(object):
"""A Decorator to add support for format and tenant overriding and filters.
"""
def __init__(self, function):
self.function = function
def __get__(self, instance, owner):
def with_params(*args, **kwargs):
_format = instance.format
if 'format' in kwargs:
instance.format = kwargs['format']
ret = self.function(instance, *args, **kwargs)
instance.format = _format
return ret
return with_params
class ClientBase(object):
"""Client for the OpenStack SSS v1.0 API.
:param string username: Username for authentication. (optional)
:param string user_id: User ID for authentication. (optional)
:param string password: Password for authentication. (optional)
:param string token: Token for authentication. (optional)
:param string tenant_name: Tenant name. (optional)
:param string tenant_id: Tenant id. (optional)
:param string auth_strategy: 'keystone' by default, 'noauth' for no
authentication against keystone. (optional)
:param string auth_url: Keystone service endpoint for authorization.
:param string service_type: Network service type to pull from the
keystone catalog (e.g. 'network') (optional)
:param string endpoint_type: Network service endpoint type to pull from the
keystone catalog (e.g. 'publicURL',
'internalURL', or 'adminURL') (optional)
:param string region_name: Name of a region to select when choosing an
endpoint from the service catalog.
:param string endpoint_url: A user-supplied endpoint URL for the neutron
service. Lazy-authentication is possible for API
service calls if endpoint is set at
instantiation.(optional)
:param integer timeout: Allows customization of the timeout for client
http requests. (optional)
:param bool insecure: SSL certificate validation. (optional)
:param bool log_credentials: Allow for logging of passwords or not.
Defaults to False. (optional)
:param string ca_cert: SSL CA bundle file to use. (optional)
:param integer retries: How many times idempotent (GET, PUT, DELETE)
requests to SSS server should be retried if
they fail (default: 0).
:param bool raise_errors: If True then exceptions caused by connection
failure are propagated to the caller.
(default: True)
:param session: Keystone client auth session to use. (optional)
:param auth: Keystone auth plugin to use. (optional)
Example::
from sssclient.v1_0 import client
sss = client.Client(username=USER,
password=<PASSWORD>,
tenant_name=TENANT_NAME,
auth_url=KEYSTONE_URL)
nets = sss.list_networks()
...
"""
# API has no way to report plurals, so we have to hard code them
# This variable should be overridden by a child class.
EXTED_PLURALS = {}
def __init__(self, **kwargs):
"""Initialize a new client for the SSS v1.0 API."""
super(ClientBase, self).__init__()
self.retries = kwargs.pop('retries', 0)
self.raise_errors = kwargs.pop('raise_errors', True)
self.httpclient = client.construct_http_client(**kwargs)
self.version = SSS_VERSION
self.format = SSS_FORMAT
self.action_prefix = ACTION_PREFIX
self.retry_interval = 1
def _handle_fault_response(self, status_code, response_body):
# Create exception with HTTP status code and message
_logger.debug("Error message: %s", response_body)
# Add deserialized error message to exception arguments
try:
des_error_body = self.deserialize(response_body, status_code)
except Exception:
            # If we are unable to deserialize the body, it is probably not an
            # SSS error
des_error_body = {'message': response_body}
# Raise the appropriate exception
exception_handler_v10(status_code, des_error_body)
def do_request(self, method, action, body=None, headers=None, params=None):
# Add format and tenant_id
# action += ".%s" % self.format
if ACTION_PREFIX:
action = self.action_prefix + action
# action = self.action_prefix + action
if type(params) is dict and params:
params = utils.safe_encode_dict(params)
action += '?' + urlparse.urlencode(params, doseq=1)
if body:
body = self.serialize(body)
resp, replybody = self.httpclient.do_request(
action, method, body=body,
content_type=self.content_type())
status_code = resp.status_code
if status_code in (requests.codes.ok,
requests.codes.created,
requests.codes.accepted,
requests.codes.no_content):
return self.deserialize(replybody, status_code)
else:
if not replybody:
replybody = resp.reason
self._handle_fault_response(status_code, replybody)
def get_auth_info(self):
return self.httpclient.get_auth_info()
def serialize(self, data):
"""Serializes a dictionary into either XML or JSON.
A dictionary with a single key can be passed and it can contain any
structure.
"""
if data is None:
return None
elif type(data) is dict:
return serializer.Serializer(
self.get_attr_metadata()).serialize(data, self.content_type())
else:
raise Exception(_("Unable to serialize object of type = '%s'") %
type(data))
def deserialize(self, data, status_code):
"""Deserializes an XML or JSON string into a dictionary."""
if status_code == 204:
return data
return serializer.Serializer(self.get_attr_metadata()).deserialize(
data, self.content_type())['body']
def get_attr_metadata(self):
if self.format == 'json':
return {}
old_request_format = self.format
self.format = 'json'
exts = self.list_extensions()['extensions']
self.format = old_request_format
ns = dict([(ext['alias'], ext['namespace']) for ext in exts])
self.EXTED_PLURALS.update(constants.PLURALS)
return {'plurals': self.EXTED_PLURALS,
'xmlns': constants.XML_NS_V20,
constants.EXT_NS: ns}
def content_type(self, _format=None):
"""Returns the mime-type for either 'xml' or 'json'.
Defaults to the currently set format.
"""
_format = _format or self.format
return "application/%s" % (_format)
def retry_request(self, method, action, body=None,
headers=None, params=None):
"""Call do_request with the default retry configuration.
Only idempotent requests should retry failed connection attempts.
:raises: ConnectionFailed if the maximum # of retries is exceeded
"""
max_attempts = self.retries + 1
for i in range(max_attempts):
try:
return self.do_request(method, action, body=body,
headers=headers, params=params)
except exceptions.ConnectionFailed:
# Exception has already been logged by do_request()
if i < self.retries:
_logger.debug('Retrying connection to BHEC and Colocation service')
time.sleep(self.retry_interval)
elif self.raise_errors:
raise
if self.retries:
msg = (_("Failed to connect to SSS server after %d attempts")
% max_attempts)
else:
msg = _("Failed to connect SSS server")
raise exceptions.ConnectionFailed(reason=msg)
def delete(self, action, body=None, headers=None, params=None):
return self.retry_request("DELETE", action, body=body,
headers=headers, params=params)
def get(self, action, body=None, headers=None, params=None):
return self.retry_request("GET", action, body=body,
headers=headers, params=params)
def post(self, action, body=None, headers=None, params=None):
# Do not retry POST requests to avoid the orphan objects problem.
return self.do_request("POST", action, body=body,
headers=headers, params=params)
def put(self, action, body=None, headers=None, params=None):
return self.retry_request("PUT", action, body=body,
headers=headers, params=params)
def list(self, collection, path, retrieve_all=True, **params):
if retrieve_all:
res = []
for r in self._pagination(collection, path, **params):
res.extend(r[collection])
return {collection: res}
else:
return self._pagination(collection, path, **params)
def _pagination(self, collection, path, **params):
if params.get('page_reverse', False):
linkrel = 'previous'
else:
linkrel = 'next'
next = True
while next:
res = self.get(path, params=params)
yield res
next = False
try:
for link in res['%s_links' % collection]:
if link['rel'] == linkrel:
query_str = urlparse.urlparse(link['href']).query
params = urlparse.parse_qs(query_str)
next = True
break
except KeyError:
break
class Client(ClientBase):
#
# Users
#
user_singular_path = "/users/%s" # {user_id} for Show, Update and Delete
user_list_path = "/users" # for List
user_create_path = "/users" # for Create
@APIParamsCall
def list_users(self, **_params):
"""Fetches a list of all Users of a certain contract_id in SSS."""
return self.get(self.user_list_path, params=_params)
@APIParamsCall
def show_user(self, user_id, **_params):
"""Fetche information of a certain user_id in SSS."""
return self.get(self.user_singular_path % (user_id), params=_params)
#@APIParamsCall
#def update_user(self, body=None, user_id="", *args, **_params):
# """Update information of a certain user_id in SSS."""
# return self.put(self.user_singular_path % (user_id), body=body)
@APIParamsCall
def delete_user(self, user_id, **_params):
"""Deletes a certain user in SSS."""
return self.delete(self.user_singular_path % (user_id), params=_params)
@APIParamsCall
def create_user(self, body=None, *args, **_params):
"""Creates a certain user in SSS.."""
return self.post(self.user_create_path, body=body)
#
# Tenants
#
tenant_singular_path = "/tenants/%s" # {user_id} for Show, Update and Delete
tenant_list_path = "/tenants" # for List
tenant_create_path = "/tenants" # for Create
@APIParamsCall
def list_tenants(self, **_params):
"""Fetches a list of all Tenants of a certain contract_id in SSS."""
return self.get(self.tenant_list_path, params=_params)
@APIParamsCall
def show_tenant(self, tenant_id, **_params):
"""Fetche information of a certain tenant_id in SSS."""
return self.get(self.tenant_singular_path % (tenant_id), params=_params)
@APIParamsCall
def delete_tenant(self, tenant_id, **_params):
"""Deletes a certain tenant in SSS."""
return self.delete(self.tenant_singular_path % (tenant_id), params=_params)
@APIParamsCall
def create_tenant(self, body=None, *args, **_params):
"""Creates a certain tenant in SSS.."""
return self.post(self.tenant_create_path, body=body)
#
# Roles
#
role_create_path = "/roles" # for Create
role_delete_path = "/roles/tenants/%s/users/%s" # {tenant_id}, {user_id} for Delete
@APIParamsCall
def delete_role(self, tenant_id, user_id, **params):
"""Deletes a certain role in SSS."""
return self.delete(self.role_delete_path % (tenant_id, user_id))
@APIParamsCall
def create_role(self, body=None, *args, **_params):
"""Creates a certain role in SSS.."""
return self.post(self.role_create_path, body=body)
#
# API Keypair
#
api_keypair_path = "/keys/%s" # {user_id} for Update
@APIParamsCall
def set_api_keypair(self, user_id, *args, **_params):
"""Sets a certain API keypair in SSS."""
return self.put(self.api_keypair_path % (user_id))
#
# Channel
#
channel_path = "/channels?get_contracts=%s" # {user_id} for Update
@APIParamsCall
def list_channels(self, get_contracts, *args, **_params):
"""List channels in SSS."""
return self.get(self.channel_path % (get_contracts))
#
# Contract
#
contract_show_path = "/contracts/%s" # {contract_id} for Show, Delete
contract_list_path = "/contracts?channel_id=%s" # {channel_id} for List
contract_create_path = "/contracts" # for Create
billing_show_path = "/contracts/%s/billing/%s" # for Show
with_target_contract = "%s/target_contract/%s" # for Show billing of each contract
@APIParamsCall
def list_contracts(self, channel_id, **_params):
"""Fetches a list of all contracts of a certain channel_id in SSS."""
return self.get(self.contract_list_path %(channel_id), params=_params)
@APIParamsCall
def show_contract(self, contract_id, **_params):
"""Fetches information of a certain contract_id in SSS."""
return self.get(self.contract_show_path % (contract_id), params=_params)
@APIParamsCall
def delete_contract(self, contract_id, **params):
"""Deletes a certain contract in SSS."""
return self.delete(self.contract_show_path % (contract_id))
@APIParamsCall
def create_contract(self, body=None, *args, **_params):
"""Creates a certain contract in SSS.."""
return self.post(self.contract_create_path, body=body)
@APIParamsCall
def show_billing(self, contract_id, target_month, **_params):
"""Fetches information of a certain contract_id in SSS."""
billing_action = self.billing_show_path % (contract_id, target_month)
return self.get(billing_action, params=_params)
#
# IAM Endpoints
#
iam_group_list_path = "/iam/groups"
iam_group_create_path = "/iam/groups"
iam_group_delete_path = "/iam/groups/%s"
iam_group_attach_role_path = "/iam/groups/%s/roles/%s"
iam_group_attach_user_path = "/iam/groups/%s/users/%s"
iam_group_detach_role_path = "/iam/groups/%s/roles/%s"
iam_group_detach_user_path = "/iam/groups/%s/users/%s"
iam_role_list_path = "/iam/roles"
iam_role_create_path = "/iam/roles"
iam_role_show_path = "/iam/roles/%s"
iam_role_delete_path = "/iam/roles/%s"
iam_user_list_path = "/iam/groups/%s/users"
@APIParamsCall
def iam_group_list(self, contract_id=None):
url = self.iam_group_list_path
if contract_id:
url += "?contract_id=" + contract_id
return self.get(url)
@APIParamsCall
def iam_group_create(self, iam_group_name=None, contract_id=None, description=None):
body = {"iam_group_name":iam_group_name,
"contract_id":contract_id,
"description":description}
return self.post(self.iam_group_create_path, body=body)
@APIParamsCall
def iam_group_delete(self, iam_group_id=None):
return self.delete(self.iam_group_delete_path % (iam_group_id))
@APIParamsCall
def iam_group_attach_user(self, iam_group_id=None, iam_user_id=None):
return self.put(self.iam_group_attach_user_path % (iam_group_id,iam_user_id))
@APIParamsCall
def iam_group_detach_user(self, iam_group_id=None, iam_user_id=None):
return self.delete(self.iam_group_detach_user_path % (iam_group_id,iam_user_id))
@APIParamsCall
def iam_group_attach_role(self, iam_group_id=None, iam_role_id=None):
return self.put(self.iam_group_attach_role_path % (iam_group_id,iam_role_id))
@APIParamsCall
def iam_group_detach_role(self, iam_group_id=None, iam_role_id=None):
return self.delete(self.iam_group_attach_role_path % (iam_group_id,iam_role_id))
@APIParamsCall
def iam_role_list(self, contract_id=None):
url = self.iam_role_list_path
if contract_id:
url += "?contract_id=" + contract_id
return self.get(url)
def iam_role_show(self, iam_role_id=None):
return self.get(self.iam_role_show_path % (iam_role_id))
@APIParamsCall
def iam_role_create(self, iam_role_name=None, contract_id=None,
description=None, resources=None):
body = {"iam_role_name":iam_role_name,
"contract_id":contract_id,
"description":description,
"resources":resources}
return self.post(self.iam_role_create_path, body=body)
@APIParamsCall
def iam_role_delete(self, iam_role_id=None):
return self.delete(self.iam_role_delete_path % (iam_role_id))
@APIParamsCall
def iam_user_list(self, iam_group_id=None):
return self.get(self.iam_user_list_path % (iam_group_id))
|
StarcoderdataPython
|
4945236
|
<gh_stars>1-10
#adata.pubsub
'''
Publisher Subscriber framework
'''
import wx
import traceback
from wx.lib.pubsub import pub
'''Send message alias
'''
publish = pub.sendMessage
def echo(text, color=None, lf=True, marker=None, icon=None): # no optional **kwargs
'''The Adata console print function sends a pubsub:app.echo message.
    Top window's OnEcho method is subscribed to the channel.
'''
style = None
if color is not None:
style = "fore:#%s,bold" % color
pub.sendMessage('app.echo',
text = text,
style = style,
lf = lf,
marker = marker,
icon = icon
)
def excepthook (etype, value, tb) :
'''The application error handler.
Send error details to subscribed consoles.
:param etype: Exception type
:type etype: type
:param value: Exception value
:type value: Exception
:param tb: Traceback
:type tb: ``traceback``
'''
echo(" %s: %s" % (value.__class__.__name__, value), "ff5555", lf=False, marker="error", icon='red_arrow')
echo("", icon='red_back')
for x in traceback.format_tb(tb):
if "code.py" in x: continue
if "codeop.py" in x: continue
if 'File "<input>"' in x: continue
echo( x , "dddddd", lf=False, icon='dots')
where=""
if hasattr(value, "filename"): where += " %s" % value.filename
if hasattr(value, "lineno"): where += " #%s" % value.lineno
if where!="": echo("%s" % where,"888888")
class Output():
'''Standard output like class using echo.
'''
def write(self, line): echo(line)
def flush(self): pass
def subscribe(topic):
''' TODO: Create a subscription decorator for this topic
'''
app = wx.GetApp()
def subscribe_decorator(function):
''' Subscribe to the topic. Beware args
'''
pub.subscribe(function, topic)
return function
return subscribe_decorator
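# A minimal usage sketch (run standalone; the listener below is hypothetical and only
# mirrors the keyword arguments a console's OnEcho handler would receive):
if __name__ == '__main__':
    def _demo_listener(text, style=None, lf=True, marker=None, icon=None):
        print(text)
    pub.subscribe(_demo_listener, 'app.echo')
    echo("hello from adata", color="00ff00", icon="dots")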
|
StarcoderdataPython
|
8084979
|
import sys
import json
version_list = sys.argv[1:]
if __name__ == '__main__':
version_list.sort(
key=lambda v: [int(u) for u in v.split('.')],
reverse=True
)
version_list = [f'v{version}' for version in version_list]
version_list.insert(0, 'latest')
print(json.dumps(version_list))
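# Example (hypothetical invocation): running
#   python sort_versions.py 1.2.0 1.10.1 0.9.5
# sorts the versions numerically (so 1.10.1 ranks above 1.2.0) and prints
#   ["latest", "v1.10.1", "v1.2.0", "v0.9.5"]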
|
StarcoderdataPython
|
4830711
|
import os
import cv2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import argparse
import segmentation_models_v1 as sm
sm.set_framework('tf.keras')
from unet_std import unet # standard unet architecture
from helper_function import plot_deeply_history, plot_history, save_history
from helper_function import precision, recall, f1_score
from sklearn.metrics import confusion_matrix
from helper_function import plot_history_for_callback, save_history_for_callback
def str2bool(value):
return value.lower() == 'true'
def generate_folder(folder_name):
if not os.path.exists(folder_name):
os.system('mkdir -p {}'.format(folder_name))
parser = argparse.ArgumentParser()
parser.add_argument("--docker", type=str2bool, default = True)
parser.add_argument("--gpu", type=str, default = '0')
parser.add_argument("--epoch", type=int, default = 2)
parser.add_argument("--batch", type=int, default = 2)
parser.add_argument("--dataset", type=str, default = 'live_dead')
parser.add_argument("--lr", type=float, default = 1e-3)
parser.add_argument("--train", type=int, default = None)
parser.add_argument("--loss", type=str, default = 'focal+dice')
args = parser.parse_args()
print(args)
model_name = 'unet-set-{}-lr-{}-train-{}-loss-{}-bt-{}-ep-{}'.format(args.dataset, args.lr,\
args.train, args.loss, args.batch, args.epoch)
print(model_name)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.dataset == 'live_dead':
val_dim = 832
test_dim = val_dim
train_image_set = 'train_images2'
val_image_set = 'val_images2'
test_image_set = 'test_images2'
DATA_DIR = '/data/datasets/{}'.format(args.dataset) if args.docker else './data/{}'.format(args.dataset)
x_train_dir = os.path.join(DATA_DIR, train_image_set)
y_train_dir = os.path.join(DATA_DIR, 'train_masks')
x_valid_dir = os.path.join(DATA_DIR, val_image_set)
y_valid_dir = os.path.join(DATA_DIR, 'val_masks')
x_test_dir = os.path.join(DATA_DIR, test_image_set)
y_test_dir = os.path.join(DATA_DIR, 'test_masks')
print(x_train_dir); print(x_valid_dir); print(x_test_dir)
# classes for data loading
class Dataset:
"""
Args:
images_dir (str): path to images folder
masks_dir (str): path to segmentation masks folder
class_values (list): values of classes to extract from segmentation mask
"""
CLASSES = ['bk', 'live', 'inter', 'dead']
def __init__(
self,
images_dir,
masks_dir,
classes=None,
nb_data=None,
augmentation=None,
preprocessing=None,
):
id_list = os.listdir(images_dir)
if nb_data ==None:
self.ids = id_list
else:
self.ids = id_list[:int(min(nb_data,len(id_list)))]
self.images_fps = [os.path.join(images_dir, image_id) for image_id in self.ids]
self.masks_fps = [os.path.join(masks_dir, image_id) for image_id in self.ids]
print(len(self.images_fps)); print(len(self.masks_fps))
self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes]
self.augmentation = augmentation
self.preprocessing = preprocessing
def __getitem__(self, i):
# read data
image = cv2.imread(self.images_fps[i])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(self.masks_fps[i], 0)
masks = [(mask == v) for v in self.class_values]
mask = np.stack(masks, axis=-1).astype('float')
# add background if mask is not binary
if mask.shape[-1] != 1:
background = 1 - mask.sum(axis=-1, keepdims=True)
mask = np.concatenate((mask, background), axis=-1)
return image, mask
def __len__(self):
return len(self.ids)
class Dataloder(tf.keras.utils.Sequence):
"""Load data from dataset and form batches
Args:
dataset: instance of Dataset class for image loading and preprocessing.
        batch_size: Integer number of images in batch.
shuffle: Boolean, if `True` shuffle image indexes each epoch.
"""
def __init__(self, dataset, batch_size=1, shuffle=False):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
self.indexes = np.arange(len(dataset))
self.on_epoch_end()
def __getitem__(self, i):
# collect batch data
start = i * self.batch_size
stop = (i + 1) * self.batch_size
data = []
for j in range(start, stop):
data.append(self.dataset[j])
# transpose list of lists
batch = [np.stack(samples, axis=0) for samples in zip(*data)]
return (batch[0], batch[1])
def __len__(self):
"""Denotes the number of batches per epoch"""
return len(self.indexes) // self.batch_size
def on_epoch_end(self):
"""Callback function to shuffle indexes each epoch"""
if self.shuffle:
self.indexes = np.random.permutation(self.indexes)
BATCH_SIZE = args.batch
CLASSES = ['live', 'inter', 'dead']
LR = args.lr
EPOCHS = args.epoch
n_classes = (len(CLASSES) + 1)
#create model
model = unet(classes=n_classes, activation='softmax')
# define optimizer
optim = tf.keras.optimizers.Adam(LR)
class_weights = [1,1,1,1]
# Segmentation models losses can be combined together by '+' and scaled by integer or float factor
# set class weights for dice_loss (car: 1.; pedestrian: 2.; background: 0.5;)
if args.loss =='focal+dice':
dice_loss = sm.losses.DiceLoss(class_weights=np.array(class_weights))
focal_loss = sm.losses.CategoricalFocalLoss()
total_loss = dice_loss + focal_loss
elif args.loss =='dice':
total_loss = sm.losses.DiceLoss(class_weights=np.array(class_weights))
elif args.loss == 'focal':
total_loss = sm.losses.CategoricalFocalLoss()
elif args.loss == 'ce':
total_loss = sm.losses.CategoricalCELoss()
elif args.loss == 'wce':
# weighted wce (live, injured, dead, bk)
#ratios: 0.01 , 0.056, 0.004, 0.929
class_weights = [100., 17.86, 250., 1.08]
total_loss = sm.losses.CategoricalCELoss(class_weights=np.array(class_weights))
metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
# compile keras model with defined optimizer, loss and metrics
model.compile(optimizer=optim, loss=total_loss, metrics = metrics)
# Dataset for train images
train_dataset = Dataset(
x_train_dir,
y_train_dir,
classes=CLASSES,
nb_data=args.train
)
# Dataset for validation images
valid_dataset = Dataset(
x_valid_dir,
y_valid_dir,
classes=CLASSES
)
train_dataloader = Dataloder(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_dataloader = Dataloder(valid_dataset, batch_size=1, shuffle=False)
print(train_dataloader[0][0].shape)
# check shapes for errors
assert train_dataloader[0][0].shape == (BATCH_SIZE, val_dim, val_dim, 3)
assert train_dataloader[0][1].shape == (BATCH_SIZE, val_dim, val_dim, n_classes)
model_folder = '/data/natcom_models/std_unet/{}'.format(model_name) if args.docker else './models/natcom_models/std_unet/{}'.format(model_name)
generate_folder(model_folder)
def concat_tile(im_list_2d):
return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])
def save_images(file_name, vols):
shp = vols.shape
ls, lx, ly, lc = shp
sx, sy = int(lx/256), int(ly/256)
vols = vols[:,::sx,::sy,:]
slice_list, rows = [], []
for si in range(vols.shape[0]):
slice = vols[si,:,:,:]
rows.append(slice)
if si%4 == 3 and not si == vols.shape[0]-1:
slice_list.append(rows)
rows = []
save_img = concat_tile(slice_list)
cv2.imwrite(file_name, save_img)
def map2rgb(maps):
shp = maps.shape
rgb_maps = np.zeros((shp[0], shp[1], shp[2], 3), dtype=np.uint8)
rgb_maps[:,:,:,0] = np.uint8((maps==0)*255)
rgb_maps[:,:,:,1] = np.uint8((maps==1)*255)
rgb_maps[:,:,:,2] = np.uint8((maps==2)*255)
return rgb_maps
class HistoryPrintCallback(tf.keras.callbacks.Callback):
def __init__(self):
super(HistoryPrintCallback, self).__init__()
self.history = {}
def on_epoch_end(self, epoch, logs=None):
if logs:
for key in logs.keys():
if epoch == 0:
self.history[key] = []
self.history[key].append(logs[key])
if epoch%5 == 0:
plot_history_for_callback(model_folder+'/train_history.png', self.history)
save_history_for_callback(model_folder, self.history)
gt_vols, pr_vols = [],[]
for i in range(0, len(valid_dataset),int(len(valid_dataset)/36)):
gt_vols.append(valid_dataloader[i][1])
pr_vols.append(self.model.predict(valid_dataloader[i]))
gt_vols = np.concatenate(gt_vols, axis = 0); gt_map = map2rgb(np.argmax(gt_vols,axis =-1))
pr_vols = np.concatenate(pr_vols, axis = 0); pr_map = map2rgb(np.argmax(pr_vols,axis =-1))
if epoch == 0:
save_images(model_folder+'/ground_truth.png'.format(epoch), gt_map)
save_images(model_folder+'/pr-{}.png'.format(epoch), pr_map)
# define callbacks for learning rate scheduling and best checkpoints saving
callbacks = [
tf.keras.callbacks.ModelCheckpoint(model_folder+'/best_model-{epoch:03d}.h5', save_weights_only=True, save_best_only=True, mode='min'),
HistoryPrintCallback(),
]
# train model
history = model.fit_generator(
train_dataloader,
steps_per_epoch=len(train_dataloader),
epochs=EPOCHS,
callbacks=callbacks,
validation_data=valid_dataloader,
validation_steps=len(valid_dataloader),
)
|
StarcoderdataPython
|
1674895
|
<reponame>fxjung/obsidian_tools
import typer
import logging
import asyncio
from pathlib import Path
from obsidian_tools.watch import main
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s: %(message)s")
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logging.getLogger("").setLevel(logging.WARNING)
logging.getLogger("").addHandler(ch)
log = logging.getLogger(__name__)
app = typer.Typer()
@app.command()
def watch(
target: Path = typer.Argument(
...,
exists=True,
file_okay=True,
dir_okay=False,
writable=True,
readable=True,
resolve_path=True,
)
):
asyncio.run(main(target))
@app.callback(invoke_without_command=True)
def main_cli(ctx: typer.Context, debug: bool = typer.Option(False)):
if debug:
logging.getLogger("").setLevel(logging.DEBUG)
if ctx.invoked_subcommand is not None:
return
print("Welcome to obsidian-tools")
app()
|
StarcoderdataPython
|
11335418
|
<reponame>Izocel/PythonBookHero
import os
import sys
from typing import *
from mysql.connector.connection import *
from getpass import getpass
import hashlib
from datetime import datetime
# Global variables
BD_CONNECTION = {}
BD_CONFIG = {}
CURSEUR = {}
BASETABLE = ''
def get_config(key:str = '') -> Any:
global BD_CONFIG
if(key == ''):
return BD_CONFIG
else:
return BD_CONFIG[key]
def disconnect_from_mysql() -> bool:
global BD_CONNECTION
global BD_CONFIG
global CURSEUR
CURSEUR.reset()
CURSEUR.close()
CURSEUR = {}
BD_CONNECTION.disconnect()
BD_CONNECTION = {}
BD_CONFIG = {}
print("\n La session SQL est terminée")
return True
####-####-####-#### MySQL App Connection ####-####-####-####
def connect_to_mysql(config_input:dict = {}, autocommit:bool = False, max_retry:int = 5) -> CursorBase:
global BD_CONNECTION
global BD_CONFIG
global CURSEUR
max_retry = 1 if(max_retry == 0) else max_retry
max_retry = abs(max_retry)
max_retry = min(max_retry, 15)
for x in range(max_retry):
if( type(BD_CONNECTION) is not MySQLConnection ):
BD_CONFIG = {
'host' : config_input['host'],
'user' : config_input['user'],
'password' : config_input['password'],
'database' : config_input['database'],
'autocommit': autocommit
}
BD_CONNECTION = MySQLConnection()
BD_CONNECTION.connect(**BD_CONFIG)
config_warning(BD_CONNECTION)
BD_CONFIG['database'] = 'python_book_hero'
CURSEUR = BD_CONNECTION.cursor()
print("\n La session SQL est établie")
return CURSEUR
def config_warning( connection ) -> None:
autocommit = connection.autocommit
if( autocommit == False):
print("\n\n'Autocommit' est: " + str(autocommit) )
print("Les transactions ne seront pas automatiquement soumissent au LGBD...")
print("Des confirmations vous seront demandées lors d'insertions/suppressions/modifications.\n")
else:
print("\n\n!!!!! Attention 'Autocommit' est: " + str(autocommit) + " !!!!!")
print("Les transactions seront automatiquement soumissent au LGBD...\n")
def show_databases_querry() -> str:
querry = "SHOW DATABASES;"
return querry
def show_tables_querry(database) -> str:
querry = "SHOW TABLES FROM " + database
return querry
def select_data_querry(table:str, fields:str = '*', where:str = '', order:str = '', group:str = '', limit:str = '') -> str:
querry = "SELECT "+ fields + " FROM " + table
if(where != ''):
querry += " " + where
if(order != ''):
querry += " " + order
if(group != ''):
querry += " " + group
if(limit != ''):
querry += " " + limit
return querry
def select_colum_name_type_querry(table, database) -> str:
querry = "SELECT COLUMN_NAME,COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS "
querry +="WHERE TABLE_SCHEMA='" + database +"' AND TABLE_NAME='" + table + "';"
return querry
def CURSEUR_name_and_type(CURSEUR, table, database) -> Dict[str,Any]:
tpye_and_colum_querry = select_colum_name_type_querry(table, database)
CURSEUR.execute(tpye_and_colum_querry)
table_resultat = []
table_type = []
table_col = []
for(col, col_type) in CURSEUR:
        col_type_format = col_type.decode() if isinstance(col_type, bytes) else str(col_type)
table_resultat.append( (col, col_type_format) )
table_type.append(col_type_format)
table_col.append(col)
return {'querry': tpye_and_colum_querry, 'results': table_resultat, 'types': table_type, 'names': table_col}
def insertion_querry(table:str, inserts = [[]], champs = []) -> Dict[str,Any]:
querry = "INSERT INTO " + table
champsConcat = ' ('
for champ in champs:
champsConcat += champ
if(champ == champs[len(champs)-1]):
champsConcat += ')'
continue
champsConcat += ", "
typeValeurString = '('
x = 0
valeursSql = []
for valeurs in inserts:
tempValeur = []
for valeur in valeurs:
tempValeur.append(valeur)
if( x == 0):
typeValeurString += "%s"
if(valeur == valeurs[len(valeurs)-1]):
typeValeurString += ')'
continue
typeValeurString += ", "
valeursSql.append(tuple(tempValeur))
x += 1
if(champ):
querry += champsConcat
if(valeur):
querry += " VALUES " + typeValeurString
return {
'sql': querry,
'val' : valeursSql
}
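# A minimal usage sketch (hypothetical 'users' table and columns) showing the shape of
# the returned dict, which is meant to be fed to cursor.executemany():
#   q = insertion_querry('users', [['alice', 31], ['bob', 27]], ['name', 'age'])
#   q['sql'] -> "INSERT INTO users (name, age) VALUES (%s, %s)"
#   q['val'] -> [('alice', 31), ('bob', 27)]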
def update_querry(table :str, updates :list[list], champs :list, conds_list :list[list]) -> Dict[str,Any]:
querry = "UPDATE " + table + " SET "
champsConcat = ''
for champ in champs:
champsConcat += champ
if(champ == champs[len(champs)-1]):
champsConcat += " = %s "
continue
champsConcat += " = %s, "
valeursSql = []
for valeurs in updates:
tempValeur = []
for valeur in valeurs:
tempValeur.append(valeur)
condString = ''
if(len(conds_list) == 2):
for valeurs in conds_list['cond_valeurs']:
for valeur in valeurs:
tempValeur.append(valeur)
for champ in conds_list['cond_champs']:
condString += champ + " = %s AND "
valeursSql.append(tuple(tempValeur))
if(champ):
querry += champsConcat
if(condString != ''):
querry += "WHERE " + condString[0:len(condString)-5]
return {
'sql': querry,
'val' : valeursSql
}
def convert_string_to_sql_type(input_str: str, sql_type_str: str) -> str:
if( sql_type_str.startswith("char") ):
return input_str
if( sql_type_str.startswith("varchar") ):
return input_str
if( sql_type_str.startswith("int") ):
return int(input_str)
if( sql_type_str.startswith("float") ):
return float(input_str)
if( sql_type_str.startswith("double") ):
return round(float(input_str),2)
if( sql_type_str.startswith("year") ):
return int(input_str)
if( sql_type_str.startswith("date") ):
return input_str
if( sql_type_str.startswith("time") ):
return input_str
if( sql_type_str.startswith("time", 4) ):
return input_str
if( sql_type_str.startswith("stamp", 4) ):
return input_str
pass
def dataTypeStringNotation(value: Any) -> str:
percent_char = chr(37)
if(type(value) is str):
return percent_char + 's'
if(type(value) == int):
return percent_char + 'i'
if(type(value) is float):
return percent_char + 'f'
return ''
def fetch_CURSEUR(CURSEUR, print_me = False) -> List[List[Any]]:
if(print_me == True):
print("\n")
table = []
for results_row in CURSEUR:
table_row = []
for results in results_row:
table_row.append(results)
if(print_me == True):
print(table_row)
table.append(table_row)
CURSEUR.reset()
return table
def hash_sha2_data(datalist:list[str] = [], hash_length:int = 256) -> List[str]:
hashes = []
if hash_length == 224:
for clear_str in datalist:
string = clear_str
encoded = string.encode()
result = hashlib.sha224(encoded)
hashes.append(result.hexdigest())
elif hash_length == 384:
for clear_str in datalist:
string = clear_str
encoded = string.encode()
result = hashlib.sha384(encoded)
hashes.append(result.hexdigest())
elif hash_length == 512:
for clear_str in datalist:
string = clear_str
encoded = string.encode()
result = hashlib.sha512(encoded)
hashes.append(result.hexdigest())
else: # sha256 if hash_length not supported
for clear_str in datalist:
string = clear_str
encoded = string.encode()
result = hashlib.sha256(encoded)
hashes.append(result.hexdigest())
return hashes
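# A minimal usage sketch: hashing two strings with the default SHA-256 yields
# 64-character hex digests (224, 384 and 512 are the other supported lengths).
if __name__ == '__main__':
    digests = hash_sha2_data(['alice', 'bob'])
    assert all(len(d) == 64 for d in digests)
    print(digests)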
|
StarcoderdataPython
|
11301832
|
from random import randint
def get_random_hex_color():
"""Generates and returns a random hex color."""
color = '#' + ''.join(['{:02X}'.format(randint(0, 255)) for _ in range(3)])
return color
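# Example (output varies per call): prints something like '#3FA2C7'.
if __name__ == '__main__':
    print(get_random_hex_color())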
|
StarcoderdataPython
|
1756828
|
<gh_stars>1-10
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from camera_calibration import calib, undistort
from threshold import get_combined_gradients, get_combined_hls, combine_grad_hls
from line import Line, get_perspective_transform, get_lane_lines_img, illustrate_driving_lane, illustrate_info_panel, illustrate_driving_lane_with_topdownview
from moviepy.editor import VideoFileClip
import logging
logging.basicConfig(filename='test.log', level=logging.DEBUG,
format='%(asctime)s:%(levelname)s:%(message)s')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Select desired input name/type #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#input_type = "image"
#input_type = 'image'
input_type = 'video'
#input_type = 'frame_by_frame'
#input_name = 'test_images/calibration1.jpg'
input_name = 'project_video.mp4'
#input_name = 'challenge_video.mp4'
#input_name = 'harder_challenge_video.mp4'
# If input_type is `image`, select whether you'd like to save intermediate images or not.
save_img = True
left_line = Line()
right_line = Line()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Tune Parameters for different inputs #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
th_sobelx, th_sobely, th_mag, th_dir = (35, 100), (30, 255), (30, 255), (0.7, 1.3)
th_h, th_l, th_s = (10, 100), (0, 60), (85, 255)
# camera matrix & distortion coefficient
mtx, dist = calib()
def pipeline(frame):
# Correcting for Distortion
undist_img = undistort(frame, mtx, dist)
# resize video
undist_img = cv2.resize(undist_img, None, fx=1 / 2, fy=1 / 2, interpolation=cv2.INTER_AREA)
rows, cols = undist_img.shape[:2]
combined_gradient = get_combined_gradients(undist_img, th_sobelx, th_sobely, th_mag, th_dir)
combined_hls = get_combined_hls(undist_img, th_h, th_l, th_s)
combined_result = combine_grad_hls(combined_gradient, combined_hls)
c_rows, c_cols = combined_result.shape[:2]
s_LTop2, s_RTop2 = [c_cols / 2 - 24, 5], [c_cols / 2 + 24, 5]
s_LBot2, s_RBot2 = [110, c_rows], [c_cols - 110, c_rows]
src = np.float32([s_LBot2, s_LTop2, s_RTop2, s_RBot2])
dst = np.float32([(170, 720), (170, 0), (550, 0), (550, 720)])
warp_img, M, Minv = get_perspective_transform(combined_result, src, dst, (720, 720))
searching_img = get_lane_lines_img(warp_img, left_line, right_line)
w_comb_result, w_color_result = illustrate_driving_lane(searching_img, left_line, right_line)
# Drawing the lines back down onto the road
color_result = cv2.warpPerspective(w_color_result, Minv, (c_cols, c_rows))
lane_color = np.zeros_like(undist_img)
lane_color[220:rows - 12, 0:cols] = color_result
# Combine the result with the original image
result = cv2.addWeighted(undist_img, 1, lane_color, 0.3, 0)
info_panel, birdeye_view_panel = np.zeros_like(result), np.zeros_like(result)
info_panel[5:110, 5:325] = (255, 255, 255)
birdeye_view_panel[5:110, cols-111:cols-6] = (255, 255, 255)
info_panel = cv2.addWeighted(result, 1, info_panel, 0.2, 0)
birdeye_view_panel = cv2.addWeighted(info_panel, 1, birdeye_view_panel, 0.2, 0)
road_map = illustrate_driving_lane_with_topdownview(w_color_result, left_line, right_line)
birdeye_view_panel[10:105, cols-106:cols-11] = road_map
birdeye_view_panel = illustrate_info_panel(birdeye_view_panel, left_line, right_line)
return birdeye_view_panel
cap = cv2.VideoCapture(input_name)
if (cap.isOpened()== False):
print("Error opening video stream or file")
while(cap.isOpened()):
    ret, frame = cap.read()
    if ret == True:
        frame = pipeline(frame)
        cv2.imshow('Frame',frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
else:
break
cap.release()
cv2.destroyAllWindows()
file_video = input_name
if input_type == 'video':
white_output = "./output_videos/video_out_1.mp4"
frame = VideoFileClip(file_video)
white_clip = frame.fl_image(pipeline)
#white_clip.write_videofile(white_output, audio=False)
logging.debug("This file is ran and outputs are generated",file_video)
|
StarcoderdataPython
|
5100986
|
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('coapUri')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import re
from . import coapUtils as u
from . import coapOption as o
from . import coapException as e
from . import coapDefines as d
def uri2options(uri):
'''
\brief Converts a coap URI into a list of CoAP options.
Examples:
calling this function with uri="coap://[aaaa::1]:1234/test1/test2"
returns
(
        'aaaa::1',
1234,
(
[Uri-Path('test1'),
Uri-Path('test2')],
),
)
Calling this function with uri="http://[aaaa::1]/test1/test2"
raises a coapMalformattedUri.
\param[in] uri A string representing a CoAP URI.
\raises coapMalformattedUri When the string passed in the uri parameter
is not a valid CoAP URI.
\return A tuple with the following elements;
- at index 0, the destination IP address or host name (a string).
- at index 1, the UDP port, possibly default CoAP port if none is
explicitly specified..
- at index 2, a tuple of CoAP options, i.e. (sub-)instances of the
#coapOption objects.
'''
options = []
log.debug('uri : {0}'.format(uri))
# scheme
if not uri.startswith(d.COAP_SCHEME):
raise e.coapMalformattedUri('does not start with {0}'.format(d.COAP_SCHEME))
# remove scheme
uri = uri.split(d.COAP_SCHEME,1)[1]
# host and port
host = None
port = None
hostPort = uri.split('/')[0]
if (not host) or (not port):
        # try format [aaaa::1]:1234
m = re.match('\[([0-9a-fA-F:]+)\]:([0-9]+)',hostPort)
if m:
host = m.group(1)
port = int(m.group(2))
if (not host) or (not port):
# try format [aaaa::1]
m = re.match('\[([0-9a-fA-F:]+)\]',hostPort)
if m:
host = m.group(1)
port = d.DEFAULT_UDP_PORT
if (not host) or (not port):
# try formats:
# 172.16.58.3:1234
m = re.match('([0-9.]+):([0-9]+)',hostPort)
if m:
host = '::ffff:{0}'.format(m.group(1))
port = int(m.group(2))
if (not host) or (not port):
# try formats:
# www.example.com:1234
        m = re.match('([0-9a-zA-Z.\-\_]+):([0-9]+)',hostPort)
if m:
host = m.group(1)
port = int(m.group(2))
if (not host) or (not port):
# try formats:
# 172.16.58.3
m = re.match('([0-9.]+)',hostPort)
if m:
host = '::ffff:{0}'.format(m.group(1))
port = d.DEFAULT_UDP_PORT
if (not host) or (not port):
# try formats:
# www.example.com
m = re.match('([0-9a-zA-Z.\-\_]+)', hostPort)
if m:
host = m.group(1)
port = d.DEFAULT_UDP_PORT
if (not host) or (not port):
raise e.coapMalformattedUri('invalid host and port {0}'.format(hostPort))
# log
log.debug('host : {0}'.format(host))
log.debug('port : {0}'.format(port))
# remove hostPort
uri = uri.split(hostPort,1)[1]
# Uri-path
paths = [p for p in uri.split('?')[0].split('/') if p]
log.debug('paths : {0}'.format(paths))
for p in paths:
options += [o.UriPath(path=p)]
# Uri-query
if len(uri.split('?'))>1:
queries = [q for q in uri.split('?')[1].split('&') if q]
log.debug('queries : {0}'.format(queries))
raise NotImplementedError()
host=host.lower()
host=u.trimAddress(host)
return (host,port,options)
def options2path(options):
returnVal = []
for option in options:
if isinstance(option,o.UriPath):
returnVal += [option.path]
returnVal = '/'.join(returnVal)
return returnVal
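# Hedged usage sketch (added for illustration, not part of the original module;
# assumes d.COAP_SCHEME == 'coap://' in coapDefines):
#
#   host, port, options = uri2options('coap://[aaaa::1]:5683/test1/test2')
#   # host is the lower-cased, trimmed address 'aaaa::1', port is 5683,
#   # and options holds two Uri-Path options, so:
#   options2path(options)   # -> 'test1/test2'
#
# Note that URIs containing a query string currently raise NotImplementedError.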
|
StarcoderdataPython
|
11321702
|
<reponame>sa-y-an/retro
from django.http.response import HttpResponse
from django.shortcuts import render
from .classifier import nn_predictions
def home(request) :
return render(request, 'home/home.html')
def about(request) :
return render(request, 'home/about.html')
# modalities
def eda(request):
if request.method == 'POST' :
path = request.FILES['myfile'] # this is my file
ac_class, confidence_score, secs = nn_predictions(path,'EDA')
print(path)
print('here .....................')
print(ac_class, confidence_score)
return render(request, 'home/eda.html',{ "class" : ac_class, "score" : confidence_score , "path" :path , "time" : secs } )
else :
return render(request, 'home/eda.html')
def emg(request) :
if request.method == 'POST' :
path = request.FILES['myfile'] # this is my file
ac_class, confidence_score, secs = nn_predictions(path,'EMG')
print(path)
print('here .....................')
print(ac_class, confidence_score)
return render(request, 'home/emg.html',{ "class" : ac_class, "score" : confidence_score, "path" :path , "time" : secs } )
else :
return render(request, 'home/emg.html')
def ecg(request) :
if request.method == 'POST' :
path = request.FILES['myfile'] # this is my file
ac_class, confidence_score, secs = nn_predictions(path,'ECG')
print(path)
print('here .....................')
print(ac_class, confidence_score)
return render(request, 'home/ecg.html',{ "class" : ac_class, "score" : confidence_score , "path" :path, "time" : secs } )
else :
return render(request, 'home/ecg.html')
def resp(request) :
if request.method == 'POST' :
path = request.FILES['myfile'] # this is my file
ac_class, confidence_score, secs = nn_predictions(path,'Resp')
print(path)
print('here .....................')
print(ac_class, confidence_score)
return render(request, 'home/resp.html',{ "class" : ac_class, "score" : confidence_score , "path" :path , "time" : secs } )
else :
return render(request, 'home/resp.html')
def temp(request) :
if request.method == 'POST' :
path = request.FILES['myfile'] # this is my file
ac_class, confidence_score, secs = nn_predictions(path,'Temp')
print(path)
print('here .....................')
print(ac_class, confidence_score)
return render(request, 'home/temp.html',{ "class" : ac_class, "score" : confidence_score, "path" :path, "time" : secs } )
else :
return render(request, 'home/temp.html')
|
StarcoderdataPython
|
3254750
|
<reponame>Rogdham/bigxml<filename>src/bigxml/handle_mgr.py
from bigxml.handler_creator import create_handler
from bigxml.utils import last_item_or_none
class HandleMgr:
_handle = None
def iter_from(self, *handlers):
if not self._handle:
raise RuntimeError("No handle to use")
handler = create_handler(*handlers)
return self._handle(handler) # pylint: disable=not-callable
def return_from(self, *handlers):
return last_item_or_none(self.iter_from(*handlers))
|
StarcoderdataPython
|
3210926
|
<filename>reconstrcut_tldrQ_highlights.py
import json
highlited_file = '/disk1/sajad/datasets/reddit/tldr-9+/highlights-test/'
with open('/disk1/sajad/datasets/reddit/tldr-9+/test.json') as fR:
for l in fR:
ent = json.loads(l.strip())
src = ent['document'].replace('</s><s> ', '')
summary = ent['summary']
to_be_written = f'{src.strip()}\n@highlights\n{summary.strip()}'
with open(highlited_file + ent['id'].replace('.json', ''), mode='w') as fW:
fW.write(to_be_written)
|
StarcoderdataPython
|
1736738
|
# Copyright(C) 2011,2012,2013 by Abe developers.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/gpl.html>.
def looks_like_json(val):
return val[:1] in ('"', '[', '{') or val in ('true', 'false', 'null')
def parse_argv(argv, conf={}, config_name='config', strict=False):
arg_dict = conf.copy()
args = lambda var: arg_dict[var]
args.func_dict = arg_dict
i = 0
while i < len(argv):
arg = argv[i]
if arg == '--':
i += 1
break
if arg[:2] != '--':
break
# Strip leading "--" to form a config variable.
# --var=val and --var val are the same. --var+=val is different.
split = arg[2:].split('=', 1)
adding = False
if len(split) == 1:
var = split[0]
if i + 1 < len(argv) and argv[i + 1][:2] != '--':
i += 1
val = argv[i]
else:
val = True
else:
var, val = split
if var[-1:] == '+':
var = var[:-1]
adding = True
if val is not True and looks_like_json(val):
val = parse_json(val)
var = var.replace('-', '_')
if var == config_name:
_include(set(), val, arg_dict, config_name, strict)
elif var not in conf:
break
elif adding:
add(arg_dict, var, val)
else:
arg_dict[var] = val
i += 1
return args, argv[i:]
def include(filename, conf={}, config_name='config', strict=False):
_include(set(), filename, conf, config_name, strict)
return conf
class _Reader:
__slots__ = ['fp', 'lineno', 'line']
def __init__(rdr, fp):
rdr.fp = fp
rdr.lineno = 1
rdr.line = rdr.fp.read(1)
def eof(rdr):
return rdr.line == ''
def getc(rdr):
if rdr.eof():
return ''
ret = rdr.line[-1]
if ret == '\n':
rdr.lineno += 1
rdr.line = ''
c = rdr.fp.read(1)
if c == '':
rdr.line = ''
rdr.line += c
return ret
def peek(rdr):
if rdr.eof():
return ''
return rdr.line[-1]
def _readline(rdr):
ret = rdr.fp.readline()
rdr.line += ret
return ret
def readline(rdr):
ret = rdr.peek() + rdr._readline()
rdr.getc() # Consume the newline if not at EOF.
return ret
def get_error_context(rdr, e):
e.lineno = rdr.lineno
if not rdr.eof():
e.offset = len(rdr.line)
if rdr.peek() != '\n':
rdr._readline()
e.text = rdr.line
def _include(seen, filename, conf, config_name, strict):
if filename in seen:
raise Exception('Config file recursion')
with open(filename) as fp:
rdr = _Reader(fp)
try:
entries = read(rdr)
except SyntaxError, e:
if e.filename is None:
e.filename = filename
if e.lineno is None:
rdr.get_error_context(e)
raise
for var, val, additive in entries:
var = var.replace('-', '_')
if var == config_name:
import os
_include(seen | set(filename),
os.path.join(os.path.dirname(filename), val), conf,
config_name, strict)
elif var not in conf:
if strict:
raise ValueError(
"Unknown parameter `%s' in %s" % (var, filename))
elif additive and conf[var] is not None:
add(conf, var, val)
else:
conf[var] = val
return
def read(rdr):
"""
Read name-value pairs from file and return the results as a list
of triples: (name, value, additive) where "additive" is true if
"+=" occurred between name and value.
"NAME=VALUE" and "NAME VALUE" are equivalent. Whitespace around
names and values is ignored, as are lines starting with '#' and
empty lines. Values may be JSON strings, arrays, or objects. A
value that does not start with '"' or '{' or '[' and is not a
boolean is read as a one-line string. A line with just "NAME"
stores True as the value.
"""
entries = []
def store(name, value, additive):
entries.append((name, value, additive))
def skipspace(rdr):
while rdr.peek() in (' ', '\t', '\r'):
rdr.getc()
while True:
skipspace(rdr)
if rdr.eof():
break
if rdr.peek() == '\n':
rdr.getc()
continue
if rdr.peek() == '#':
rdr.readline()
continue
name = ''
while rdr.peek() not in (' ', '\t', '\r', '\n', '=', '+', ''):
name += rdr.getc()
if rdr.peek() not in ('=', '+'):
skipspace(rdr)
if rdr.peek() in ('\n', ''):
store(name, True, False)
continue
additive = False
if rdr.peek() in ('=', '+'):
if rdr.peek() == '+':
rdr.getc()
if rdr.peek() != '=':
raise SyntaxError("'+' without '='")
additive = True
rdr.getc()
skipspace(rdr)
if rdr.peek() in ('"', '[', '{'):
js = scan_json(rdr)
try:
store(name, parse_json(js), additive)
except ValueError, e:
raise wrap_json_error(rdr, js, e)
continue
# Unquoted, one-line string.
value = ''
while rdr.peek() not in ('\n', ''):
value += rdr.getc()
value = value.strip()
# Booleans and null.
if value == 'true':
value = True
elif value == 'false':
value = False
elif value == 'null':
value = None
store(name, value, additive)
return entries
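# Illustrative sketch (added; the config keys below are hypothetical): given a
# file containing
#
#     port 8333
#     datadir = /var/lib/abe
#     chains += ["Bitcoin"]
#     # a comment line
#
# read() returns triples roughly like
#     [('port', '8333', False),
#      ('datadir', '/var/lib/abe', False),
#      ('chains', ['Bitcoin'], True)]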
def add(conf, var, val):
if var not in conf:
conf[var] = val
return
if isinstance(val, dict) and isinstance(conf[var], dict):
conf[var].update(val)
return
if not isinstance(conf[var], list):
conf[var] = [conf[var]]
if isinstance(val, list):
conf[var] += val
else:
conf[var].append(val)
# Scan to end of JSON object. Grrr, why can't json.py do this without
# reading all of fp?
def _scan_json_string(rdr):
ret = rdr.getc() # '"'
while True:
c = rdr.getc()
if c == '':
raise SyntaxError('End of file in JSON string')
# Accept raw control characters for readability.
if c == '\n':
c = '\\n'
if c == '\r':
c = '\\r'
if c == '\t':
c = '\\t'
ret += c
if c == '"':
return ret
if c == '\\':
ret += rdr.getc()
def _scan_json_nonstring(rdr):
# Assume we are at a number or true|false|null.
# Scan the token.
ret = ''
while rdr.peek() != '' and rdr.peek() in '-+0123456789.eEtrufalsn':
ret += rdr.getc()
return ret
def _scan_json_space(rdr):
# Scan whitespace including "," and ":". Strip comments for good measure.
ret = ''
while not rdr.eof() and rdr.peek() in ' \t\r\n,:#':
c = rdr.getc()
if c == '#':
c = rdr.readline() and '\n'
ret += c
return ret
def _scan_json_compound(rdr):
# Scan a JSON array or object.
ret = rdr.getc()
if ret == '{': end = '}'
if ret == '[': end = ']'
ret += _scan_json_space(rdr)
if rdr.peek() == end:
return ret + rdr.getc()
while True:
if rdr.eof():
raise SyntaxError('End of file in JSON value')
ret += scan_json(rdr)
ret += _scan_json_space(rdr)
if rdr.peek() == end:
return ret + rdr.getc()
def scan_json(rdr):
# Scan a JSON value.
c = rdr.peek()
if c == '"':
return _scan_json_string(rdr)
if c in ('[', '{'):
return _scan_json_compound(rdr)
ret = _scan_json_nonstring(rdr)
if ret == '':
raise SyntaxError('Invalid JSON')
return ret
def parse_json(js):
import json
return json.loads(js)
def wrap_json_error(rdr, js, e):
import re
match = re.search(r'(.*): line (\d+) column (\d+)', e.message, re.DOTALL)
if match:
e = SyntaxError(match.group(1))
json_lineno = int(match.group(2))
e.lineno = rdr.lineno - js.count('\n') + json_lineno - 1
e.text = js.split('\n')[json_lineno - 1]
e.offset = int(match.group(3))
if json_lineno == 1 and json_line1_column_bug():
e.offset += 1
return e
def json_line1_column_bug():
ret = False
try:
parse_json("{:")
except ValueError, e:
if "column 1" in e.message:
ret = True
finally:
return ret
|
StarcoderdataPython
|
363604
|
"""The config show command."""
import sys
from putio_cli.commands.config import Config
class Show(Config):
"""
show command to print configuration file
Usage:
putio-cli config show
"""
def run(self):
try:
cfgfile = open(self.cfgfilename, 'r')
except IOError:
sys.exit(
'Config file does not exist, please use template subcommand first')
        print(cfgfile.read())
cfgfile.close()
|
StarcoderdataPython
|
391178
|
"""
DESCRIPTORS: local ang global descriptors for a rectangular (part of an) image.
"""
__author__ = '<NAME>'
|
StarcoderdataPython
|
1764639
|
import time
import numpy as np
import tensorflow as tf
from data import num_labels
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
def train_model_in_batches(model, datasets, steps, dropout_keep_prob, load_model = False):
batch_size = model.batch_size
start_time = time.time()
steps_to_training_accuracies = {}
steps_to_validation_predictions = {}
with tf.Session(graph=model.graph) as session:
model.session = session # Save the session for future visualisation use.
init_op = tf.initialize_all_variables()
#saver = tf.train.Saver()
session.run(init_op) # All variables must be initialised before the saver potentionally restores the checkpoint below.
print("Initialized")
model_name = 'model'
if load_model:
ckpt = tf.train.get_checkpoint_state("./")
if ckpt and ckpt.model_checkpoint_path:
model.saver.restore(session, ckpt.model_checkpoint_path)
print("model loaded")
else:
                raise RuntimeError("No checkpoint.")
learning_check_step = 500
minimum_validation_step_size = 1000
validation_step_size = int(max(steps / 100, minimum_validation_step_size))
save_step_size = 50000
untrained_validation_accuracy = (100 / num_labels) * 1.2
premature_stop_steps_minimum = 3000
for step in range(steps):
offset = (step * batch_size) % (datasets.train_labels.shape[0] - batch_size)
batch_data = datasets.train_dataset[offset:(offset + batch_size), :, :, :]
batch_labels = datasets.train_labels[offset:(offset + batch_size), :]
feed_dict = {
model.tf_train_dataset: batch_data,
model.tf_train_labels : batch_labels,
model.dropout_keep_probability: dropout_keep_prob
}
_, l, predictions = session.run(
[model.optimizer, model.loss, model.train_prediction], feed_dict=feed_dict)
if (step % validation_step_size == 0) or step == learning_check_step:
training_accuracy = accuracy(predictions, batch_labels)
steps_to_training_accuracies[step] = training_accuracy
validation_predictions = eval_predictions(session, model, datasets.valid_dataset, datasets.valid_labels)
#print "validation_predictions shape: ", validation_predictions.shape, " valid_labels shape: ", datasets.valid_labels.shape
steps_to_validation_predictions[step] = validation_predictions
valdiation_accuracy = accuracy(validation_predictions, datasets.valid_labels)
print("step:", step, "minibatch loss:", l, "minibatch accuracy: %.1f%%" % training_accuracy, "validation accuracy: %.1f%%" % valdiation_accuracy)
if valdiation_accuracy < untrained_validation_accuracy and step >= premature_stop_steps_minimum:
print("Premature stop due to low validation accuracy.")
return steps_to_training_accuracies, steps_to_validation_predictions
if step % save_step_size == 0:
save_path = model.saver.save(session, "./%s.ckpt" % model_name, global_step=model.global_step)
save_path = model.saver.save(session, "./%s.ckpt" % model_name, global_step=model.global_step)
test_predictions = eval_predictions(session, model, datasets.test_dataset, datasets.test_labels)
print("Test accuracy at step %s: %.1f%%\n" % (step, accuracy(test_predictions, datasets.test_labels)))
seconds_in_an_hour = 60 * 60
print("Elapsed time: %s hours" % ((time.time() - start_time) / seconds_in_an_hour))
return steps_to_training_accuracies, steps_to_validation_predictions
def eval_predictions(session, model, dataset, labels):
dataset_size = dataset.shape[0]
batch_size = model.eval_batch_size
#print "dataset_size: ", dataset_size, " batch_size: ", batch_size
if dataset_size % batch_size != 0:
raise "batch_size must be a multiple of dataset_size."
predictions = np.ndarray(shape=(dataset_size, num_labels), dtype=np.float32)
steps = dataset_size // batch_size
#print "steps: ", steps
for step in range(steps):
offset = (step * batch_size)
#print "offset ", offset
batch_data = dataset[offset:(offset + batch_size), :, :, :]
feed_dict = {
model.eval_dataset: batch_data,
}
#predictions[offset:offset+batch_size, :] = model.eval_prediction.eval(feed_dict)
predictions[offset:offset+batch_size, :] = session.run(model.eval_prediction, feed_dict=feed_dict)
return predictions
|
StarcoderdataPython
|
11242793
|
<gh_stars>0
#45 What color is square ?
# Asking for number and letter
x = input("Enter the letter : ")
y = int(input("Enter the number : "))
#Assuming a1 as black
if x == "a" or x == "c" or x == "e" or x == "g":
if y % 2 == 0:
z = "White"
else:
z = "Black"
if x == "b" or x == "d" or x == "f" or x == "h":
if y % 2 == 0:
z = "Black"
else:
z = "White"
print(z)
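# Quick sanity check (added comment): with the a1-is-black convention above,
# inputs "a"/1 and "d"/4 both print "Black", while "h"/1 prints "White".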
|
StarcoderdataPython
|
1712801
|
def find_record_dimension(d):
"""Find the record dimension (i.e. time) in a netCDF4 Dataset."""
for dim in d.dimensions:
if d.dimensions[dim].isunlimited():
return dim
return None
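# Hedged usage sketch (added; the file name and the netCDF4 package are assumptions):
#
#   from netCDF4 import Dataset
#   with Dataset('example.nc') as d:
#       rec_dim = find_record_dimension(d)  # e.g. 'time', or None if no unlimited dimension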
|
StarcoderdataPython
|
81692
|
# Input: arr[] = {1, 20, 2, 10}
# Output: 72
def single_rotation(arr,l):
temp=arr[0]
for i in range(l-1):
arr[i]=arr[i+1]
arr[l-1]=temp
def sum_calculate(arr,l):
sum=0
for i in range(l):
sum=sum+arr[i]*(i)
return sum
def max_finder(arr,l):
max=arr[0]
for i in range(l):
if max<arr[i]:
max=arr[i]
maximum=max
for i in range(l):
if max == arr[i]:
temp=i
index=temp+1
for j in range(index):
single_rotation(arr,len(arr))
arr=[10, 1, 2, 3, 4, 5, 6, 7, 8, 9]
max_finder(arr,len(arr))
result=sum_calculate(arr,len(arr))
print("Max sum is: "+ str(result))
#optimized approach
# '''Python program to find maximum value of Sum(i*arr[i])'''
# # returns max possible value of Sum(i*arr[i])
# def maxSum(arr):
# # stores sum of arr[i]
# arrSum = 0
# # stores sum of i*arr[i]
# currVal = 0
# n = len(arr)
# for i in range(0, n):
# arrSum = arrSum + arr[i]
# currVal = currVal + (i*arr[i])
# # initialize result
# maxVal = currVal
# # try all rotations one by one and find the maximum
# # rotation sum
# for j in range(1, n):
# currVal = currVal + arrSum-n*arr[n-j]
# if currVal > maxVal:
# maxVal = currVal
# # return result
# return maxVal
# # test maxsum(arr) function
# arr = [10, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# print("Max sum is: ", maxSum(arr))
|
StarcoderdataPython
|
11368550
|
from .hyper import register_hyper_optlib
def convert_param_to_skopt(param, name):
from skopt.space import Real, Integer, Categorical
if param['type'] == 'BOOL':
return Categorical([False, True], name=name)
if param['type'] == 'INT':
return Integer(low=param['min'], high=param['max'], name=name)
if param['type'] == 'STRING':
return Categorical(param['options'], name=name)
if param['type'] == 'FLOAT':
return Real(low=param['min'], high=param['max'], name=name)
if param['type'] == 'FLOAT_EXP':
return Real(low=param['min'], high=param['max'],
base=10, prior='log-uniform', name=name)
else:
raise ValueError("Didn't understand space {}.".format(param))
def get_methods_space(methods):
from skopt.space import Categorical
return [Categorical(methods)]
def convert_to_skopt_space(method, space):
return [
convert_param_to_skopt(param, name=name)
for name, param in space[method].items()
]
def skopt_init_optimizers(
self,
methods,
space,
sampler='et',
method_sampler='et',
sampler_opts=None,
method_sampler_opts=None,
):
"""Initialize the ``skopt`` optimizer.
Parameters
----------
space : dict[str, dict[str, dict]]
The search space.
sampler : str, optional
The regressor to use to optimize each method's search space, see
https://scikit-optimize.github.io/stable/modules/generated/skopt.Optimizer.html#skopt.Optimizer
.
method_sampler : str, optional
Meta-optimizer to use to select which overall method to use.
"""
from skopt.optimizer import Optimizer
sampler_opts = {} if sampler_opts is None else dict(sampler_opts)
method_sampler_opts = ({} if method_sampler_opts is None else
dict(method_sampler_opts))
if method_sampler is None:
method_sampler = sampler
self._method_chooser = Optimizer(
get_methods_space(methods),
base_estimator=method_sampler,
**method_sampler_opts
)
skopt_spaces = {
m: convert_to_skopt_space(m, space) for m in methods
}
self._param_names = {
m: [p.name for p in skopt_spaces[m]]
for m in methods
}
self._optimizers = {
m: Optimizer(skopt_spaces[m], base_estimator=sampler, **sampler_opts)
for m in methods
}
def skopt_get_setting(self):
"""Find the next parameters to test.
"""
# params = self._optimizer.ask()
# return params
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", module='skopt')
warnings.filterwarnings("ignore", module='sklearn')
method = self._method_chooser.ask()
params = self._optimizers[method[0]].ask()
names = self._param_names[method[0]]
return {
'method_token': method,
'method': method[0],
'params_token': params,
'params': dict(zip(names, params)),
}
def skopt_report_result(self, setting, trial, score):
"""Report the result of a trial to the ``chocolate`` optimizer.
"""
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", module='skopt')
warnings.filterwarnings("ignore", module='sklearn')
self._method_chooser.tell(
setting['method_token'], score)
self._optimizers[setting['method']].tell(
setting['params_token'], score)
register_hyper_optlib(
'skopt',
skopt_init_optimizers,
skopt_get_setting,
skopt_report_result,
)
|
StarcoderdataPython
|
11368864
|
from django.db import models
from questao.models import Questao
class Resposta(models.Model):
texto = models.TextField(max_length=255)
questao = models.ForeignKey(Questao,related_name="respostas" ,on_delete=models.CASCADE)
correta = models.BooleanField(default=False)
def __str__(self):
return '{id:%d,texto:%s}'% (self.id,self.texto)
|
StarcoderdataPython
|
6462865
|
import pytest
from dsp_be.logic.config import Config
from dsp_be.logic.factory import Factory
from dsp_be.logic.planet import Planet
from dsp_be.logic.stack import Stack
@pytest.fixture
def jupiter():
return Planet(
name="Earth",
resources={"fire_ice": 0.04, "hydrogen": 0.85},
exports=[],
imports=[],
star=None,
)
@pytest.fixture
def veins_utilization_two():
return Config(veins_utilization=2)
@pytest.fixture
def orbital_collector(jupiter, veins_utilization_two):
return Factory(
name="Collector #1",
machine_name="orbital_collector",
recipe_name="orbital_collector",
count=40,
planet=jupiter,
config=veins_utilization_two,
)
def test_orbital_collector(jupiter, orbital_collector):
print(jupiter.production())
assert jupiter.production() == Stack(
products={
"deuterium": 0.0,
"hydrogen": pytest.approx(196.4, 0.01),
"fire_ice": pytest.approx(9.24, 0.01),
}
)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-vv"]))
|
StarcoderdataPython
|
4915839
|
import torch.nn as nn
import torch
from detection.ResUnet import CRAFT
import detection.craft_utils as craft_utils
from recognition.model import TRBA
import data_utils
from config import config
from torch.autograd import Variable
class CRAFTS(nn.Module) :
def __init__(self, cfg, std_cfg, str_cfg, device) :
super().__init__()
self.cfg = cfg
self.std_cfg = std_cfg
self.str_cfg = str_cfg
self.device = device
self.detection_model = CRAFT(input_channel = 3, n_classes = self.std_cfg.NUM_CLASSES)
self.recognition_model = TRBA(self.str_cfg)
def forward(self, x, padding, converter, word_bboxes_batch, words_batch, words_length_batch) :
std_out, feature = self.detection_model(x)
STR_inputs = torch.cat([std_out[:,:2,:,:], feature], axis = 1).permute(0,2,3,1).cpu().detach().numpy()
STD_batch_size = STR_inputs.shape[0]
## TRAIN
if self.cfg.training :
feature_batch, text_batch, length_batch = [], [], []
for i in range(STD_batch_size) :
STR_input = STR_inputs[i]
word_bboxes = word_bboxes_batch[i]
words = words_batch[i]
words_length = words_length_batch[i]
decoded_words = converter.decode(words, words_length)
for word_bbox, word, decoded_word, word_length in zip(word_bboxes, words, decoded_words, words_length) :
if word_length != 1 :
cropFeature, _ = data_utils.crop_image_by_bbox(STR_input, word_bbox, decoded_word)
feature_batch.append(cropFeature)
text_batch.append(word.unsqueeze(0))
length_batch.append(word_length.unsqueeze(0))
pad_batch = padding(feature_batch).to(self.device)
cropped_batch = Variable(pad_batch.type(torch.FloatTensor)).to(self.device)
text_batch = Variable(torch.cat(text_batch).type(torch.LongTensor)).to(self.device)
length_batch = Variable(torch.cat(length_batch).type(torch.IntTensor)).to(self.device)
if self.str_cfg.ViTSTR :
preds = self.recognition_model(cropped_batch, text_batch)
target = text_batch
else :
preds = self.recognition_model(cropped_batch, text_batch[:, :-1]) # align with Attention.forward
target = text_batch[:, 1:]
return std_out, preds, target, length_batch
## TEST
else :
            assert std_out.shape[0] == 1  # always exactly one image at a time??
score_text = std_out[0,0,:,:].cpu().data.numpy()
score_link = std_out[0,1,:,:].cpu().data.numpy()
boxes, polys = data_utils.getDetBoxes(score_text, score_link, self.std_cfg.TEXT_THRESHOLD, self.std_cfg.LINK_THRESHOLD, self.std_cfg.LOW_TEXT, poly = self.std_cfg.POLY)
feature_batch = []
STR_input = STR_inputs[0]
for box in boxes :
cropFeature, _ = data_utils.crop_image_by_bbox(STR_input, box, '')
feature_batch.append(cropFeature)
pad_batch = padding(feature_batch).to(self.device)
batch_size = pad_batch.shape[0]
length_for_pred = torch.IntTensor([self.str_cfg.batch_max_length] * batch_size).to(self.device)
if self.str_cfg.ViTSTR :
text_for_pred = torch.LongTensor(batch_size, self.str_cfg.batch_max_length + 2).fill_(0).to(self.device)
preds = self.recognition_model(pad_batch, text_for_pred, is_train = False)
else :
text_for_pred = torch.LongTensor(batch_size, self.str_cfg.batch_max_length + 1).fill_(0)
preds = self.recognition_model(pad_batch, text_for_pred, is_train = False).to(self.device)
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index, length_for_pred)
end_token = '[s]'
if end_token is not None :
if self.str_cfg.ViTSTR :
preds_str = [pred[1:pred.find(end_token)] for pred in preds_str]
else :
preds_str = [pred[:pred.find(end_token)] for pred in preds_str]
return std_out, boxes, polys, preds_str
|
StarcoderdataPython
|
62479
|
<gh_stars>0
def print_num(n):
"""Print a number with proper formatting depending on int/float"""
if float(n).is_integer():
return print(int(n))
else:
return print(n)
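# Usage sketch (added): print_num(3.0) prints "3", while print_num(2.5) prints "2.5".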
|
StarcoderdataPython
|
1896767
|
<reponame>paullewallencom/javascript-978-1-8495-1034-9
# Imports assumed by this URLconf fragment (exact module paths depend on the
# Django 1.x version used in the original project):
from django.conf.urls import patterns, include
from django.contrib import admin
import directory.views

urlpatterns = patterns(u'',
(ur'^$', directory.views.homepage),
(ur'^accounts/login/$', u'django.contrib.auth.views.login'),
(ur'^admin/', include(admin.site.urls)),
(ur'^ajax/check_login', directory.views.ajax_check_login),
(ur'^ajax/delete', directory.views.ajax_delete),
(ur'^ajax/download/(Email|Entity|Phone|Status|Tag|URL)', directory.views.ajax_download_model),
(ur'^ajax/login', directory.views.ajax_login_request),
(ur'^ajax/new/Entity', directory.views.new_Entity),
(ur'^ajax/profile/(\d+)', directory.views.ajax_profile),
(ur'^ajax/saveimage/(\d+)', directory.views.saveimage),
(ur'^ajax/save', directory.views.save),
(ur'^ajax/search', directory.views.ajax_search),
(ur'^(create/Entity)', directory.views.redirect),
(ur'^(create/Location)', directory.views.redirect),
(ur'^manage/Entity/?(\d*)', directory.views.modelform_Entity),
(ur'^manage/Location/?(\d*)', directory.views.modelform_Location),
(ur'^profile/images/(\d+)', directory.views.image),
(ur'^profile/(new)$', directory.views.profile_new),
(ur'^profile/(\d+)$', directory.views.profile_existing),
)
|
StarcoderdataPython
|
8085733
|
<gh_stars>1-10
from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, LeakyReLU, Add, AveragePooling2D, ReLU, MaxPool2D
from Model.layers import SpectralNormalization
import tensorflow as tf
#via https://github.com/manicman1999/Keras-BiGAN/blob/master/bigan.py
def up_res_block(input, filters, gen_kernel_size, kernel_init):
skip = up_sample(input)
skip = conv_spectral_norm(skip, filters, gen_kernel_size, 1, kernel_init, False, pad_type='zero')
output = BatchNormalization()(input)
output = ReLU()(output)
output = up_sample(output)
output = conv_spectral_norm(output,filters, gen_kernel_size, 1, kernel_init, False)
output = BatchNormalization()(output)
output = ReLU()(output)
output = conv_spectral_norm(output, filters,gen_kernel_size,1,kernel_init, True)
output = Add()([output, skip])
return output
#via https://github.com/taki0112/Self-Attention-GAN-Tensorflow/blob/master/ops.py
def down_res_block(input, filters, disc_kernel_size, kernel_init):
skip = conv_spectral_norm(input, filters, disc_kernel_size, 1,kernel_init,True, pad_type='zero')
skip = AveragePooling2D()(skip)
output = LeakyReLU(0.2)(input)
output = conv_spectral_norm(output, filters, disc_kernel_size, 1, kernel_init, True)
output = LeakyReLU(0.2)(output)
output = conv_spectral_norm(output, filters, disc_kernel_size, 1, kernel_init, True)
output = AveragePooling2D()(output)
output = Add()([output, skip])
return output
def dense_spectral_norm(input,filters,bias):
spectralDense = SpectralNormalization(
Dense(filters,use_bias=bias)
)
return spectralDense(input)
def conv_spectral_norm(input, filters, kernel_size, stride, kernel_init, bias, pad_type='reflect'):
spectralConv = SpectralNormalization(
Conv2D(filters, kernel_size = (kernel_size,kernel_size), strides = (stride,stride), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init, use_bias=bias)
)
return spectralConv(input)
def up_sample(input):
_, h, w, _ = input.get_shape().as_list()
new_size = [h * 2, w * 2]
return tf.image.resize(input, size=new_size, method='nearest')
def down_sample(input):
    return AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(input)
def hw_flatten(x) :
return tf.reshape(x, shape=[tf.shape(x)[0], -1, tf.shape(x)[-1]])
|
StarcoderdataPython
|
269326
|
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2016-03-17 17:06:04
# @Last Modified by: <NAME>
# @Last Modified time: 2016-04-07 07:21:56
import os
import json
import numpy as np
import networkx as nx
from .SecondaryStructure import SecondaryStructure as SS
from .Form import Form
from .FormMotif import FormMotif
class FormFabric(object):
# _DEPTH = 7.1
_DEPTH = 9.1
_DEPTH_VAR = 1.1
# _WIDTH = {"H": 9.0, "E": 4.3, "C": 4.3, "X": 4.3}
_WIDTH = {"H": 10.0, "E": 4.8, "C": 4.8, "X": 4.8}
_WIDTH_VAR = {"H": 0.7, "E": 0.2, "C": 0.4, "X": 0.3}
def __init__(self):
self._id = None
self._desc = None
self._layers = []
self._motif = None
self.forms = []
self.Estandard = 8
def build(self, identifier, filename):
self._id = identifier
data = {}
with open(filename) as fd:
data = json.loads(''.join([l.strip() for l in fd]))
if "motif" in data:
p1 = os.path.dirname(os.path.abspath(filename))
p2 = os.path.abspath(data["motif"]["pdb_file"])
data["motif"]["pdb_file"] = os.path.relpath(p2, p1)
self._process(data)
def dump(self, outdir = os.path.join(os.getcwd(), 'forms')):
outdir = os.path.join(outdir, self._id)
if not os.path.isdir(outdir): os.makedirs(outdir)
for x in range(len(self.forms)):
ident = "{0}_{1:06d}".format(self._id, x + 1)
finaldir = os.path.join(outdir, ident)
if not os.path.isdir(finaldir): os.mkdir(finaldir)
identf = os.path.join(finaldir, "info.md")
fasta = os.path.join(finaldir, "fasta.fa")
psipred = os.path.join(finaldir, "psipred.ss2")
constrs = os.path.join(finaldir, "constraints.cst")
targetl = os.path.join(finaldir, "target.loop")
tmplatl = os.path.join(finaldir, "template.loop")
comndfl = os.path.join(finaldir, "run.command")
with open(identf, "w") as fd: fd.write(str(self.forms[x]))
with open(fasta, "w") as fd: fd.write(self.forms[x].to_fasta(self._id))
with open(psipred, "w") as fd: fd.write(self.forms[x].to_psipred_ss())
with open(constrs, "w") as fd: fd.write(self.forms[x].to_file_constraint())
with open(targetl, "w") as fd: fd.write(self._motif.to_target_loops())
with open(tmplatl, "w") as fd: fd.write(self._motif.to_tmpl_loops(self.forms[x]))
if not os.path.isfile(comndfl):
with open(comndfl, "w") as fd: fd.write(self._motif.to_command(targetl, tmplatl, fasta, psipred, constrs, ident, self.forms[x], comndfl))
def print_structures(self):
for x in self._layers:
for y in x:
                print(y)
def reset(self):
self._id = None
self._desc = None
self._layers = None
self.forms = []
def _process(self, description):
self._desc = description
self._motif = FormMotif(description["motif"])
maxL = {"H": 0, "E": 0, "C": 0, "X": 0}
for x in range(len(self._desc["layers"])):
self._layers.append([])
for y in range(len(self._desc["layers"][x])):
ss = SS(self._desc["layers"][x][y], x+1, y+1)
self._layers[-1].append(ss)
if ss.length > maxL[ss.type]: maxL[ss.type] = ss.length
self._apply_lengths(maxL)
self._place_xz()
self._create_forms()
def _create_forms( self ):
i = 1
G = self._create_graph()
path_length = len( G.nodes() )-1
self.forms = []
for node in G.nodes():
for path in self._find_paths(G, node, path_length):
f = Form(path)
if not f.matches_desc(): continue
f.make_structure_sequence()
self.forms.append(f)
i += 1
def _create_graph( self ):
G = nx.Graph()
for x in self._layers:
for sse1 in x:
for sse2 in x:
if sse1 < sse2:
G.add_edge( sse1 , sse2, object=SS )
for lyr1 in range(len(self._layers)):
for lyr2 in range(len(self._layers)):
if abs(lyr1 - lyr2) == 1: # Only consecutive layers
for sse1 in self._layers[lyr1]:
for sse2 in self._layers[lyr2]:
G.add_edge( sse1 , sse2, object=SS )
return G
def _find_paths( self, G, u, n ):
if n == 0: return [[u]]
paths = [[u]+path for neighbor in G.neighbors(u) for path in self._find_paths(G, neighbor, n-1) if u not in path]
return paths
def _place_xz(self):
r = np.random.random_sample()
for x in range(len(self._layers)):
dvar = float(self._DEPTH_VAR * r) - (self._DEPTH_VAR / 2.0)
z = float((self._DEPTH + dvar) * x)
last = 0
for y in range(len(self._layers[x])):
self._layers[x][y].set_z(z)
tp = self._layers[x][y].type
wvar = (self._WIDTH_VAR[tp] * r) - (self._WIDTH_VAR[tp] / 2.0)
xp = last + self._WIDTH[tp] + wvar
last = xp
self._layers[x][y].set_x(xp)
def _apply_lengths(self, lengths):
if lengths["H"] == 0 and lengths["E"] == 0 and lengths["C"] == 0:
lengths["E"] = self.Estandard
if lengths["H"] != 0:
if lengths["E"] == 0: lengths["E"] = int(lengths["H"]/2.0)
if lengths["C"] == 0: lengths["C"] = int(lengths["H"]/2.0)
if lengths["E"] != 0:
if lengths["H"] == 0: lengths["H"] = int(lengths["E"] * 2.0)
if lengths["C"] == 0: lengths["C"] = int(lengths["E"] * 2.0)
if lengths["C"] != 0:
if lengths["H"] == 0: lengths["H"] = int(lengths["C"] * 2.0)
if lengths["E"] == 0: lengths["E"] = int(lengths["C"] * 2.0)
for x in self._layers:
for y in x:
if y.length == 0: y.length = lengths[y.type]
|
StarcoderdataPython
|
1933358
|
<reponame>radiusoss/constructor<filename>constructor/conda_interface.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from os.path import join
import sys
try:
from conda import __version__ as CONDA_INTERFACE_VERSION
conda_interface_type = 'conda'
except ImportError:
raise RuntimeError("Conda must be installed for python interpreter\n"
"with sys.prefix: %s" % sys.prefix)
if conda_interface_type == 'conda':
CONDA_MAJOR_MINOR = tuple(int(x) for x in CONDA_INTERFACE_VERSION.split('.')[:2])
from conda.base.context import context
cc_platform = context.subdir
from conda.exports import fetch_index as _fetch_index, cache_fn_url as _cache_fn_url
from conda.exports import Resolve, NoPackagesFound
from conda.exports import default_prefix
from conda.exports import linked_data
from conda.exports import download as _download
from conda.models.channel import prioritize_channels
def fetch_index(channel_urls):
return _fetch_index(prioritize_channels(channel_urls))
def fetch_pkg(pkginfo, download_dir):
pkg_url = pkginfo['url']
assert pkg_url
_download(pkg_url, join(download_dir, pkginfo['fn']))
def write_repodata(cache_dir, url):
if CONDA_MAJOR_MINOR >= (4, 5):
from conda.core.subdir_data import fetch_repodata_remote_request
raw_repodata_str = fetch_repodata_remote_request(url, None, None)
repodata_filename = _cache_fn_url(url)
with open(join(cache_dir, repodata_filename), 'w') as fh:
fh.write(raw_repodata_str)
elif CONDA_MAJOR_MINOR >= (4, 4):
from conda.core.repodata import fetch_repodata_remote_request
raw_repodata_str = fetch_repodata_remote_request(url, None, None)
repodata_filename = _cache_fn_url(url)
with open(join(cache_dir, repodata_filename), 'w') as fh:
fh.write(raw_repodata_str)
elif CONDA_MAJOR_MINOR >= (4, 3):
from conda.core.repodata import fetch_repodata_remote_request
repodata_obj = fetch_repodata_remote_request(None, url, None, None)
raw_repodata_str = json.dumps(repodata_obj)
repodata_filename = _cache_fn_url(url)
with open(join(cache_dir, repodata_filename), 'w') as fh:
fh.write(raw_repodata_str)
else:
raise NotImplementedError("unsupported version of conda: %s" % CONDA_INTERFACE_VERSION)
cc_platform = cc_platform
fetch_index, fetch_pkg = fetch_index, fetch_pkg
Resolve, NoPackagesFound = Resolve, NoPackagesFound
default_prefix = default_prefix
linked_data = linked_data
|
StarcoderdataPython
|
82924
|
<filename>third_party/OpenFace/model_training/ce-clm_training/cen_training/train_cen.py
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax
from keras.constraints import max_norm, non_neg
from keras.callbacks import ModelCheckpoint, Callback
import numpy
import scipy
from scipy import io
from itertools import product
import pickle
import time
import uuid
import os
import h5py
import sys
from keras.models import model_from_json
from keras.models import model_from_yaml
from keras.models import load_model
import keras.backend as K
from datetime import datetime
import argparse
logfile = None
final_results = []
def log(message):
with open(logfile, 'a') as f:
f.write("{}\n".format(message))
def log_init(folder, filename):
if not os.path.exists(folder):
os.mkdir(folder)
global logfile
logfile = os.path.join(folder, filename)
with open(logfile, 'a') as f:
f.write("----------------------\n")
f.write("{}\n".format(datetime.now().strftime("%b/%d/%y %H:%M:%S")))
def put_in_format(samples, training_samples_size):
    samples=samples.reshape(samples.shape[0]//training_samples_size, training_samples_size,samples.shape[1])
return numpy.squeeze(samples);
def read_data(folder, scale, view, lm):
folder = os.path.join(folder)
print("--------------------------------------------------------------")
try:
#reading from h5
h5_file = os.path.join(folder, str(lm), 'data' + scale + '_' + view + '.mat')
print('loading patches from ' + h5_file)
dataset = h5py.File(h5_file, 'r');
print("Landmark " + str(lm))
except:
print("Landmark " + str(lm) + ' not found!')
print("--------------------------------------------------------------")
sys.exit()
train_data = put_in_format(numpy.array(dataset['samples_train']),81)
train_labels = put_in_format(numpy.array(dataset['labels_train']).T,81)
test_data = put_in_format(numpy.array(dataset['samples_test']),81)
test_labels = put_in_format(numpy.array(dataset['labels_test']).T,81)
train_data_dnn = train_data.reshape([train_data.shape[0]*train_data.shape[1],122])
train_labels_dnn = train_labels.reshape([train_labels.shape[0]*train_labels.shape[1],1])
test_data_dnn = test_data.reshape([test_data.shape[0]*test_data.shape[1],122])
test_labels_dnn = test_labels.reshape([test_labels.shape[0]*test_labels.shape[1],1])
print(train_data_dnn.shape)
print(train_labels_dnn.shape)
print(test_data_dnn.shape)
print(test_labels_dnn.shape)
return train_data_dnn.astype('float32'), train_labels_dnn.flatten().astype('float32'), test_data_dnn.astype('float32'), test_labels_dnn.flatten().astype('float32')
def read_data_menpo(folder, scale, view, lm):
train_file = "menpo_train_data{}_{}_{}.mat".format(scale, view, lm)
valid_file = "menpo_valid_data{}_{}_{}.mat".format(scale, view, lm)
print("training file: {}".format(train_file))
print("validation file: {}".format(valid_file))
print("--------------------------------------------------------------")
try:
#reading from h5
train = h5py.File(os.path.join(folder, train_file), 'r')
valid = h5py.File(os.path.join(folder, valid_file), 'r')
print("Landmark " + str(lm))
except:
print("Landmark " + str(lm) + 'not found!')
print("--------------------------------------------------------------")
sys.exit()
train_data = put_in_format(numpy.array(train['samples']),81)
train_labels = put_in_format(numpy.array(train['labels']).T,81)
train_data_dnn = train_data.reshape([train_data.shape[0]*train_data.shape[1],122])
train_labels_dnn = train_labels.reshape([train_labels.shape[0]*train_labels.shape[1],1])
print(train_data_dnn.shape)
print(train_labels_dnn.shape)
if 'samples' in valid:
valid_data=put_in_format(numpy.array(valid['samples']),81)
valid_labels=put_in_format(numpy.array(valid['labels']).T,81)
valid_data_dnn = valid_data.reshape([valid_data.shape[0]*valid_data.shape[1],122])
valid_labels_dnn = valid_labels.reshape([valid_labels.shape[0]*valid_labels.shape[1],1])
print(valid_data_dnn.shape)
print(valid_labels_dnn.shape)
return train_data_dnn.astype('float32'), train_labels_dnn.flatten().astype('float32'), valid_data_dnn.astype('float32'), valid_labels_dnn.astype('float32')
else:
print("No validation data")
return train_data_dnn.astype('float32'), train_labels_dnn.flatten().astype('float32'), None, None
train.close()
valid.close()
def model_half():
model = Sequential()
model.add(Dense(300, input_dim=122, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(50, activation='sigmoid'))
model.add(Dropout(0.3))
model.add(Dense(1, kernel_constraint=non_neg(), activation='sigmoid'))
return model
def model_300():
model = Sequential()
model.add(Dense(300, input_dim=122, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(100, activation='sigmoid'))
model.add(Dropout(0.3))
model.add(Dense(1, kernel_constraint=non_neg(), activation='sigmoid'))
return model
def arch4():
model = Sequential()
model.add(Dense(500, input_dim=122, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(100, activation='sigmoid'))
model.add(Dropout(0.3))
model.add(Dense(1, kernel_constraint=non_neg(), activation='sigmoid'))
return model
def arch6():
model = Sequential()
model.add(Dense(50, input_dim=122, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(20, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(10, activation='sigmoid'))
model.add(Dropout(0.3))
model.add(Dense(1, kernel_constraint=non_neg(), activation='sigmoid'))
return model
def arch6a():
model = Sequential()
model.add(Dense(50, input_dim=122, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(100, activation='sigmoid'))
model.add(Dropout(0.3))
model.add(Dense(1, kernel_constraint=non_neg(), activation='sigmoid'))
return model
def arch7():
model = Sequential()
model.add(Dense(128, input_dim=122, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(32, activation='sigmoid'))
model.add(Dropout(0.3))
model.add(Dense(1, kernel_constraint=non_neg(), activation='sigmoid'))
return model
def arch7a():
model = Sequential()
model.add(Dense(100, input_dim=122, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(40, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(20, activation='sigmoid'))
model.add(Dropout(0.3))
model.add(Dense(1, kernel_constraint=non_neg(), activation='sigmoid'))
return model
def arch7b():
model = Sequential()
model.add(Dense(150, input_dim=122, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(60, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(30, activation='sigmoid'))
model.add(Dropout(0.3))
model.add(Dense(1, kernel_constraint=non_neg(), activation='sigmoid'))
return model
def arch7c():
model = Sequential()
model.add(Dense(200, input_dim=122, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(80, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(40, activation='sigmoid'))
model.add(Dropout(0.3))
model.add(Dense(1, kernel_constraint=non_neg(), activation='sigmoid'))
return model
def arch8():
model = Sequential()
model.add(Dense(512, input_dim=122, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(32, activation='sigmoid'))
model.add(Dropout(0.3))
model.add(Dense(1, kernel_constraint=non_neg(), activation='sigmoid'))
return model
def arch9():
model = Sequential()
model.add(Dense(500, input_dim=122, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(10, activation='sigmoid'))
model.add(Dropout(0.3))
model.add(Dense(1, kernel_constraint=non_neg(), activation='sigmoid'))
return model
def find_last(epoch_src, acc_file, dataset):
filename = os.path.join(epoch_src, acc_file)
if not os.path.exists(filename):
print("{} not found!".format(filename))
sys.exit()
results = pickle.load(open(filename, 'r'))
print("starting from epoch {}".format(len(results['corr'])))
filename = '{}_epoch_{}.h5'.format(dataset, len(results['corr']) - 1)
print("loading from {}".format(filename))
return os.path.join(epoch_src, filename), len(results['corr'])
def load_old_model(epoch_src, acc_file, dataset):
# load pre-trained model to continue training
filename = None
start_epoch = 0
if acc_file is not None:
filename, start_epoch = find_last(epoch_src, acc_file, dataset)
return filename, start_epoch
def build_model(model_fn, model_file=None):
if model_file is not None:
model = load_model(model_file)
print(model.get_config())
return model
model = model_fn()
optimizers=[]
optimizers.append(SGD(lr=.1, momentum=0.1, decay=0.0))
optimizers.append(RMSprop(lr=0.001,rho=0.9, epsilon=1e-06))
optimizers.append(Adagrad(lr=0.01, epsilon=1e-06))
optimizers.append(Adadelta(lr=1.0, rho=0.95, epsilon=1e-06))
#this is the optimizer that is used - Adam
#you can change the lr parameter
#initial: 2
lr = 0.0001/2
log("Learning rate for Adam: {}".format(lr))
optimizers.append(Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08))
optimizers.append(Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08))
model.compile(loss='mean_squared_error', optimizer=optimizers[4])
return model
corrs = []
class EpochCallback(Callback):
def __init__(self, valid_data, valid_labels):
self.valid_data = valid_data
self.valid_labels = valid_labels
def on_epoch_end(self, epoch, logs={}):
prediction = self.model.predict(self.valid_data, batch_size=4096)
coeff = numpy.corrcoef(prediction.flatten(), self.valid_labels.flatten())[0, 1]
print("RMSE: {}\tCorr: {}".format(numpy.sqrt(logs['val_loss']), coeff ** 2))
global corrs
corrs.append(coeff ** 2)
# TODO: make symbolic
def corr(y_true, y_pred):
return K.constant((numpy.corrcoef(y_true.flatten(), y_pred.flatten())[0, 1]) ** 2)
def get_best(history, corrs):
history = history.history
print("Keys: {}".format(history.keys()))
best_mse = None
best_corr = None
for i, (mse, corr) in enumerate(zip(history['val_loss'], corrs)):
if best_mse is None or mse < best_mse[1]:
best_mse = (i, mse)
if best_corr is None or corr > best_corr[1]:
best_corr = (i, corr)
return best_mse, best_corr
MODELS = {
'model_half': model_half,
'model_300': model_300,
'arch4': arch4,
'arch6': arch6,
'arch7': arch7,
'arch8': arch8,
'arch6a': arch6a,
'arch7a': arch7a,
'arch7b': arch7b,
'arch7c': arch7c,
'arch9': arch9
}
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('mat_src', type=str)
parser.add_argument('model', type=str, choices=MODELS.keys())
parser.add_argument('scale', type=str, help="scale of image")
parser.add_argument('view', type=str, help="view of image")
parser.add_argument('lm', type=int, help="landmark number")
parser.add_argument('minibatch', type=int, help="size of minibatch")
parser.add_argument('results_dir', type=str, help="location to save model epochs")
parser.add_argument('dataset', choices=['general', 'menpo'], help='dataset to train on')
parser.add_argument('--acc_file', type=str, default=None, help='if this option is set, resume training of model based on last epoch in this file (in results_dir)')
parser.add_argument('--num_epochs', type=int, default=25, help='number of epochs to train model')
parser.add_argument('--outfile', type=str, default='accuracies.txt', help='file to save training history to (in results_dir)')
args = parser.parse_args()
logfile = "{}_{}_{}.log".format(args.scale, args.view, args.lm)
log_init('./logs', logfile)
log("""Loading data from: {}
Model: {}\nScale: {}\t View: {}\tLM: {}\nMinibatch size: {}
Results saved to: {}\nDataset: {}\nFile to resume from: {}
# of epochs: {}
Training history saved to: {}""".format(args.mat_src, args.model, args.scale,
args.view, args.lm, args.minibatch, args.results_dir, args.dataset,
args.acc_file, args.num_epochs, args.outfile))
if not os.path.isdir(args.mat_src):
print("Error, mat source {} does not exist".format(args.mat_src))
sys.exit()
model_folder = os.path.join(os.path.abspath(os.path.dirname(__file__)), args.results_dir, args.model)
if not os.path.isdir(model_folder):
os.makedirs(model_folder)
outfolder = os.path.join(model_folder, "{}_{}_{}_{}".format(args.scale, args.view, args.lm, args.minibatch))
if not os.path.isdir(outfolder):
os.mkdir(outfolder)
filename, start_epoch = load_old_model(outfolder, args.acc_file, args.dataset)
model = build_model(MODELS[args.model], model_file=filename)
if args.dataset == 'general':
train_data, train_labels, valid_data, valid_labels = read_data(args.mat_src, args.scale, args.view, args.lm)
elif args.dataset == 'menpo':
train_data, train_labels, valid_data, valid_labels = read_data_menpo(args.mat_src, args.scale, args.view, args.lm)
callbacks = [
EpochCallback(valid_data, valid_labels),
ModelCheckpoint(os.path.join(outfolder, args.dataset + "_epoch_{epoch}.h5"), verbose=1)
]
history = model.fit(train_data, train_labels, verbose=1,
epochs=args.num_epochs+start_epoch, batch_size=args.minibatch,
validation_data=(valid_data, valid_labels), callbacks=callbacks,
initial_epoch=start_epoch)
(mse_index, bestMSE), bestCorr = get_best(history, corrs)
log("Number of epochs run: {}".format(args.num_epochs))
log("Best RMSE {}.\t Best Corr: {}.".format((mse_index, numpy.sqrt(bestMSE)), bestCorr))
# append new training stats to old ones, if continuing
old_data = None
if args.acc_file is not None:
with open(os.path.join(outfolder, args.acc_file), 'r') as g:
old_data = pickle.load(g)
with open(os.path.join(outfolder, args.outfile), 'w') as f:
rmses = list(numpy.sqrt(history.history['val_loss']))
if old_data is not None:
rmses = old_data['rmse'] + rmses
corrs = old_data['corr'] + corrs
results = {'rmse': rmses, 'corr': corrs}
pickle.dump(results, f)
log("Successfully trained\n--------------------")
|
StarcoderdataPython
|
3498640
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 27 21:07:59 2018
@author: JSen
"""
import numpy as np
import matplotlib.pyplot as plt
from numpy import loadtxt, load
import os
from scipy import optimize
from scipy.optimize import minimize
from sklearn import linear_model
import scipy.io as spio
import random
os.chdir('/Users/JSen/Documents/bias_variance/')
#load training data
data = spio.loadmat('ex5data1.mat')
#X 5000x400 y 5000x1
X = data['X']
y = data['y']
Xval = data['Xval']
yval = data['yval']
Xtest = data['Xtest']
ytest = data['ytest']
plt.plot(X, y, 'rx')
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.show()
def linearRegCostFunction(theta, X, y, lambda_coe):
m = len(y)
J = 0
h = np.dot(X, theta)
theta_1 = theta.copy()
# theta_1 = theta_1.reshape(-1, 1)
theta_1[0] = 0 #theta0 should not be regularized!
J = 1/(2*m) * np.sum((h-y)**2) + lambda_coe/(2*m) * np.sum(theta_1**2)
return J
def linearRegGradientFunction(theta, X, y, lambda_coe):
m = len(y)
theta = theta.reshape(-1, 1)
h = np.dot(X, theta)
theta_1 = theta.copy()
theta_1[0] = 0
grad = np.dot(X.T, h-y)/m + lambda_coe/m * theta_1
return grad.ravel()
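# Added note: the two functions above implement regularized linear regression,
#   J(theta) = 1/(2m) * sum((X*theta - y)^2) + lambda/(2m) * sum(theta[1:]^2)
# with gradient
#   grad = (1/m) * X'(X*theta - y) + (lambda/m) * [0, theta[1:]],
# i.e. the intercept term theta[0] is excluded from the regularization.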
def test(X, y):
theta = np.array([[1], [1]])
X = np.hstack((np.ones((X.shape[0], 1)), X))
cost = linearRegCostFunction(theta, X, y, 1)
grad = linearRegGradientFunction(theta, X, y, 1)
print(f'cost:{cost}, gradient:{grad}')
test(X, y)
def feature_normalization(X):
X_norm = X
column_mean = np.mean(X_norm, axis=0)
# print('mean=', column_mean)
column_std = np.std(X_norm, axis=0)
# print('std=',column_std)
X_norm = X_norm - column_mean
X_norm = X_norm / column_std
return column_mean.reshape(1, -1), column_std.reshape(1, -1), X_norm
#means, stds, X_norm = feature_normalization(X)
def feature_normalization_with_mu(X, mu, sigma):
mu = mu.reshape(1, -1)
sigma = sigma.reshape(1, -1)
X_norm = X
X_norm = X_norm - mu
X_norm = X_norm / sigma
return X_norm
def trainLinearReg(X, y, lambda_coe):
# X = np.hstack((np.ones((X.shape[0],1)), X))
initial_theta = np.ones((X.shape[1]))
    '''Note: the Newton-CG method is used here so that the results match the course; with the plain CG method, runs with more than 10 samples do not converge.'''
result = optimize.minimize(linearRegCostFunction, initial_theta, method='Newton-CG' ,jac=linearRegGradientFunction, args=(X, y, lambda_coe), options={'maxiter':200, 'disp':True})
return result['x']
    # Equivalent to the code above:
# res = optimize.fmin_ncg(linearRegCostFunction, initial_theta, fprime=linearRegGradientFunction, args=(X, y, lambda_coe), maxiter=200)
# return res
res = trainLinearReg(X, y, 0)
def plotData(X, y, theta):
plt.plot(X, y, 'ro')
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.hold(True)
X_t = np.hstack((np.ones((X.shape[0],1)), X))
y_t = np.dot(X_t, theta.reshape(-1,1))
plt.plot(X, y_t, 'g-')
plt.hold(False)
plt.show()
#plotData(X, y, res)
def learningCurve(X, y, Xval, yval, lambda_coe):
m = len(y)
error_train = np.zeros((m, 1))
error_val = np.zeros((m, 1))
for i in range(1, m+1):
# i=2
subX = X[:i, :]
X_t = np.hstack((np.ones((subX.shape[0], 1)), subX))
y_t = y[:i, :]
theta = trainLinearReg(X_t, y_t, 0)
theta = theta.reshape(-1, 1)
        train_loss = linearRegCostFunction(theta, X_t, y_t, 0)  # least-squares error
X_val_t = np.hstack((np.ones((Xval.shape[0], 1)), Xval))
val_loss = linearRegCostFunction(theta, X_val_t, yval, 0)
error_train[i-1] = train_loss
error_val[i-1] = val_loss
return error_train, error_val
lambda_coe = 0
train_error, val_error = learningCurve(X, y, Xval, yval, lambda_coe)
def plotLearningCurve(train_error, val_error):
# for i in range(len(train_error)):
# print(f'{i} {train_error[i]} {val_error[i]}')
m = len(y)
plt.plot(list(range(1,m+1)), train_error, list(range(1,m+1)), val_error)
plt.title('Learning curve for linear regression')
plt.legend(['Train', 'Cross Validation'])
plt.xlabel('Number of training examples')
plt.ylabel('Error')
plt.axis([0, 13, 0, 150])
plt.show()
plotLearningCurve(train_error, val_error)
def polyFeatures(X: np.ndarray, p: int):
X_t = X.copy()
for i in range(2,p+1, 1):
X_t = np.hstack((X_t, X**i))
return X_t
X_2 = polyFeatures(X, 3)
|
StarcoderdataPython
|
11390747
|
<reponame>tefra/xsdata-w3c-tests<gh_stars>1-10
from output.models.nist_data.list_pkg.float_pkg.schema_instance.nistschema_sv_iv_list_float_max_length_5_xsd.nistschema_sv_iv_list_float_max_length_5 import NistschemaSvIvListFloatMaxLength5
__all__ = [
"NistschemaSvIvListFloatMaxLength5",
]
|
StarcoderdataPython
|
114423
|
<reponame>haru-works/Resize_image<filename>resize_images.py
import os
from glob import glob
from PIL import Image
import argparse
import datetime
# Current time, used in output filenames
dt_now = datetime.datetime.now()
#------------------------------------------------------------------------
# Image resize processing
#------------------------------------------------------------------------
def resize_images(images_dir, image_save_dir, image_size_h,image_size_w):
    # Create the output folder
os.makedirs(image_save_dir, exist_ok=True)
    # Collect the image file paths
img_paths = glob(os.path.join(images_dir, '*.png'))
    # Loop over the image files
for img_path in img_paths:
        # Open the image
image = Image.open(img_path)
        # Convert to RGB
rgb_im = image.convert('RGB')
        # Get the original image size
iw, ih = image.size
        # Get the target size
w, h = (image_size_w,image_size_h)
        # Compute the scale factor
scale = min(w/iw, h/ih)
        # Recompute the resized dimensions
nw = int(iw*scale)
nh = int(ih*scale)
        # Perform the resize
rgb_im = rgb_im.resize((nw,nh), Image.BICUBIC)
        # Create the padded background
back_im = Image.new("RGB", (image_size_w,image_size_h), color=(128,128,128))
back_im.paste(rgb_im, ((w-nw)//2, (h-nh)//2))
        # Build the save path and save
save_path = os.path.join(image_save_dir, os.path.basename(img_path))
end_index = save_path.rfind('.')
save_path = save_path[0:end_index] + "_" + dt_now.strftime('%Y%m%d%H%M%S') + '.png'
print('save',save_path)
back_im.save(save_path,format='PNG')
#------------------------------------------------------------------------
# Main processing
#------------------------------------------------------------------------
def _main():
    # Define command-line arguments
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
'''
Command line options
'''
parser.add_argument(
'--input', type=str,
help='path to images_dir'
)
parser.add_argument(
'--output', type=str,
help='path to resize_dir '
)
parser.add_argument(
'--h', type=int,
help='resize height'
)
parser.add_argument(
'--w', type=int,
help='resize witdh'
)
ARGS = parser.parse_args()
    # Input directory
images_dir = ARGS.input
    # Output directory
image_save_dir = ARGS.output
    # Create the output directory if it does not exist
if not os.path.exists(image_save_dir):
os.mkdir(image_save_dir)
    # Set the resize dimensions
image_size_h = ARGS.h
image_size_w = ARGS.w
    # Run the resize
resize_images(images_dir=images_dir, image_save_dir=image_save_dir,image_size_h=image_size_h,image_size_w=image_size_w)
if __name__ == '__main__':
_main()
|
StarcoderdataPython
|
1702664
|
<reponame>mbaak/Eskapade-Spark
"""Project: Eskapade - A python-based package for data analysis.
Class: SparkHistogrammarFiller
Created: 2017/06/09
Description:
Algorithm to fill histogrammar sparse-bin histograms from a Spark
dataframe. It is possible to do cleaning of these histograms by
rejecting certain keys or removing inconsistent data types.
Timestamp columns are converted to nanoseconds before
the binning is applied.
Authors:
KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands
Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""
import histogrammar as hg
import histogrammar.sparksql
import numpy as np
import pandas as pd
import pyspark
from pyspark.sql.functions import col as sparkcol
from eskapade.analysis.histogram_filling import HistogramFillerBase
from eskapade.analysis import HistogrammarFiller
from eskapade.analysis.links.hist_filler import hgr_convert_bool_to_str, hgr_fix_contentType, get_n_bins
class SparkHistogrammarFiller(HistogrammarFiller):
"""Fill histogrammar sparse-bin histograms with Spark.
Algorithm to fill histogrammar style sparse-bin and category histograms
with Spark. It is possible to do after-filling cleaning of these
histograms by rejecting certain keys or removing inconsistent data
types. Timestamp columns are converted to nanoseconds before the binning
is applied. Final histograms are stored in the datastore.
Example is available in: tutorials/esk605_hgr_filler_plotter.py.
"""
def __init__(self, **kwargs):
"""Initialize link instance.
Store and do basic check on the attributes of link HistogrammarFiller.
:param str name: name of link
:param str read_key: key of input data to read from data store
:param str store_key: key of output data to store histograms in data store
        :param list columns: columns to pick up from input data (default is all columns)
:param dict bin_specs: dictionaries used for rebinning numeric or timestamp columns
Example bin_specs dictionary is:
>>> bin_specs = {'x': {'bin_width': 1, 'bin_offset': 0},
>>> 'y': {'bin_edges': [0, 2, 3, 4, 5, 7, 8]}}
:param dict var_dtype: dict of datatypes of the columns to study from dataframe
        (if not provided, try to determine datatypes directly from dataframe)
        :param dict quantity: dictionary of lambda functions of how to parse certain columns
Example quantity dictionary is:
>>> quantity = {'y': lambda x: x}
:param bool store_at_finalize: store histograms in datastore at finalize(), not at execute()
(useful when looping over datasets, default is False)
        :param dict drop_keys: dictionary used for dropping specific keys from bins dictionaries of histograms
Example drop_keys dictionary is:
>>> drop_keys = {'x': [1, 4, 8, 19],
>>> 'y': ['apple', 'pear', 'tomato'],
>>> 'x:y': [(1, 'apple'), (19, 'tomato')]}
"""
# initialize Link, pass name from kwargs
if 'name' not in kwargs:
kwargs['name'] = 'SparkHistogrammarFiller'
HistogrammarFiller.__init__(self, **kwargs)
self._unit_timestamp_specs = {'bin_width': float(pd.Timedelta(days=7).value),
'bin_offset': float(pd.Timestamp('2017-01-02').value)}
def fill_histogram(self, idf, columns):
"""Fill input histogram with column(s) of input dataframe.
:param idf: input data frame used for filling histogram
:param list columns: histogram column(s)
"""
name = ':'.join(columns)
if name not in self._hists:
# create an (empty) histogram of right type
self._hists[name] = self.construct_empty_hist(idf, columns)
hist = self._hists[name]
# do the actual filling
hist.fill.sparksql(idf)
# remove specific keys from histogram before merging, if so requested
hist.bins = self.drop_requested_keys(name, hist.bins)
self._hists[name] = hist
def construct_empty_hist(self, df, columns):
"""Create an (empty) histogram of right type.
Create a multi-dim histogram by iterating through the columns in
reverse order and passing a single-dim hist as input to the next
column.
:param df: input dataframe
:param list columns: histogram columns
:returns: created histogram
:rtype: histogrammar.Count
"""
hist = hg.Count()
# create a multi-dim histogram by iterating through the columns in reverse order
# and passing a single-dim hist as input to the next column
revcols = list(reversed(columns))
for idx,col in enumerate(revcols):
# histogram type depends on the data type
dt = np.dtype(self.var_dtype[col])
is_number = isinstance(dt.type(), np.number)
is_timestamp = isinstance(dt.type(), np.datetime64)
if is_number or is_timestamp:
# numbers and timestamps are put in a sparse binned histogram
specs = self.var_bin_specs(columns, columns.index(col))
hist = hg.SparselyBin(binWidth=specs['bin_width'], origin=specs['bin_offset'], quantity=df[col], value=hist)
else:
                # strings and booleans are treated as categories
hist = hg.Categorize(quantity=df[col], value=hist)
# decorators; adding them here doesn't seem to work!
#selected_cols = revcols[:idx+1]
#hist.datatype = [self.var_dtype[col] for col in reversed(selected_cols)]
# FIXME stick data types and number of dimension to histogram
dta = [self.var_dtype[col] for col in columns]
hist.datatype = dta[0] if len(columns) == 1 else dta
hist.n_dim = len(columns)
return hist
def assert_dataframe(self, df):
"""Check that input data is a filled Spark data frame.
:param df: input Spark data frame
"""
if not isinstance(df, pyspark.sql.dataframe.DataFrame):
raise TypeError('Retrieved object not of type Spark DataFrame.')
# assert df.count() > 0, 'input dataframe is empty'
def get_all_columns(self, data):
"""Retrieve all columns / keys from input data.
:param data: input data sample (pandas dataframe or dict)
:returns: list of columns
:rtype: list
"""
if not isinstance(data, pyspark.sql.dataframe.DataFrame):
raise TypeError('Retrieved object not of type Spark DataFrame.')
return sorted(data.columns)
def get_data_type(self, df, col):
"""Get data type of dataframe column.
:param df: input data frame
:param str col: column
"""
if col not in df.columns:
raise KeyError('Column "{0:s}" not in input dataframe.'.format(col))
dt = dict(df.dtypes)[col]
# spark conversions to numpy or python equivalent
if dt == 'string':
dt = 'str'
elif dt == 'timestamp':
dt = np.datetime64
elif dt == 'boolean':
dt = bool
return np.dtype(dt)
def process_columns(self, df):
"""Process columns before histogram filling.
Specifically, in this case convert timestamp columns to nanoseconds
:param df: input data frame
:returns: output data frame with converted timestamp columns
:rtype: DataFrame
"""
# make alias df for value counting (used below)
idf = df.alias('')
# timestamp variables are converted here to ns since 1970-1-1
# histogrammar does not yet support long integers, so convert timestamps to float
#epoch = (sparkcol("ts").cast("bigint") * 1000000000).cast("bigint")
for col in self.dt_cols:
self.logger.debug('Converting column "{col}" of type "{type}" to nanosec.',
col=col, type=self.var_dtype[col])
to_ns = (sparkcol(col).cast("float") * 1e9).cast("float")
idf = idf.withColumn(col, to_ns)
hg.sparksql.addMethods(idf)
return idf
def process_and_store(self):
"""Process and store spark-based histogram objects."""
# if quantity refers to a spark df, the histogram cannot be pickled,
# b/c we cannot pickle a spark df.
# HACK: patch the quantity pickle bug here before storage into the datastore
# Also patch: contentType and keys of sub-histograms
for name, hist in self._hists.items():
hgr_patch_histogram(hist)
hist.n_bins = get_n_bins(hist)
# put hists in datastore as normal
HistogramFillerBase.process_and_store(self)
def hgr_patch_histogram(hist):
"""Apply set of patches to histogrammer histogram.
:param hist: histogrammar histogram to patch up.
"""
hgr_reset_quantity(hist, new_quantity=unit_func)
hgr_fix_contentType(hist)
hgr_convert_bool_to_str(hist)
def unit_func(x):
"""Dummy quantity function for histogrammar objects
:param x: value
:returns: the same value
"""
return x
# name needed for hist.toJson()
unit_func.name = 'unit_func'
def hgr_reset_quantity(hist, new_quantity=unit_func):
"""Reset quantity attribute of histogrammar histogram.
If quantity refers to a Spark df the histogram cannot be pickled,
b/c we cannot pickle a Spark df.
Here we reset the quantity of a (filled) histogram to a neutral lambda function.
:param hist: histogrammar histogram to reset quantity of.
:param new_quantity: new quantity function to reset hist.quantity to. default is lambda x: x.
"""
# nothing left to reset?
if isinstance(hist, hg.Count):
return
# reset quantity
if hasattr(hist, 'quantity'):
hist.quantity = new_quantity
# 1. loop through bins
if hasattr(hist, 'bins'):
for h in hist.bins.values():
hgr_reset_quantity(h, new_quantity)
# 2. loop through values
elif hasattr(hist, 'values'):
for h in hist.values:
hgr_reset_quantity(h, new_quantity)
# 3. process attributes if present
if hasattr(hist, 'value'):
hgr_reset_quantity(hist.value, new_quantity)
if hasattr(hist, 'underflow'):
hgr_reset_quantity(hist.underflow, new_quantity)
if hasattr(hist, 'overflow'):
hgr_reset_quantity(hist.overflow, new_quantity)
if hasattr(hist, 'nanflow'):
hgr_reset_quantity(hist.nanflow, new_quantity)
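# Hedged usage sketch (not part of this module): judging only from the constructor
# docstring above, a SparkHistogrammarFiller link would be configured along these lines
# inside an Eskapade macro. The datastore keys, column names and the chain object are
# hypothetical, so this is left as a comment rather than executable code.
#
# hf = SparkHistogrammarFiller(name='spark_hist_filler',
#                              read_key='spark_df',    # datastore key with the Spark dataframe (made up)
#                              store_key='hist',
#                              columns=['x', 'y', 'date'],
#                              bin_specs={'x': {'bin_width': 1, 'bin_offset': 0}})
# chain.add(hf)   # 'chain' is assumed Eskapade chain machinery, not shown in this file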
|
StarcoderdataPython
|
6417557
|
#!/usr/bin/env python
#
# Example pipeline to run Fastqc on one or more Fastq files
# but ignoring any with zero reads
import os
import argparse
from bcftbx.FASTQFile import nreads
from auto_process_ngs.pipeliner import PipelineTask
from auto_process_ngs.pipeliner import PipelineFunctionTask
from auto_process_ngs.pipeliner import PipelineCommandWrapper
from auto_process_ngs.pipeliner import Pipeline
class RunFastqc(PipelineTask):
# Run Fastqc on multiple files
def init(self,fastqs,out_dir):
# Inputs:
# - fastqs: list of input Fastq files
# - out_dir: where to put the Fastqc outputs
# Outputs:
# - files: list of output Fastqc HTML files
self.add_output('files',list())
def setup(self):
if not os.path.exists(self.args.out_dir):
os.mkdir(self.args.out_dir)
for fq in self.args.fastqs:
self.add_cmd(
PipelineCommandWrapper("Run FastQC",
"fastqc",
"-o",self.args.out_dir,
fq))
def finish(self):
for fq in self.args.fastqs:
if fq.endswith(".gz"):
fq = os.path.splitext(fq)[0]
out_file = os.path.join(
self.args.out_dir,
os.path.splitext(
os.path.basename(fq))[0]+"_fastqc.html")
if not os.path.exists(out_file):
self.fail(message="Missing output file: %s" % out_file)
else:
self.output.files.append(out_file)
class FilterEmptyFastqs(PipelineFunctionTask):
# Filter Fastq files based on read count
def init(self,fastqs):
self.add_output('fastqs',list())
def setup(self):
for fq in self.args.fastqs:
self.add_call("Filter out empty fastqs",
self.filter_empty_fastqs,fq)
def filter_empty_fastqs(self,*fastqs):
filtered_fastqs = list()
for fq in fastqs:
if nreads(fq) > 0:
print("%s" % fq)
filtered_fastqs.append(fq)
return filtered_fastqs
def finish(self):
for result in self.result():
for fq in result:
self.output.fastqs.append(fq)
if __name__ == "__main__":
# Command line
p = argparse.ArgumentParser()
p.add_argument("fastqs",nargs='+',metavar="FASTQ")
args = p.parse_args()
# Make and run a pipeline
ppl = Pipeline()
filter_empty_fastqs = FilterEmptyFastqs("Filter empty Fastqs",
args.fastqs)
run_fastqc = RunFastqc("Run Fastqc",
filter_empty_fastqs.output.fastqs,
os.getcwd())
ppl.add_task(filter_empty_fastqs)
ppl.add_task(run_fastqc,requires=(filter_empty_fastqs,))
ppl.run()
print(run_fastqc.output())
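# Hedged extension sketch (not part of the example): a further task following the same
# PipelineFunctionTask pattern as FilterEmptyFastqs above, e.g. collecting read counts.
# It reuses only the calls already demonstrated in this file (init/setup/finish,
# add_output, add_call, self.args, self.output, self.result) plus bcftbx's nreads;
# whether add_output accepts a dict is an assumption, so it is left commented out.
#
# class CountReads(PipelineFunctionTask):
#     def init(self,fastqs):
#         self.add_output('counts',dict())
#     def setup(self):
#         for fq in self.args.fastqs:
#             self.add_call("Count reads",self.count_reads,fq)
#     def count_reads(self,fq):
#         return {fq: nreads(fq)}
#     def finish(self):
#         for result in self.result():
#             self.output.counts.update(result)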
|
StarcoderdataPython
|
188981
|
<gh_stars>0
import os
import transform as trans
import numpy as np
from glob import glob
import argparse
def transformObj(inputFile, R, T, outSuffix='-aligned'):
parDir, filename = os.path.split(inputFile)
name, ext = os.path.splitext(filename)
outFile = os.path.join(parDir, name + outSuffix + ext)
lines = []
with open(inputFile) as fInput:
lines = fInput.readlines()
print('transform v and vn...\n')
for i in range(len(lines)):
line = lines[i]
if len(line) > 0 and line[0:2] == 'v ':
line_parts = line.split(' ')
vert = np.array([float(line_parts[1]), float(line_parts[2]), float(line_parts[3])])
outVert = R.dot(vert) + T
outLine = 'v %f %f %f\n' % (outVert[0], outVert[1], outVert[2])
lines[i] = outLine
elif len(line) > 0 and line[0:2] == 'vn':
line_parts = line.split(' ')
norm = np.array([float(line_parts[1]), float(line_parts[2]), float(line_parts[3])])
outNorm = R.dot(norm)
outLine = 'vn %f %f %f\n' % (outNorm[0], outNorm[1], outNorm[2])
lines[i] = outLine
else:
continue
print('write back to file...\n')
with open(outFile, 'w') as fOutput:
for line in lines:
fOutput.write('%s' % line)
print('done.\n')
def trasformPlyVC(inputFile, R, T, outSuffix='-aligned', outputFile=None):
parDir, filename = os.path.split(inputFile)
name, ext = os.path.splitext(filename)
outFile = os.path.join(parDir, name + outSuffix + ext)
    if outputFile is not None: outFile = outputFile
lines = []
with open(inputFile) as fInput:
lines = fInput.readlines()
print('transform v\n')
# find numVerts and idxEndHeader
numVerts = 0
idxEndHeader = 0
for i in range(len(lines)):
line = lines[i].strip(' ')
if len(line) >= 14 and line[0:14] == 'element vertex':
line_parts = line.split(' ')
numVerts = int(line_parts[2])
elif len(line) >= 10 and line[0:10] == 'end_header':
idxEndHeader = i
break
else:
continue
for i in range(numVerts):
idxInLines = idxEndHeader + 1 + i
line = lines[idxInLines].strip(' \n')
line_parts = line.split(' ')
vert = np.array([float(line_parts[0]), float(line_parts[1]), float(line_parts[2])])
outVert = R.dot(vert) + T
if len(line_parts)==6:
color = np.array([int(line_parts[3]), int(line_parts[4]), int(line_parts[5])])
outLine = '%f %f %f %u %u %u\n' % (outVert[0], outVert[1], outVert[2],
color[0], color[1], color[2])
elif len(line_parts)==7:
color = np.array([int(line_parts[3]), int(line_parts[4]), int(line_parts[5]), int(line_parts[6])])
outLine = '%f %f %f %u %u %u %u\n' % (outVert[0], outVert[1], outVert[2],
color[0], color[1], color[2], color[3])
else:
print('wrong ply file.\n')
return
lines[idxInLines] = outLine
print('write back to file...\n')
with open(outFile, 'w') as fOutput:
for line in lines:
fOutput.write('%s' % line)
print('done.\n')
def parseArgs():
parser = argparse.ArgumentParser()
    parser.add_argument('--plyFile', type=str, required=True, help="Only ASCII-format PLY files are supported for now, and normal information is not handled")
    parser.add_argument('--rtFile', type=str, required=True, help="txt file: the first line is the row-major rotation matrix, the second line is the translation vector, numbers separated by spaces")
parser.add_argument('--outSuffix', default='_aligned')
parser.add_argument('--outputFile', default=None, type=str)
args = parser.parse_args()
return args
def main():
args = parseArgs()
R, T = trans.readRT(args.rtFile)
trasformPlyVC(args.plyFile, R, T, outSuffix=args.outSuffix, outputFile=args.outputFile)
if __name__=='__main__':
main()
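# Illustrative check (not used by the tool): both functions above apply v' = R.v + T to
# vertices (and R alone to normals). A tiny standalone example with a hypothetical
# 90-degree rotation about Z and a translation along X:
def _rt_demo():
    R = np.array([[0.0, -1.0, 0.0],
                  [1.0,  0.0, 0.0],
                  [0.0,  0.0, 1.0]])
    T = np.array([10.0, 0.0, 0.0])
    v = np.array([1.0, 0.0, 0.0])
    return R.dot(v) + T   # -> array([10., 1., 0.])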
|
StarcoderdataPython
|
1896456
|
<gh_stars>0
from dotenv import load_dotenv, find_dotenv
from libs.google_auth_utils import get_credentials
load_dotenv(find_dotenv())
def main():
# Set the access scopes (Docs: https://developers.google.com/identity/protocols/oauth2/scopes)
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
# Call anytime a function needs to access a user's Google resource
creds = get_credentials(SCOPES)
#print(vars(creds))
print(f'Scopes:\n{creds.scopes}\n')
print(f'Token expiration:\n{creds.expiry}')
return
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
9771137
|
<filename>dijkstra.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
def dijkstra(weights):
num_nodes = len(weights)
    # tentative and fixed total weights
sum_tmp = [math.inf for _ in range(num_nodes)]
sum_fixed = [0 for _ in range(num_nodes)]
    # tentative routes and fixed routes
route_tmp = [[] for _ in range(num_nodes)]
route_fixed = [[] for _ in range(num_nodes)]
    # the route starts at node 1
route_tmp[0] = [1]
route_fixed[0] = [1]
min_idx = 0
    # repeat until the total weight to the goal node is fixed
while not sum_fixed[-1]:
for i in range(num_nodes):
            # if an edge exists and the new total is smaller than the tentative total, update it
if weights[min_idx][i] != 0 and sum_fixed[min_idx] + weights[min_idx][i] < sum_tmp[i]:
sum_tmp[i] = sum_fixed[min_idx] + weights[min_idx][i]
route_tmp[i] = route_fixed[min_idx] + [i+1]
        # fix the total weight and the route of the node with the smallest tentative total
min_idx = sum_tmp.index(min(sum_tmp))
sum_fixed[min_idx] = sum_tmp[min_idx]
route_fixed[min_idx] = route_tmp[min_idx]
        # reset the tentative total of the fixed node back to inf
sum_tmp[min_idx] = math.inf
return route_fixed[-1], sum_fixed[-1]
def main():
weight_matrix = [[0,50,80,0,0],[0,0,20,15,0],[0,0,0,10,15],[0,0,0,0,30],[0,0,0,0,0]]
route, weight_sum = dijkstra(weight_matrix)
print("Route : ", route)
print("Sum Weights : ", weight_sum)
if __name__ == '__main__':
main()
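# A second, hand-checkable example (illustrative only): for the directed graph
#   1 -> 2 (weight 1), 1 -> 3 (weight 4), 2 -> 3 (weight 2)
# the function above returns the route [1, 2, 3] with total weight 3:
#
#   dijkstra([[0, 1, 4],
#             [0, 0, 2],
#             [0, 0, 0]])   # -> ([1, 2, 3], 3)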
|
StarcoderdataPython
|
6615242
|
<filename>websubsub/views.py
import json
import logging
from collections import defaultdict
from datetime import timedelta
from django.conf import settings
from django.http import HttpResponse
from django.utils.decorators import classonlymethod
from django.utils.timezone import now
from rest_framework.views import APIView # TODO: can we live without drf dependency?
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
from .models import Subscription
from . import tasks
logger = logging.getLogger('websubsub.views')
class WssView(APIView):
"""
Generic websub callback processing.
Usage:
Create a celery task that will accept incoming data, then in your urls.py:
>>> from websubsub.views import WssView
>>> from .tasks import news_task, reports_task
>>>
>>> urlpatterns = [
    >>> path('/websubcallback/news/<uuid:id>', WssView.as_view(news_task), name='webnews'),
    >>> path('/websubcallback/reports/<uuid:id>', WssView.as_view(reports_task), name='webreports'),
>>> ]
"""
handler_task = None
@classonlymethod
def as_view(cls, handler_task, **kwargs):
kwargs['handler_task'] = handler_task
return super().as_view(**kwargs)
def get(self, request, *args, **kwargs):
"""
Hub sends GET request to callback url to verify subscription/unsubscription or
to inform about subscription denial.
"""
if 'hub.topic' not in request.GET:
logger.error(f'{request.path}: GET request is missing hub.topic')
return Response('Missing hub.topic', status=HTTP_400_BAD_REQUEST)
mode = request.GET.get('hub.mode', None)
if mode not in ['subscribe', 'unsubscribe', 'denied']:
logger.error(f'{request.path}: GET request received unknown hub.mode "{mode}"')
return Response('Missing or unknown hub.mode', status=HTTP_400_BAD_REQUEST)
id = args[0] if args else list(kwargs.values())[0]
try:
ssn = Subscription.objects.get(id=id)
except Subscription.DoesNotExist:
logger.error(
f'Received unwanted subscription {id} "{mode}" request with'
f' topic {request.GET["hub.topic"]} !'
)
return Response('Unwanted subscription', status=HTTP_400_BAD_REQUEST)
if mode == 'subscribe':
return self.on_subscribe(request, ssn)
elif mode == 'unsubscribe':
return self.on_unsubscribe(request, ssn)
elif mode == 'denied':
return self.on_denied(request, ssn)
def on_subscribe(self, request, ssn):
"""
The subscriber MUST confirm that the hub.topic corresponds to a pending
subscription or unsubscription that it wishes to carry out. If so, the
subscriber MUST respond with an HTTP success (2xx) code with a response
body equal to the hub.challenge parameter. If the subscriber does not
agree with the action, the subscriber MUST respond with a 404 "Not Found"
response.
Hubs MAY make the hub.lease_seconds equal to the value the subscriber
passed in their subscription request but MAY change the value depending
on the hub's policies. To sustain a subscription, the subscriber MUST
re-request the subscription on the hub before hub.lease_seconds seconds
has elapsed.
Hubs MUST enforce lease expirations, and MUST NOT issue perpetual lease
durations.
"""
if 'hub.challenge' not in request.GET:
logger.error(f'Missing hub.challenge in subscription verification {ssn.pk}!')
tasks.save.delay(
pk = ssn.pk,
subscribe_status = 'verifyerror',
verifyerror_count = ssn.verifyerror_count + 1
)
return Response('Missing hub.challenge', status=HTTP_400_BAD_REQUEST)
if not request.GET.get('hub.lease_seconds', '').isdigit():
logger.error(f'Missing integer hub.lease_seconds in subscription verification {ssn.pk}!')
tasks.save.delay(
pk = ssn.pk,
subscribe_status = 'verifyerror',
verifyerror_count = ssn.verifyerror_count + 1
)
return Response('hub.lease_seconds required and must be integer', status=HTTP_400_BAD_REQUEST)
if ssn.unsubscribe_status is not None:
logger.error(f'Subscription {ssn.pk} received subscription verification request,'
f' but its was explicitly unsubscribed before.')
return Response('Unsubscribed')
tasks.save.delay(
pk = ssn.pk,
subscribe_status = 'verified',
lease_expiration_time = now() + timedelta(seconds=int(request.GET['hub.lease_seconds'])),
connerror_count = 0,
huberror_count = 0,
verifyerror_count = 0,
verifytimeout_count = 0
)
logger.info(f'Got {ssn.pk} subscribe confirmation from hub.')
return HttpResponse(request.GET['hub.challenge'])
def on_unsubscribe(self, request, ssn):
if 'hub.challenge' not in request.GET:
logger.error(f'Missing hub.challenge in unsubscription verification {ssn.pk}!')
tasks.save.delay(
pk = ssn.pk,
unsubscribe_status = 'verifyerror',
verifyerror_count = ssn.verifyerror_count + 1
)
return Response('Missing hub.challenge', status=HTTP_400_BAD_REQUEST)
tasks.save.delay(
pk = ssn.pk,
unsubscribe_status = 'verified',
#lease_expiration_time = None, # TODO: should we reset it?
connerror_count = 0,
huberror_count = 0,
verifyerror_count = 0,
verifytimeout_count = 0
)
logger.info(f'Got {ssn.pk} unsubscribe confirmation from hub.')
return HttpResponse(request.GET['hub.challenge'])
def on_denied(self, request, ssn):
"""
TODO
        If (and when) the subscription is denied, the hub MUST inform the subscriber by
sending an HTTP GET request to the subscriber's callback URL as given in the
subscription request. This request has the following query string arguments appended:
hub.mode - REQUIRED. The literal string "denied".
        hub.topic - REQUIRED. The topic URL given in the corresponding subscription request.
        hub.reason - OPTIONAL. The hub may include a reason for which the subscription has been denied.
Hubs may provide an additional HTTP Location header to indicate that the subscriber may
retry subscribing to a different hub.topic. This allows for limited distribution to
specific groups or users in the context of social web applications.
The subscription MAY be denied by the hub at any point (even if it was previously accepted).
The Subscriber SHOULD then consider that the subscription is not possible anymore.
"""
if not ssn:
logger.error(f'Received denial on unwanted subscription with '
f'topic {request.GET["hub.topic"]}!')
return Response('Unwanted subscription')
logger.error(f'Hub denied subscription {ssn.pk}!')
tasks.save.delay(pk=ssn.pk, subscribe_status='denied')
return Response('')
def post(self, request, *args, **kwargs):
"""
The subscriber's callback URL MUST return an HTTP 2xx response code to
indicate a success. The subscriber's callback URL MAY return an HTTP 410
code to indicate that the subscription has been deleted, and the hub MAY
terminate the subscription if it receives that code as a response. The hub
MUST consider all other subscriber response codes as failures
Subscribers SHOULD respond to notifications as quickly as possible; their
success response code SHOULD only indicate receipt of the message, not
acknowledgment that it was successfully processed by the subscriber.
"""
id = args[0] if args else list(kwargs.values())[0]
try:
ssn = Subscription.objects.get(id=id)
except Subscription.DoesNotExist:
logger.error(
f'Received unwanted subscription {id} POST request! Sending status '
'410 back to hub.'
)
return Response('Unwanted subscription', status=410)
ssn.update(time_last_event_received=now())
self.handler_task.delay(request.data)
return Response('') # TODO
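# Hedged usage sketch (not part of this module): WssView.post() only calls
# ``self.handler_task.delay(request.data)``, so the handler wired up in urls.py (see the
# class docstring) is just a Celery task that accepts the pushed payload. The task name
# and body below are hypothetical.
#
# # tasks.py
# from celery import shared_task
#
# @shared_task
# def news_task(data):
#     ...  # process the WebSub notification payload (already parsed by DRF)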
|
StarcoderdataPython
|
9628712
|
import math
r1 = 2.49/2
r2 = 2.15/2
r3 = 0.32/2
r4 = 0.263/2
r5 = 0.077/2
r6 = 0.691/2
chang1 = 0.926821
kuan1 = 0.540230
chang2 = 0.507058
kuan2 = 0.224489
chang3 = 0.829926
kuan3 = 0.106065
chang4 = 0.587294
kuan4 = 0.602596
chang_daa = 0.476477
kuan_daa = 1.015017
chang_da = 1.015017
kuan_da = 0.182756
height1 = 0.413597
height2 = 0.790418
num_floors = 12
num_windows = 730
def yuan(r):
return r*r*math.pi
yuanhuan = yuan(r1)/2 - yuan(r2)/2
dayuan = yuan(r3)/2
zhongyuan = yuan(r4)/2
xiaoyuan = yuan(r5)/2
banyuan = yuan(r6)/2
max_width = 3.654645
max_length = 2.588481
window_width = 0.051671
window_height = 0.03725
factor = 1.91846/window_height
ju1 = chang1 * kuan1
kon1 = chang2 * kuan2
ju2 = chang3 * kuan3
ju3 = chang4 * kuan4
da_lou = chang_daa * kuan_daa
da_ju = chang_da * kuan_da
lou1 = ((ju1 + yuanhuan - kon1 + ju2 + ju3 + zhongyuan*3 + xiaoyuan *
2 + dayuan*2)*2 + banyuan + da_ju + da_lou) * height1
lou2 = da_lou * height2
volume = lou1 + lou2
real_volume = volume * factor * factor * factor
print("num_floors:", num_floors)
print("num_windows:", num_windows)
print("max_width:", max_width * factor)
print("max_length:", max_length * factor)
print("max_height:", (height1 + height2) * factor)
print("volume:", real_volume)
print("window height:", window_height * factor)
print("window width:", window_width * factor)
print("window square:", num_windows * window_height * window_width * factor * factor)
|
StarcoderdataPython
|
1627099
|
<gh_stars>0
from flask import render_template, request, redirect, url_for, abort, flash, session, g, send_from_directory
from flask.globals import session as session_obj
from flask_login import login_user, login_required, logout_user, current_user
from sqlalchemy.orm import exc
from sqlalchemy import and_
import json, os, time, smtplib, bcrypt, hashlib
from collections import OrderedDict
from app import *
from random import choice
from string import ascii_uppercase
from logger import logger
from db.samplev2 import generate_sample_db
from werkzeug.utils import secure_filename
from grader.grader import get_predicted_grade, genGrade
# General Function
def process_student_data(db_session, course_id, student_id):
course = db_session.query(Course). \
filter_by(id=course_id).one()
scores = db_session.query(Score). \
filter(Score.student_id == student_id). \
filter(Score.course_id == course_id). \
filter(Score.course_id == MaxScore.course_id). \
filter(Score.name == MaxScore.name). \
order_by(MaxScore.priority.asc()).all()
max_scores = db_session.query(MaxScore). \
filter_by(course_id=course_id). \
order_by(MaxScore.priority.asc()).all()
student = db_session.query(Student). \
filter_by(id=student_id).one()
course_total = db_session.query(MaxScore.maxscore).filter_by(course_id=course_id,
name='Total').one()[0]
average_query_unsorted = db_session.query(Score.name, func.avg(Score.score).label('Sums')). \
filter_by(course_id=course_id). \
group_by(Score.name). \
subquery()
average_query_sorted = db_session.query(average_query_unsorted.c.name,
average_query_unsorted.c.Sums.label('average')). \
filter(average_query_unsorted.c.name == MaxScore.name). \
filter(MaxScore.course_id == course_id). \
order_by(MaxScore.priority.asc()). \
all()
course_averages = OrderedDict(average_query_sorted)
# Course Average Pre processing
for key in course_averages:
course_averages[key] = round(course_averages[key], 2)
# Get Mid Term Average and Final Average
try:
course_final_average = course_averages['Total']
except KeyError:
course_final_average = 'Average Pending'
try:
course_mid_term_average = course_averages['Mid Term Total']
except KeyError:
course_mid_term_average = 'Average Pending'
logger(course_mid_term_average=course_mid_term_average,
course_final_average=course_final_average)
# Scores with Total in their score name are stripped
scores_actual_json = json.dumps(
[scores[i].score for i in range(len(scores)) if 'tal' not in str(scores[i].name).lower()])
scores_percentages = json.dumps(
[round(float(scores[i].score) * 100 / float(max_scores[i].maxscore), 2) for i in range(len(scores))
if 'tal' not in str(scores[i].name).lower()])
scores_names = json.dumps([i.name for i in scores if 'tal' not in str(i.name).lower()])
scores_distribution_percentages = json.dumps([i.maxscore for i in max_scores if 'tal' not in str(i.name).lower()])
course_averages_for_plot = json.dumps(course_averages)
return scores, \
course, \
scores_distribution_percentages, \
course_total, \
student, \
scores_names, \
scores_percentages, \
scores_actual_json, \
course_averages, \
course_averages_for_plot, \
course_mid_term_average, \
course_final_average
def get_grading(db_session, course_id):
maxscore_by_subject = db_session.query(MaxScore).filter_by(course_id=course_id).all()
score_names = [i.name for i in maxscore_by_subject]
name = score_names[-1]
maximum_scores_unsorted = db_session.query(Score.name, func.max(Score.score).label('Maxs')). \
filter_by(course_id=course_id). \
group_by(Score.name). \
subquery()
maximum_scores_sorted = db_session.query(maximum_scores_unsorted.c.name,
maximum_scores_unsorted.c.Maxs.label('Maximum')). \
filter(maximum_scores_unsorted.c.name == MaxScore.name). \
filter(MaxScore.course_id == course_id). \
order_by(MaxScore.priority.asc()). \
all()
minimum_scores_unsorted = db_session.query(Score.name, func.min(Score.score).label('Mins')). \
filter_by(course_id=course_id). \
group_by(Score.name). \
subquery()
minimum_scores_sorted = db_session.query(minimum_scores_unsorted.c.name,
minimum_scores_unsorted.c.Mins.label('Minimum')). \
filter(minimum_scores_unsorted.c.name == MaxScore.name). \
filter(MaxScore.course_id == course_id). \
order_by(MaxScore.priority.asc()). \
all()
course_total = db_session.query(MaxScore.maxscore).filter_by(course_id=course_id,
name=name).one()[0]
average_query_unsorted = db_session.query(Score.name, func.avg(Score.score).label('Sums')). \
filter_by(course_id=course_id). \
group_by(Score.name). \
subquery()
average_query_sorted = db_session.query(average_query_unsorted.c.name,
average_query_unsorted.c.Sums.label('average')). \
filter(average_query_unsorted.c.name == MaxScore.name). \
filter(MaxScore.course_id == course_id). \
order_by(MaxScore.priority.asc()). \
all()
course_averages = OrderedDict(average_query_sorted)
course_maximums = OrderedDict(maximum_scores_sorted)
course_minimums = OrderedDict(minimum_scores_sorted)
# Course Maximum Preprocessing
for key in course_maximums:
course_maximums[key] = round(course_maximums[key], 2)
try:
course_final_maximum = float(course_maximums[name])
except KeyError:
course_final_maximum = 'Maximum Pending'
# Course Minimum Preprocessing
for key in course_minimums:
course_minimums[key] = round(course_minimums[key], 2)
try:
course_final_minimum = float(course_minimums[name])
except KeyError:
course_final_minimum = 'Minimum Pending'
# Course Average Pre processing
for key in course_averages:
course_averages[key] = round(course_averages[key], 2)
# Get Mid Term Average and Final Average
try:
course_final_average = course_averages[name]
except KeyError:
course_final_average = 'Average Pending'
course_maximum, a, a_minus, b, b_minus, c, c_minus = get_predicted_grade(course_final_average, course_final_minimum,
course_final_maximum)
return course_maximum, a, a_minus, b, b_minus, c, c_minus
def get_last_column(db_session, course_id):
maxscore_by_subject = db_session.query(MaxScore).filter_by(course_id=course_id).all()
score_names = [i.name for i in maxscore_by_subject]
name = score_names[-1]
course_total = db_session.query(MaxScore.maxscore).filter_by(course_id=course_id,
name=name).one()[0]
return name, course_total
"""
Views Start Here
"""
@app.before_request
def make_session_permanent():
"""
    The session times out after 15 minutes of inactivity.
"""
session.permanent = True
app.permanent_session_lifetime = datetime.timedelta(minutes=15)
session.modified = True
g.user = current_user
def login_prepocess(db_session, user_credentials):
"""
    This preprocessor determines whether the user logging in exists in the faculty or the student table
:param db_session: Database session for the db
:param user_credentials: User credentials of the user logging in
"""
session_obj['userid'] = user_credentials.id.encode('utf-8')
session_obj['isAdmin'] = db_session.query(AuthStore.isAdmin).filter_by(id=session_obj['userid']).one()[0]
try:
session_obj['isSuper'] = db_session.query(Admin.isSuper).filter_by(id=session_obj['userid']).one()[0]
except exc.NoResultFound:
session_obj['isSuper'] = False
# Check is user is faculty or student
isStudent = False
isFaculty = False
try:
db_session.query(Student).filter_by(id=session_obj['userid']).one()
isStudent = True
except exc.NoResultFound:
pass
try:
db_session.query(Faculty).filter_by(id=session_obj['userid']).one()
isFaculty = True
except exc.NoResultFound:
pass
session_obj['isStudent'] = isStudent
session_obj['isFaculty'] = isFaculty
@app.route('/', methods=['GET', 'POST'])
def getHomePage():
if request.method == 'GET':
return render_template('homepage.html')
elif request.method == 'POST':
db_session = DBSession()
''' Password can either be an existing password or a token to be used for password resets '''
userid = request.form['bits-id'].encode('utf-8')
password = request.form['password'].encode('<PASSWORD>')
logger(userid=userid, password=password)
try:
user_credentials = db_session.query(AuthStore).filter_by(id=userid).one()
user_credential_salt = user_credentials.salt.encode('utf-8')
user_credential_phash = user_credentials.phash.encode('utf-8')
user_credential_token_hash = user_credentials.tokenHash
logger(Password=password,
Salt=user_credential_salt,
Phash_DB=user_credential_phash,
Phash_gen=bcrypt.hashpw(password, user_credential_salt))
if bcrypt.hashpw(password, user_credential_phash) == user_credential_phash:
login_user(user_credentials)
login_prepocess(db_session,
user_credentials)
return redirect(url_for('getCourses'))
elif user_credential_token_hash is not None:
sha256object = hashlib.sha256(password)
tokenHash_obtained = sha256object.hexdigest()
token_existence_time = str(db_session.query(func.now()).scalar() - user_credentials.tokenTimeStamp)
logger(tokenHash_actual=user_credential_token_hash,
tokenHash_obtain=tokenHash_obtained,
token_existence_time=token_existence_time)
# Handle token expiry and validity
if tokenHash_obtained == user_credential_token_hash:
if TOKEN_LIFETIME > token_existence_time:
login_user(user_credentials)
login_prepocess(db_session,
user_credentials)
return redirect(url_for('getDashboard'))
else:
error = "Token Expired! Get new token"
return render_template('homepage.html',
error=error)
else:
error = "Wrong username or Password!"
return render_template('homepage.html', error=error)
except exc.NoResultFound:
error = "No such user exists!"
return render_template('homepage.html', error=error)
finally:
db_session.close()
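# Illustrative sketch (not used by any view): the credential checks above combine bcrypt
# for stored password hashes with a SHA256-hashed one-time reset token. Reduced to the
# bare library calls (bcrypt and hashlib are already imported above; the literals are
# made up):
def _auth_check_demo():
    salt = bcrypt.gensalt()
    phash = bcrypt.hashpw('secret', salt)
    # same trick as the login view: re-hashing with the stored hash as salt must reproduce it
    password_ok = bcrypt.hashpw('secret', phash) == phash
    token_ok = hashlib.sha256('ABCDEFGHIJKLMNOP').hexdigest() == hashlib.sha256('ABCDEFGHIJKLMNOP').hexdigest()
    return password_ok and token_ok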
@login_required
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('getHomePage'))
@celery.task
def upload_async(upload_file_filename_secure):
"""
Celery task to upload files and populate database asynchronously
:param upload_file_filename_secure: Secure file name to load filename
:return:
"""
with app.app_context():
print "Started"
db_session = DBSession()
path = os.path.join(app.config['UPLOAD_FOLDER'], upload_file_filename_secure)
course_id, course_name_unformatted, semester, year = upload_file_filename_secure.split('_')
course_name = " ".join(course_name_unformatted.split('.'))
logger(year=year)
year, _ = year.split('.')
logger(year=year)
print "The path is: " + path
while not os.path.exists(path):
print "Waiting for file to be visible"
time.sleep(1)
if os.path.isfile(path):
print "Now the file is available"
generate_sample_db(path,
course_id,
course_name,
semester,
year,
db_session)
else:
raise ValueError("%s isn't a file!" % path)
db_session.close()
print "Finished"
@login_required
@app.route('/upload', methods=['GET', 'POST'])
def upload():
if request.method == 'GET':
if session_obj['isAdmin']:
return render_template('upload.html')
else:
return redirect(url_for('getHomePage'))
elif request.method == 'POST':
if session_obj['isAdmin']:
if 'file' not in request.files:
logger(request=request)
print "No file was sent"
upload_file = request.files['file']
if upload_file.filename == '':
error = "File wasn't selected!"
print "File wasn't selected"
return render_template('upload.html', error=error)
elif upload_file and allowed_file(upload_file.filename):
upload_file_filename_secure = secure_filename(upload_file.filename)
upload_file.save(os.path.join(app.config['UPLOAD_FOLDER'], upload_file_filename_secure))
print "Uploaded file successfully"
                flash('Upload Successful!')
upload_async.delay(upload_file_filename_secure)
return redirect(url_for('upload'))
error = "Incorrect file format was chosen!"
return render_template('upload.html', error=error)
else:
print request.path
abort(400)
def download(filename):
uploads = os.path.join(app.config['UPLOAD_FOLDER'], filename)
print uploads
return send_from_directory(directory=uploads, filename=filename)
@app.route('/grade/<string:course_id>', methods=['GET', 'POST'])
@login_required
def grade(course_id):
if request.method == 'GET':
if session_obj['isAdmin']:
db_session = DBSession()
a_max, a, a_minus, b, b_minus, c, c_minus = get_grading(db_session, course_id=course_id)
course_data = db_session.query(Course).filter_by(id=course_id).one()
course_name = course_data.name
course_name = '.'.join(course_name.split(' '))
filename = course_id + '_' + course_name + '_' + course_data.semester + '_' + course_data.year + '.csv'
logger(filename=filename)
db_session.close()
return render_template('grade.html', course_id=course_id, a_max=a_max, a_min=a, a_minus_max=a,
a_minus_min=a_minus, b_max=a_minus, b_min=b, b_minus_max=b, b_minus_min=b_minus,
c_max=b_minus, c_min=c, c_minus_max=c, c_minus_min=c_minus, filename=filename)
else:
return redirect(url_for('getHomePage'))
elif request.method == 'POST':
print request.form
if session_obj['isAdmin']:
a_max = float(request.form['A_max'])
a_min = float(request.form['A_min'])
a_minus_max = float(request.form['A-_max'])
a_minus_min = float(request.form['A-_min'])
b_max = float(request.form['B_max'])
b_min = float(request.form['B_min'])
b_minus_max = float(request.form['B-_max'])
b_minus_min = float(request.form['B-_min'])
c_max = float(request.form['C_max'])
c_min = float(request.form['C_min'])
c_minus_max = float(request.form['C-_max'])
c_minus_min = float(request.form['C-_min'])
db_session = DBSession()
course_data = db_session.query(Course).filter_by(id=course_id).one()
course_name = course_data.name
course_name = '.'.join(course_name.split(' '))
filename = course_id + '_' + course_name + '_' + course_data.semester + '_' + course_data.year + '.csv'
logger(filename=filename)
name, total = get_last_column(db_session, course_id)
column = name + '-' + str(total)
genGrade(filename=filename, column=column, a_min=a_min, a_minus_min=a_minus_min, b_min=b_min,
b_minus_min=b_minus_min, c_min=c_min, db_session=db_session)
db_session.close()
uploads = os.path.join(app.config['UPLOAD_FOLDER'])
print uploads
return send_from_directory(directory=uploads, filename=filename, as_attachment=True)
else:
print request.path
abort(400)
@app.route('/courses')
@login_required
def getCourses():
db_session = DBSession()
try:
# If session is not created by an admin user then load student courses else load all courses
if not session_obj['isAdmin']:
            course_ids = db_session.query(Score.course_id).filter_by(student_id=session_obj['userid']).distinct()
courses = db_session.query(Course).filter(Course.id.in_(course_ids)).all()
return render_template('courses.html',
courses=courses,
user_id=session_obj['userid'],
admin=False, super=False)
elif session_obj['isSuper']:
courses = db_session.query(Course).all()
return render_template('courses.html',
courses=courses,
user_id=session_obj['userid'],
admin=True,
super=True)
elif session_obj['isAdmin']:
courses = db_session.query(Course).all()
return render_template('courses.html',
courses=courses,
user_id=session_obj['userid'],
admin=True,
super=False)
except exc.NoResultFound:
return render_template('courses.html')
finally:
db_session.close()
# Need this for admin only
@app.route('/courses/<string:course_id>')
@login_required
def getStudentsByCourse(course_id):
if session_obj['isAdmin']:
db_session = DBSession()
students = db_session.query(Student).filter(
and_(Student.id == Score.student_id, Score.course_id == Course.id, Course.id == course_id)).all()
db_session.close()
return render_template('students.html',
students=students,
course_id=course_id)
else:
# If not a admin raise a 404 Not Found
abort(404)
@app.route('/courses/<string:course_id>/<string:student_id>')
@login_required
def getScoresByStudent(course_id, student_id):
db_session = DBSession()
scores, \
course, \
scores_distribution_percentages, \
course_total, \
student, \
scores_names, \
scores_percentages, \
scores_actual_json, \
course_averages, \
course_averages_for_plot, \
course_mid_term_average, course_final_average = process_student_data(db_session, course_id, student_id)
db_session.close()
return render_template('studentScore.html',
scores=scores,
course=course,
max_scores=scores_distribution_percentages,
course_total=course_total,
student=student,
x_=scores_names,
y_percentages=scores_percentages,
y_actual=scores_actual_json,
course_averages=course_averages,
course_averages_for_plot=course_averages_for_plot,
course_mid_term_average=course_mid_term_average,
course_final_average=course_final_average,
isAdmin=session_obj['isAdmin'])
@app.route('/predictions')
@login_required
def getPredictions():
return "<h1>Predictions</h1>"
@app.route('/admins')
@login_required
def getAllAdmins():
if request.method == 'GET':
if session_obj['isSuper']:
db_session = DBSession()
admins = db_session.query(Admin).all()
users_non_admin = db_session.query(AuthStore.id).filter_by(isAdmin=False)
students_non_admin = db_session.query(Student).filter(Student.id.in_(users_non_admin)).all()
faculty_non_admin = db_session.query(Faculty).filter(Faculty.id.in_(users_non_admin)).all()
db_session.close()
return render_template('admins.html',
admins=admins,
students=students_non_admin,
faculty=faculty_non_admin)
else:
abort(404)
@app.route('/admins/grant/<string:admin_id>', methods=['GET', 'POST'])
@login_required
def grantAdminPermissions(admin_id):
logger(method='Grant',
admin_id=admin_id)
if admin_id is not session_obj['userid']:
db_session = DBSession()
admin_credentials_status = db_session.query(AuthStore).filter_by(id=admin_id).first()
admin_credentials_status.isAdmin = True
new_admin = None
is_student = False
try:
new_admin = db_session.query(Student).filter_by(id=admin_id).one()
is_student = True
print "Success!"
except exc.NoResultFound:
print "Doesn't belong to Student table!"
if not is_student:
try:
new_admin = db_session.query(Faculty).filter_by(id=admin_id).one()
print "Success"
except exc.NoResultFound:
print "Doesn't belong to Faculty table!"
if new_admin:
db_session.add(Admin(id=new_admin.id,
name=new_admin.name,
gender=new_admin.gender,
isSuper=False))
db_session.commit()
db_session.close()
return redirect(url_for('getAllAdmins'))
else:
flash("Invalid Permission upgrade request!")
return redirect(url_for('getAllAdmins'))
@app.route('/admins/revoke/<string:admin_id>', methods=['GET', 'POST'])
@login_required
def revokeAdminPermissions(admin_id):
logger(method='Revoke',
admin_id=admin_id)
if admin_id is not session_obj['userid']:
db_session = DBSession()
admin_credentials_status = db_session.query(AuthStore).filter_by(id=admin_id).one()
admin_details = db_session.query(Admin).filter_by(id=admin_id).one()
if admin_details.isSuper is False:
admin_credentials_status.isAdmin = False
# Remove Admin from system only if not a superuser
db_session.delete(admin_details)
db_session.commit()
else:
flash("Removal of Superuser is disallowed!")
db_session.close()
return redirect(url_for('getAllAdmins'))
db_session.close()
return redirect(url_for('getAllAdmins'))
else:
flash("Can't revoke permissions of user in session!")
return redirect(url_for('getAllAdmins'))
@login_required
@app.route('/dashboard', methods=['GET', 'POST'])
def getDashboard():
if request.method == 'GET':
db_session = DBSession()
user = None
if session_obj['isStudent']:
user = db_session.query(Student).filter_by(id=session_obj['userid']).one()
elif session_obj['isFaculty']:
user = db_session.query(Faculty).filter_by(id=session_obj['userid']).one()
db_session.close()
if user:
return render_template('dashboard.html',
user=user)
else:
abort(404)
else:
# Process new password and change user password
new_password = request.form['password'].encode('utf-8')
if len(new_password) >= 6:
db_session = DBSession()
user_salt_new = bcrypt.gensalt()
user_phash_new = bcrypt.hashpw(new_password, user_salt_new)
user_credentials = db_session.query(AuthStore).filter_by(id=session_obj['userid']).one()
user_credentials.salt = user_salt_new
user_credentials.phash = user_phash_new
user_credentials.tokenHash = None
db_session.commit()
db_session.close()
flash("Password Successfully Changed!")
else:
flash("Password must have more than 6 characters")
return redirect(url_for('getDashboard'))
@app.route('/forgotpassword', methods=['GET', 'POST'])
def forgotPassword():
if request.method == 'GET':
return render_template('forgotpassword.html')
elif request.method == 'POST':
user_id = request.form['user-id'].encode('utf-8')
        ''' Store the SHA256 hash of the newly generated token in the tokenHash column '''
db_session = DBSession()
user_credentials = db_session.query(AuthStore).filter_by(id=user_id).one()
token = ''.join([choice(ascii_uppercase) for i in range(16)])
logger(new_password=token)
sha256object = hashlib.sha256(token)
tokenHash = sha256object.hexdigest()
logger(tokenHash=tokenHash)
user_credentials.tokenHash = tokenHash
user_credentials.tokenTimeStamp = func.now()
user_email = ""
if session_obj['isStudent']:
user = db_session.query(Student).filter_by(id=user_id).one()
user_email = user.email
elif session_obj['isFaculty']:
user = db_session.query(Faculty).filter_by(id=user_id).one()
user_email = user.email
db_session.commit()
db_session.close()
# Send Mail Task to Celery
if user_email and '@' in user_email:
sendmail.delay(user_email, user_id, token)
else:
flash('Default Recovery Email not set!')
return redirect(url_for('forgotPassword'))
return redirect(url_for('forgotPassword'))
@login_required
@app.route('/dashboard/recoverymail', methods=['GET', 'POST'])
def passwordRecoveryMail():
if request.method == 'POST':
db_session = DBSession()
password_recovery_mail = request.form['email'].encode('utf-8')
logger(password_recovery_mail=password_recovery_mail)
if session_obj['isStudent'] and '@' in password_recovery_mail:
user = db_session.query(Student).filter_by(id=session_obj['userid']).one()
user.email = password_recovery_mail
db_session.commit()
elif session_obj['isFaculty'] and '@' in password_recovery_mail:
user = db_session.query(Faculty).filter_by(id=session_obj['userid']).one()
user.email = password_recovery_mail
db_session.commit()
db_session.close()
return redirect(url_for('getDashboard'))
@celery.task
def sendmail(user_mail, user_id, new_password):
# Set as environment variables
gmail_user = '<EMAIL>'
gmail_password = '<PASSWORD>'
# Mail Send
sent_from = gmail_user
to = user_mail
subject = 'Forgot Password for Grade Predictor and Analyzer'
body = 'Your new password for ' + user_id + ' is ' + new_password + '\n - Admin'
email_text = """Subject: %s\n%s\n""" % (subject, body)
logger(message=email_text)
try:
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login(gmail_user, gmail_password)
server.sendmail(sent_from, to, email_text)
server.close()
logger(email="Email Sent Succesfully!")
except:
logger(email="Something went wrong!")
@login_required
@app.route('/courses/<string:course_id>/metrics')
def getCourseMetrics(course_id):
db_session = DBSession()
course = db_session.query(Course).filter_by(id=course_id).one()
maxscore_by_subject = db_session.query(MaxScore).filter_by(course_id=course.id).all()
score_names = [i.name for i in maxscore_by_subject]
max_scores = [i.maxscore for i in maxscore_by_subject]
list_scores_by_test_type = []
size_of_score_names = len(score_names)
for i in range(size_of_score_names):
score_query = db_session.query(Score).filter_by(course_id=course_id,
name=score_names[i]).all()
list_scores_by_test_type.append([(float(j.score) / float(max_scores[i])) * 100 for j in score_query])
db_session.close()
return render_template('metrics.html',
course_id=course_id,
max_score=max_scores,
scores=list_scores_by_test_type,
names=score_names)
@login_required
@app.route('/courses/<string:course_id>/<string:student_id>/<string:test_name>/edit', methods=['POST'])
def editMarks(student_id, course_id, test_name):
db_session = DBSession()
if session_obj['isAdmin']:
updated_score = request.form['update-score'].encode('utf-8')
max_score = db_session.query(MaxScore).filter_by(course_id=course_id,
name=test_name).one()
# Max score check
        if float(updated_score) <= max_score.maxscore:
score = db_session.query(Score).filter_by(course_id=course_id,
student_id=student_id,
name=test_name).one()
diff = float(updated_score) - score.score
# Total and Mid Term Total Update
try:
mid_term_total_priority = \
db_session.query(MaxScore.priority).filter_by(course_id=course_id,
name='Mid Term Total').one()[0]
final_total_priority = \
db_session.query(MaxScore.priority).filter_by(course_id=course_id,
name='Total').one()[0]
mid_term_total = db_session.query(Score).filter_by(course_id=course_id,
student_id=student_id,
name='Mid Term Total').one()
final_total = db_session.query(Score).filter_by(course_id=course_id,
student_id=student_id,
name='Total').one()
logger(total=final_total)
if max_score.priority < mid_term_total_priority:
mid_term_total.score += diff
final_total.score += diff
db_session.commit()
elif mid_term_total_priority < max_score.priority < final_total_priority:
final_total.score += diff
db_session.commit()
except:
pass
score.score = updated_score
db_session.commit()
scores, \
course, \
scores_distribution_percentages, \
course_total, \
student, \
scores_names, \
scores_percentages, \
scores_actual_json, \
course_averages, \
course_averages_for_plot, \
course_mid_term_average, course_final_average = process_student_data(db_session, course_id, student_id)
db_session.close()
return render_template('studentScore.html',
scores=scores,
course=course,
max_scores=scores_distribution_percentages,
course_total=course_total,
student=student,
x_=scores_names,
y_percentages=scores_percentages,
y_actual=scores_actual_json,
course_averages=course_averages,
course_averages_for_plot=course_averages_for_plot,
course_mid_term_average=course_mid_term_average,
course_final_average=course_final_average,
isAdmin=session_obj['isAdmin'])
|
StarcoderdataPython
|
6602002
|
import os
from office365.sharepoint.client_context import ClientContext
from settings import settings
cert_settings = {
'client_id': '51d03106-4726-442c-86db-70b32fa7547f',
'thumbprint': "6B36FBFC86FB1C019EB6496494B9195E6D179DDB",
'certificate_path': '{0}/selfsigncert.pem'.format(os.path.dirname(__file__))
}
ctx = ClientContext(settings['url']).with_client_certificate(settings.get('tenant'),
cert_settings['client_id'],
cert_settings['thumbprint'],
cert_settings['certificate_path'])
current_web = ctx.web.get().execute_query()
print("{0}".format(current_web.url))
|
StarcoderdataPython
|
6460002
|
# coding=utf-8
from flask import Flask, g, render_template, current_app, request, redirect, abort, url_for
from jinja2.utils import Markup
from werkzeug.routing import BaseConverter, ValidationError
from werkzeug.datastructures import MultiDict
from contextlib import contextmanager
from collections import namedtuple, OrderedDict
import sys
import re
import sqlite3
import urllib
import datetime
import bisect
sqlite3.register_converter('book', int)
app = Flask(__name__, static_folder='res', template_folder='tmpl')
@app.template_filter('classes')
def filter_classes(v):
if not v: return u''
return Markup(u' class="%s"') % u' '.join(v)
@app.template_filter('book')
def filter_book(v):
return mappings.books[v]
@app.template_filter('htmltext')
def filter_htmltext(s, meta=None, keywords=None):
if not s: return u''
slower = s.lower()
if meta is not None:
extra = bytes(meta).split('\xff')
markup = map(ord, extra[0])
assert len(markup) == len(s) or len(markup) == len(s) + 1
else:
extra = []
markup = [0] * len(s)
kwmark = [None] * len(s)
# flags:
# * 127 (bit mask)
# 0 -- normal
# 1 -- italicized (artificial text in KJV)
# 2 -- Capitalized
# 3 -- UPPERCASED
# * 128 (bit mask) -- will fetch the annotation from the extra *before* this
# * 255 -- separators for markup
# add pseudo keyword marks for query highlighting
# marks are added in reverse, so the earlier keyword overwrites others.
for k, keyword in list(enumerate(keywords or ()))[::-1]:
keyword = keyword.lower()
pos = -1
while True:
pos = slower.find(keyword, pos+1)
if pos < 0: break
for i in xrange(pos, pos+len(keyword)):
kwmark[i] = k
ss = []
cur = []
prevstyle = 0
prevmark = None
nextann = 1 # since extra[0] == markup (if any)
lastflags = None
if len(markup) > len(s): # there are len(s)+1 positions where annotated text can go
lastflags = markup.pop()
assert (lastflags & 127) == 0 # annotated text only
for ch, flags, mark in zip(s, markup, kwmark) + [(u'', lastflags, None)]:
flags = flags or 0
style = flags & 127
if flags & 128:
ss.append(Markup().join(cur))
ss.append(Markup('<small>%s</small>') % extra[nextann].decode('utf-8'))
cur = []
nextann += 1
if not ch or (style, mark) != (prevstyle, prevmark):
ss.append(Markup().join(cur))
cur = []
closing = []
opening = []
cascade = False
if cascade or mark != prevmark:
if prevmark is not None: closing.append(Markup('</mark>'))
if mark is not None: opening.append(Markup('<mark class="keyword%d">' % mark))
cascade = True
if cascade or style != prevstyle:
if prevstyle:
closing.append(Markup(('', '</i>', '</em>', '</strong>',
'</small>')[prevstyle]))
if style:
opening.append(Markup(('', '<i>', '<em>', '<strong>',
'<small>')[style]))
cascade = True
prevstyle = style
prevmark = mark
ss.extend(closing[::-1])
ss.extend(opening)
cur.append(ch)
return Markup().join(ss)
class Entry(sqlite3.Row):
def __init__(self, *args, **kwargs):
sqlite3.Row.__init__(self, *args, **kwargs)
self._primary_field = None
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def set_primary(self, name):
assert name in self.keys()
self._primary_field = name
return self
def __unicode__(self):
if self._primary_field:
return unicode(self[self._primary_field])
else:
return sqlite3.Row.__unicode__(self)
def __str__(self):
if self._primary_field:
return str(self[self._primary_field])
else:
return sqlite3.Row.__str__(self)
@contextmanager
def database():
db = sqlite3.connect('db/bible.db', detect_types=sqlite3.PARSE_COLNAMES)
db.row_factory = Entry
try:
yield db
finally:
db.close()
# a universal cache for immutable data
class Mappings(object):
def __init__(self):
self.reload()
def normalize(self, s):
if not isinstance(s, (str, unicode)): s = str(s)
return u''.join(s.split()).upper()
def reload(self):
with database() as db:
self.books = []
self.bookaliases = {}
self.versions = {}
self.versionaliases = {}
self.blessedversions = {}
# book: (minchapter, maxchapter)
self.chapterranges = {}
            # (book,chapter): (minverse, maxverse, deltaindex, deltaordinal)
self.verseranges = {}
# lexicographical_code: [(minordinal, maxordinal), ...]
dailyranges = {}
for row in db.execute('select * from books order by book;'):
assert len(self.books) == row['book']
row.set_primary('code')
self.books.append(row)
self.bookaliases[row['book']] = row
for row in db.execute('select * from bookaliases;'):
self.bookaliases[row['alias']] = self.books[row['book']], row['lang']
for row in db.execute('select * from versions;'):
row.set_primary('version')
self.versions[row['version']] = row
self.versionaliases[self.normalize(row['version'])] = row
if row['blessed']:
self.blessedversions[row['lang']] = row
for row in db.execute('select * from versionaliases;'):
self.versionaliases[row['alias']] = self.versions[row['version']]
for row in db.execute('''select book,
min(chapter) as minchapter,
max(chapter) as maxchapter
from verses group by book;'''):
self.chapterranges[row['book']] = \
(row['minchapter'], row['maxchapter'])
for row in db.execute('''select book, chapter,
min(verse) as minverse,
max(verse) as maxverse,
min("index") as minindex,
max("index") as maxindex,
min(ordinal) as minordinal,
max(ordinal) as maxordinal
from verses group by book, chapter;'''):
assert (row['maxverse'] - row['minverse'] ==
row['maxindex'] - row['minindex'] ==
row['maxordinal'] - row['minordinal'])
self.verseranges[row['book'], row['chapter']] = \
(row['minverse'], row['maxverse'],
row['minindex'] - row['minverse'],
row['minordinal'] - row['minverse'])
for row in db.execute('''select code,
v1.book as book1, v1.chapter as chapter1, v1.verse as verse1,
v2.book as book2, v2.chapter as chapter2, v2.verse as verse2
from topics
inner join verses v1 on v1.ordinal = ordinal1
inner join verses v2 on v2.ordinal = ordinal2
where kind = ?;''', ('daily',)):
bcv1 = (row['book1'], row['chapter1'], row['verse1'])
bcv2 = (row['book2'], row['chapter2'], row['verse2'])
dailyranges.setdefault(row['code'], []).append((bcv1, bcv2))
for ranges in dailyranges.values(): ranges.sort()
self.dailyranges = sorted(dailyranges.items())
# TODO
self.DEFAULT_VER = self.blessedversions['ko']['version']
self.DEFAULT_VER_PER_LANG = {
'ko': self.blessedversions['ko']['version'],
'en': self.blessedversions['en']['version'],
}
def find_book_by_alias(self, alias):
return self.bookaliases[self.normalize(alias)][0]
def find_book_and_lang_by_alias(self, alias):
return self.bookaliases[self.normalize(alias)]
def find_version_by_alias(self, alias):
return self.versionaliases[self.normalize(alias)]
def get_recent_daily(self, code):
# XXX a hack to locate the entry next to the today's entry
index = bisect.bisect_right(self.dailyranges, (code + unichr(sys.maxunicode),))
assert index <= 0 or self.dailyranges[index-1][0] <= code
assert index >= len(self.dailyranges) or self.dailyranges[index][0] > code
return Daily(index-1)
    def to_ordinal(self, (b,c,v)):
        try:
            minverse, maxverse, deltaindex, deltaordinal = self.verseranges[b,c]
        except KeyError:
            raise ValueError('invalid book-chapter-verse pair')
        if not (minverse <= v <= maxverse):
            raise ValueError('invalid book-chapter-verse pair')
        return deltaordinal + v
mappings = Mappings()
@app.context_processor
def inject_mappings():
return {
'debug': app.debug,
'mappings': mappings,
'build_query_suffix': build_query_suffix,
}
_triple = namedtuple('triple', 'book chapter verse index ordinal')
class triple(_triple):
def __new__(cls, book, chapter, verse):
try:
minchapter, maxchapter = mappings.chapterranges[book]
except KeyError:
raise ValueError('invalid book')
if chapter == '$': chapter = maxchapter
elif chapter <= 0: chapter = minchapter
try:
minverse, maxverse, deltaindex, deltaordinal = mappings.verseranges[book, chapter]
except KeyError:
raise ValueError('invalid chapter')
if verse == '$': verse = maxverse
elif verse <= 0: verse = minverse
if not (minverse <= verse <= maxverse):
raise ValueError('invalid verse')
index = deltaindex + verse
ordinal = deltaordinal + verse
return _triple.__new__(cls, book, chapter, verse, index, ordinal)
@property
def book_and_chapter(self):
return (self.book, self.chapter)
@property
def min_verse_in_chapter(self):
minverse, maxverse, deltaindex, deltaordinal = mappings.verseranges[self.book, self.chapter]
return minverse
@property
def max_verse_in_chapter(self):
minverse, maxverse, deltaindex, deltaordinal = mappings.verseranges[self.book, self.chapter]
return maxverse
class Daily(object):
def __init__(self, index):
code, ranges = mappings.dailyranges[index]
self.index = index
self.code = code
self.ranges = [(triple(*bcv1), triple(*bcv2)) for bcv1, bcv2 in ranges]
self.month, self.day = map(int, code.split('-', 1))
@property
def start(self):
return self.ranges[0][0]
@property
def end(self):
return self.ranges[-1][1]
@property
def num_verses(self):
return sum(e.ordinal - s.ordinal + 1 for s, e in self.ranges)
@property
def prev(self):
return Daily((self.index - 1) % len(mappings.dailyranges))
@property
def next(self):
return Daily((self.index + 1) % len(mappings.dailyranges))
class Normalizable(namedtuple('Normalizable', 'before after')):
def __str__(self): return str(self.after)
def __unicode__(self): return unicode(self.after)
def __getattr__(self, name): return getattr(self.after, name)
sqlite3.register_adapter(Entry, str)
sqlite3.register_adapter(Normalizable, str)
class BookConverter(BaseConverter):
def to_python(self, value):
try:
return Normalizable(value, mappings.find_book_by_alias(value))
except KeyError:
raise ValidationError()
def to_url(self, value):
return str(value)
class IntOrEndConverter(BaseConverter):
regex = r'(?:\d+|\$)'
num_convert = int
def to_python(self, value):
if value != '$':
try: value = int(value)
except ValueError: raise ValidationError
return value
def to_url(self, value):
if value != '$':
value = str(int(value))
return value
app.url_map.converters['book'] = BookConverter
app.url_map.converters['int_or_end'] = IntOrEndConverter
def build_query_suffix(**repl):
searching = repl.pop('_searching', False)
normalized_version = str(g.version1) + (','+str(g.version2) if g.version2 else '')
normalized_cursor = str(g.cursor) if g.cursor is not None else ''
newquery = MultiDict(request.args)
# unlike others, search may reset `v` (and try to redirect) according to the query,
# but if the explicit `v` is set it should NOT alter that.
if normalized_version and (searching or normalized_version != mappings.DEFAULT_VER):
newquery['v'] = normalized_version
else:
newquery.poplist('v')
if normalized_cursor:
newquery['c'] = normalized_cursor
else:
newquery.poplist('c')
kvs = [(k, v.encode('utf-8')) for k, v in newquery.items(multi=True) if k not in repl]
kvs += [(k, unicode(v).encode('utf-8')) for k, v in repl.items() if v is not None]
newquery = urllib.urlencode(kvs)
if newquery:
return '?' + newquery
else:
return ''
def normalize_url(self, **kwargs):
searching = kwargs.pop('_searching', False)
# common parameter `v`: version(s)
# v=<v1> or v=<v1>,<v2>
orig_version = request.args.get('v', '')
v1, _, v2 = orig_version.partition(',')
try:
g.version1 = mappings.find_version_by_alias(v1)
except Exception:
g.version1 = mappings.versions[mappings.DEFAULT_VER]
try:
g.version2 = mappings.find_version_by_alias(v2)
if g.version1 == g.version2: g.version2 = None
except Exception:
g.version2 = None
normalized_version = str(g.version1) + (','+str(g.version2) if g.version2 else '')
# same caveat to build_query_suffix applies for the searching.
if not searching and normalized_version == mappings.DEFAULT_VER: normalized_version = ''
# common parameter `c`: cursor
# c=<after> or c=<~before>
orig_cursor = request.args.get('c', '')
try:
g.cursor = int(orig_cursor) if len(orig_cursor) <= 6 else None
except Exception:
g.cursor = None
normalized_cursor = str(g.cursor) if g.cursor is not None else ''
need_redirect = (orig_version != normalized_version or \
orig_cursor != normalized_cursor)
normalized_kwargs = {}
for k, v in kwargs.items():
if isinstance(v, Normalizable):
after = unicode(v.after).encode('utf-8')
before = unicode(v.before).encode('utf-8')
normalized_kwargs[k] = after
need_redirect = need_redirect or after != before
else:
normalized_kwargs[k] = str(v).encode('utf-8')
if need_redirect:
abort(redirect(url_for(self, **normalized_kwargs) +
build_query_suffix(_searching=searching)))
# when we are not searching, we also parse and store (but do not normalize) `q`.
# this is a subset of search query syntax; the search procedure will produce
# a reconstructed list of normalized "keywords", and the syntax should match.
if not searching:
query = request.args.get('q', '').strip()
keywords = []
for m in re.findall(ur'(?ui)(?:"([^"]*)"|\'([^\']*)\'|((?:[^\W\d]|[\-\'])+))', query):
keyword = m[0] or m[1] or m[2]
if keyword: keywords.append(keyword)
g.keywords = keywords
def render_verses(tmpl, (prevc, verses, nextc), **kwargs):
query = kwargs.get('query', None)
highlight = kwargs.get('highlight', None)
if 'keywords' not in kwargs: kwargs['keywords'] = g.keywords
prev = None
prevhl = False
rows = []
tbodys = []
for verse in verses:
hl = highlight(verse['book'], verse['chapter'], verse['verse']) if highlight else False
if prevhl != hl:
sclasses = []
if prevhl: sclasses.append('highlight')
if rows: tbodys.append({'classes': sclasses, 'verses': rows})
rows = []
prevhl = hl
text = verse['text']
meta = verse['meta']
prefix = u''
vclasses = []
if (verse['book'], verse['chapter'], verse['verse']-1) == prev:
vclasses.append('cont')
rows.append({
'book': mappings.books[verse['book']],
'chapter': verse['chapter'],
'classes': vclasses,
'verse': verse['verse'],
'prefix': prefix,
'text': text,
'meta': meta,
'text2': verse['text2'] if 'text2' in verse.keys() else None,
'meta2': verse['meta2'] if 'meta2' in verse.keys() else None,
})
prev = (verse['book'], verse['chapter'], verse['verse'])
if rows:
sclasses = []
if prevhl: sclasses.append('highlight')
tbodys.append({'classes': sclasses, 'verses': rows})
return render_template(tmpl, version1=g.version1, version2=g.version2,
sections=tbodys, prevc=prevc, nextc=nextc, **kwargs)
def execute_verses_query(db, cursor=None, where='1', args=(), count=100):
inverted = False
if cursor is not None:
if cursor >= 0:
where += ' and v.ordinal >= ?'
args += (cursor,)
else:
where += ' and v.ordinal <= ?'
args += (~cursor,)
inverted = True
limit = ''
if count:
limit = ' limit ?'
args += (count,)
if g.version2:
verses = db.execute('''
select v.book as "book [book]", v.*, d.text as text, d.meta as meta,
d2.text as text2, d2.meta as meta2
from verses v left outer join data d on d.version=? and v.ordinal=d.ordinal
left outer join data d2 on d2.version=? and v.ordinal=d2.ordinal
where ''' + where + '''
order by ordinal ''' + ('desc' if inverted else 'asc') + limit + ''';
''', (g.version1, g.version2) + args)
else:
verses = db.execute('''
select v.book as "book [book]", v.*, d.text as text, d.meta as meta
from verses v left outer join data d on d.version=? and v.ordinal=d.ordinal
where ''' + where + '''
order by ordinal ''' + ('desc' if inverted else 'asc') + limit + ''';
''', (g.version1,) + args)
verses = verses.fetchall()
if inverted: verses.reverse()
return verses
def adjust_for_cursor(verses, cursor, count):
excess = count and len(verses) > count
if cursor is None:
        nextc = verses.pop().ordinal if excess else None # pop the extra last verse
prevc = None
elif cursor >= 0:
        nextc = verses.pop().ordinal if excess else None # pop the extra last verse
prevc = ~(verses[0].ordinal - 1) if verses and verses[0].ordinal > 0 else None
else:
prevc = ~verses.pop(0).ordinal if excess else None # pop first
nextc = verses[-1].ordinal + 1 if verses else None
return prevc, verses, nextc
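# A worked example of the cursor encoding used above (illustrative only; the
# demo helper and FakeVerse type are hypothetical and exist just to show the
# arithmetic). Non-negative cursors mean "page starts at this ordinal"; a
# negative cursor c means "page ends at ordinal ~c" via bitwise complement.
def _cursor_encoding_demo():
    from collections import namedtuple
    FakeVerse = namedtuple('FakeVerse', 'ordinal')
    # pretend the query for (cursor=500, count=3) fetched count+1 rows: 500..503
    rows = [FakeVerse(o) for o in (500, 501, 502, 503)]
    prevc, page, nextc = adjust_for_cursor(rows, 500, 3)
    # page keeps ordinals 500..502; nextc == 503 (start of the next page);
    # prevc == ~499 == -500, i.e. "the previous page ends at ordinal 499".
    return prevc, [v.ordinal for v in page], nextc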
def get_verses_unbounded(db, where='1', args=(), count=100, **kwargs):
verses = execute_verses_query(db, g.cursor, where=where, args=args,
count=count+1 if count else None, **kwargs)
return adjust_for_cursor(verses, g.cursor, count)
def get_verses_bounded(db, minordinal, maxordinal, where='1', args=(), count=100, **kwargs):
verses = execute_verses_query(db, g.cursor, where=where, args=args,
count=count+2 if count else None, **kwargs)
smaller = []
inrange = []
larger = []
for row in verses:
if row['ordinal'] < minordinal:
smaller.append(row)
elif row['ordinal'] > maxordinal:
larger.append(row)
else:
inrange.append(row)
# cursor code expects at most `count+1` verses.
# +2 adjustment was only to account for a non-empty `smaller`, so we no longer need that.
del inrange[count+1:]
# if we have a non-empty `larger`, `inrange` should be the final page.
if not larger:
prevc, inrange, nextc = adjust_for_cursor(inrange, g.cursor, count)
if prevc and ~prevc < minordinal: prevc = None
else:
prevc = ~(inrange[0].ordinal - 1) if inrange and inrange[0].ordinal > minordinal else None
nextc = None
return smaller[-1] if smaller else None, (prevc, inrange, nextc), larger[0] if larger else None
@app.route('/')
def index():
today = datetime.date.today()
daily = mappings.get_recent_daily('%02d-%02d' % (today.month, today.day))
return render_template('index.html', query=u'', books=mappings.books, daily=daily)
@app.route('/+/about')
def about():
return render_template('about.html', query=u'')
@app.route('/+/daily/')
@app.route('/+/daily/<code>')
def daily(code=None):
if code is None:
today = datetime.date.today()
actualcode = '%02d-%02d' % (today.month, today.day)
else:
if len(code) == 5 and code[:2].isdigit() and code[2] == '-' and code[3:].isdigit():
actualcode = code
else:
abort(404)
normalize_url('.daily', code=actualcode)
daily = mappings.get_recent_daily(actualcode)
if daily.code != code:
return redirect(url_for('.daily', code=daily.code))
with database() as db:
        where = '(%s)' % ' or '.join(['(v.ordinal between ? and ?)'] * len(daily.ranges))
args = tuple(bcv.ordinal for start_end in daily.ranges for bcv in start_end)
verses_and_cursors = get_verses_unbounded(db, where, args, count=None)
query = u'' # XXX
return render_verses('daily.html', verses_and_cursors, query=query, daily=daily)
@app.route('/+/daily/list')
def daily_list():
today = datetime.date.today()
daily = mappings.get_recent_daily('%02d-%02d' % (today.month, today.day))
dailylist = map(Daily, xrange(len(mappings.dailyranges)))
return render_template('daily_list.html', query=u'', daily=daily, dailylist=dailylist)
@app.route('/search')
def search():
query = request.args.get('q', u'').strip()
if not query: return redirect('/')
normalize_url('.search', _searching=True)
# search syntax:
# - lexemes are primarily separated (and ordered) by non-letter characters.
# - quotes can be used to escape whitespaces. no intra-quotes are accepted.
# - any lexeme (including quoted one) can be prefixed by `tag:` to clarify meaning.
# - there can only be one occurrence of chapter-verse range spec.
# the spec has its own syntax and is not governed by the ordinary separator.
# the range spec does not include the ordinary number, so that "1 John" etc. can be parsed.
# - a series of untagged unquoted lexemes is concatenated *again* and checked for known tokens.
# - any unrecognized token becomes a search keyword.
#
# example:
# "John 3:16" -> book:John, range:3:16
# "John 3 - 4 (KJV/개역)" -> book:John, range:3-4, version:KJV, version:개역
# "John b:3 - 4 (KJV/개역)" -> book:John, book:3, keyword:4, version:KJV, version:개역
# "2 1 John" -> range:2, book:1John
# "<NAME> 2 John" -> book:1John, book:2John (probably an error)
# "요한 계시록 어린양" -> book:Rev, keyword:어린양
# "요한 keyword:어린양 계시록" -> keyword:요한, keyword:어린양, keyword:계시록
# "'alpha and omega' niv" -> keyword:"alpha and omega", version:NIV
TAGS = {
u'v': u'version', u'ver': u'version', u'version': u'version',
u'q': u'keyword', u'keyword': u'keyword',
u'b': u'book', u'book': u'book',
# the pseudo-tag "range" is used for chapter and verse ranges
}
# parse the query into a series of tagged and untagged lexeme
lexemes = []
for m in re.findall(ur'(?ui)'
# chapter-verse range spec (1:2, 1:2-3:4, 1:2 ~ 4 etc.)
ur'(\d+\s*:\s*\d+)(?:\s*[-~]\s*(\d+(?:\s*:\s*\d+)?))?|'
# chapter-only range spec (1-2, 1 ~ 2 etc.)
# the single number is parsed later
ur'(\d+)\s*[-~]\s*(\d+)|'
# lexeme with optional tag (foo, v:asdf, "a b c", book:'x y z' etc.)
# a row of letters and digits does not mix (e.g. 창15 -> 창, 15)
ur'(?:(' + u'|'.join(map(re.escape, TAGS)) + ur'):)?'
ur'(?:"([^"]*)"|\'([^\']*)\'|((?:[^\W\d]|[\-\'])+|\d+))', query):
if m[0]:
chap1, _, verse1 = m[0].partition(u':')
chap1 = int(chap1)
verse1 = int(verse1)
if m[1]:
chap2, _, verse2 = m[1].rpartition(u':')
chap2 = int(chap2 or chap1)
verse2 = int(verse2)
else:
chap2 = verse2 = None
lexemes.append(('range', (chap1, chap2, verse1, verse2)))
elif m[2]:
chap1 = int(m[2])
if m[3] is not None:
chap2 = int(m[3])
else:
chap2 = None
lexemes.append(('range', (chap1, chap2, None, None)))
elif m[4]:
lexemes.append((TAGS[m[4]], m[5] or m[6] or m[7]))
elif m[5] or m[6]:
# quoted untagged lexemes are always keywords
lexemes.append(('keyword', m[5] or m[6]))
else:
# unquoted untagged lexemes are resolved later
if not (lexemes and lexemes[-1][0] is None):
lexemes.append((None, []))
lexemes[-1][1].append(m[7])
# resolve remaining unquoted untagged lexemes
tokens = []
for lexeme in lexemes:
if lexeme[0] is None:
unquoted = lexeme[1]
start = 0
while start < len(unquoted):
s = u''
# avoid quadratic complexity, no token is more than 5 words long
for i in xrange(start, min(start+5, len(unquoted))):
s += unquoted[i]
try:
book, lang = mappings.find_book_and_lang_by_alias(s)
tokens.append(('book', s))
start = i + 1
break
except KeyError:
pass
try:
version = mappings.find_version_by_alias(s)
if version.blessed: # TODO temporary
tokens.append(('version',s))
start = i + 1
break
except KeyError:
pass
else:
if unquoted[start].isdigit():
tokens.append(('range', (int(unquoted[start]), None, None, None)))
else:
tokens.append(('keyword', unquoted[start]))
start += 1
else:
tokens.append(lexeme)
# implied language is used to resolve versions when no other infos are available
implied_lang = set()
tagged = {}
for tag, value in tokens:
tagged.setdefault(tag, []).append(value)
if tag == 'version':
try:
                version = mappings.find_version_by_alias(value)
implied_lang.add(version.lang)
except KeyError:
pass
elif tag == 'book':
try:
                book, lang = mappings.find_book_and_lang_by_alias(value)
if lang: implied_lang.add(lang)
except KeyError:
pass
elif tag == 'keyword':
if all(u'가' <= c <= u'힣' for c in value):
implied_lang.add('ko')
elif all(not c.isalpha() or u'a' <= c <= 'z' or u'A' <= c <= u'Z' for c in value):
implied_lang.add('en')
if len(implied_lang) == 1:
implied_lang, = list(implied_lang)
else:
implied_lang = None # unknown or ambiguous
old_version = g.version1, g.version2
if 'version' in tagged:
versions = []
seen = set()
for s in tagged['version']:
try:
version = mappings.find_version_by_alias(s)
if version.blessed: # TODO temporary
if version.version not in seen:
seen.add(version.version)
versions.append(version)
except KeyError:
pass
g.version1 = versions[0] if len(versions) > 0 else None
g.version2 = versions[1] if len(versions) > 1 else None
# TODO version3 and later
else:
# if there is no other version hint but an implied lang, use it
if not request.args.get('v') and implied_lang:
g.version1 = mappings.versions[mappings.DEFAULT_VER_PER_LANG[implied_lang]]
version_updated = (g.version1, g.version2) != old_version
if 'book' in tagged:
books = []
for s in tagged['book']:
try:
book = mappings.find_book_by_alias(s)
books.append(book)
except KeyError:
pass
if books:
book = books[0]
# TODO 2 or more books
if 'range' in tagged:
# TODO check for len(tagged['range']) > 1
chap1, chap2, verse1, verse2 = tagged['range'][0]
if verse1:
if chap2:
assert verse2
url = url_for('.view_verses', book=book, chapter1=chap1, verse1=verse1,
chapter2=chap2, verse2=verse2)
else:
url = url_for('.view_verse', book=book, chapter=chap1, verse=verse1)
else:
if chap2:
url = url_for('.view_chapters', book=book, chapter1=chap1, chapter2=chap2)
else:
url = url_for('.view_chapter', book=book, chapter=chap1)
else:
url = url_for('.view_book', book=book)
return redirect(url + build_query_suffix(q=None))
keywords = tagged.get('keyword', [])
if not keywords: return redirect('/')
uniqwords = OrderedDict((w.lower(), w) for w in keywords) # zap duplicates
# for reconstructed queries, we need to insert quotes as needed.
# we also have to preserve cases while removing duplicates...
keywords = uniqwords.keys()
query = u' '.join(u'"%s"' % w if w.startswith("'") or
any(not c.isalpha() and not c.isdigit() and
c != '-' and c != "'" for c in w) else
u"'%s'" % w if w.startswith("'") else
w for w in uniqwords.values())
# version parameter should be re-normalized
if version_updated:
return redirect(url_for('.search') + build_query_suffix(q=query, _searching=True))
with database() as db:
verses_and_cursors = get_verses_unbounded(db,
' and '.join(['d."text" like ?'] * len(keywords)),
tuple('%%%s%%' % keyword for keyword in keywords))
return render_verses('search.html', verses_and_cursors, query=query, keywords=keywords)
@app.route('/<book:book>/')
def view_book(book):
normalize_url('.view_book', book=book)
return redirect(url_for('.view_chapter', book=book, chapter=1) + build_query_suffix())
@app.route('/<book:book>/<int_or_end:chapter>')
def view_chapter(book, chapter):
normalize_url('.view_chapter', book=book, chapter=chapter)
try:
start = triple(book.book, chapter, 0)
end = triple(book.book, chapter, '$')
except Exception:
abort(404)
with database() as db:
prev, verses_and_cursors, next = get_verses_bounded(db, start.ordinal, end.ordinal,
'v.ordinal between ? and ?', (start.ordinal-1, end.ordinal+1))
query = u'%s %d' % (book.abbr_ko, start.chapter)
return render_verses('chapters.html', verses_and_cursors, query=query, prev=prev, next=next,
book=book, chapter1=start.chapter, chapter2=end.chapter)
@app.route('/<book:book>/<int_or_end:chapter1>-<int_or_end:chapter2>')
def view_chapters(book, chapter1, chapter2):
normalize_url('.view_chapters', book=book, chapter1=chapter1, chapter2=chapter2)
try:
start = triple(book.book, chapter1, 0)
end = triple(book.book, chapter2, '$')
except Exception:
abort(404)
if start.ordinal > end.ordinal:
return redirect(url_for('.view_chapters', book=book, chapter1=chapter2, chapter2=chapter1))
with database() as db:
prev, verses_and_cursors, next = get_verses_bounded(db, start.ordinal, end.ordinal,
'v.ordinal between ? and ?', (start.ordinal-1, end.ordinal+1))
query = u'%s %d-%d' % (book.abbr_ko, start.chapter, end.chapter)
return render_verses('chapters.html', verses_and_cursors, query=query, prev=prev, next=next,
book=book, chapter1=start.chapter, chapter2=end.chapter)
def do_view_verses(book, start, end, query):
bcv1 = (start.book, start.chapter, start.verse)
bcv2 = (end.book, end.chapter, end.verse)
highlight = lambda b,c,v: bcv1 <= (b,c,v) <= bcv2
with database() as db:
verses_and_cursors = get_verses_unbounded(db, 'v.book=? and v."index" between ? and ?',
(book.book, start.index-5, end.index+5))
return render_verses('verses.html', verses_and_cursors, query=query, highlight=highlight,
book=book, chapter1=start.chapter, verse1=start.verse,
chapter2=end.chapter, verse2=end.verse)
@app.route('/<book:book>/<int_or_end:chapter>.<int_or_end:verse>')
def view_verse(book, chapter, verse):
normalize_url('.view_verse', book=book, chapter=chapter, verse=verse)
try:
start = end = triple(book.book, chapter, verse)
except Exception:
abort(404)
query = u'%s %d:%d' % (book.abbr_ko, start.chapter, start.verse)
return do_view_verses(book, start, end, query)
@app.route('/<book:book>/<int_or_end:chapter1>.<int_or_end:verse1>-<int_or_end:chapter2>.<int_or_end:verse2>')
def view_verses(book, chapter1, verse1, chapter2, verse2):
normalize_url('.view_verses', book=book, chapter1=chapter1, verse1=verse1,
chapter2=chapter2, verse2=verse2)
try:
start = triple(book.book, chapter1, verse1)
end = triple(book.book, chapter2, verse2)
except Exception:
abort(404)
if start.ordinal > end.ordinal:
return redirect(url_for('.view_verses', book=book, chapter1=chapter2, verse1=verse2,
chapter2=chapter1, verse2=verse1))
if chapter1 == chapter2:
query = u'%s %d:%d-%d' % (book.abbr_ko, start.chapter, start.verse, end.verse)
else:
query = u'%s %d:%d-%d:%d' % (book.abbr_ko, start.chapter, start.verse,
end.chapter, end.verse)
return do_view_verses(book, start, end, query)
@app.before_request
def compile_less():
if current_app.debug:
import sys, os, os.path, subprocess
path = app.static_folder
entrypoint = os.path.join(path, 'style.less')
combined = os.path.join(path, 'style.css')
combinedtime = os.stat(combined).st_mtime
if any(os.stat(os.path.join(path, f)).st_mtime > combinedtime
for f in os.listdir(path) if f.endswith('.less')):
print >>sys.stderr, ' * Recompiling %s' % entrypoint
subprocess.call(['lessc', '-x', entrypoint, combined])
if __name__ == '__main__':
import os
from flask.cli import main
os.environ['FLASK_APP'] = __file__
os.environ['FLASK_DEBUG'] = '1'
main(as_module=True)
|
StarcoderdataPython
|
12836395
|
<filename>dmlab2d/random_agent.py
# Copyright 2019 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Random agent for running against DM Lab2D environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import numpy as np
import pygame
import dmlab2d
from dmlab2d import runfiles_helper
def _make_int32_distribution(random, minimum, maximum):
def function():
return random.randint(minimum, maximum + 1)
return function
def _make_float64_distribution(random, minimum, maximum):
def function():
return random.uniform(minimum, maximum)
return function
class PyGameRandomAgent(object):
"""Random agent works with int32 or float64 bounded actions."""
def __init__(self, action_spec, observation_name, observation_spec, seed,
scale):
"""Create a PyGame agent.
Args:
action_spec: Environment action spec used to generate random actions.
observation_name: Name of observation to render each frame.
observation_spec: Environment observation spec for creating PyGame window.
seed: Agent seed used for generating random actions.
scale: Scales screen.
"""
self._observation_name = observation_name
random = np.random.RandomState(seed)
self._actions = []
self._scores = []
self._scale = scale
for name, spec in action_spec.items():
if spec.dtype == np.dtype('int32'):
self._actions.append(
(name, _make_int32_distribution(random, spec.minimum,
spec.maximum)))
elif spec.dtype == np.dtype('float64'):
self._actions.append(
(name, _make_float64_distribution(random, spec.minimum,
spec.maximum)))
else:
print("Warning '{}' is not supported".format(spec))
obs_spec = observation_spec[observation_name]
self._setup_py_game(obs_spec.shape)
def _setup_py_game(self, shape):
pygame.init()
pygame.display.set_caption('DM Lab2d')
self._game_display = pygame.display.set_mode(
(int(shape[1] * self._scale), int(shape[0] * self._scale)))
def _render_observation(self, observation):
obs = np.transpose(observation, (1, 0, 2))
surface = pygame.surfarray.make_surface(obs)
rect = surface.get_rect()
surf = pygame.transform.scale(
surface, (int(rect[2] * self._scale), int(rect[3] * self._scale)))
self._game_display.blit(surf, dest=(0, 0))
pygame.display.update()
def step(self, timestep):
"""Renders timestep and returns random actions according to spec."""
self._render_observation(timestep.observation[self._observation_name])
display_score_dirty = False
if timestep.reward is not None:
if timestep.reward != 0:
self._scores[-1] += timestep.reward
display_score_dirty = True
else:
self._scores.append(0)
display_score_dirty = True
if display_score_dirty:
pygame.display.set_caption('%d score' % self._scores[-1])
return {name: gen() for name, gen in self._actions}
def print_stats(self):
print('Scores: ' + ', '.join(str(score) for score in self._scores))
def _create_environment(args):
"""Creates an environment.
Args:
args: See `main()` for description of args.
Returns:
dmlab2d.Environment with one observation.
"""
args.settings['levelName'] = args.level_name
lab2d = dmlab2d.Lab2d(runfiles_helper.find(), args.settings)
return dmlab2d.Environment(lab2d, [args.observation], args.env_seed)
def _run(args):
"""Runs a random agent against an environment rendering the results.
Args:
args: See `main()` for description of args.
"""
env = _create_environment(args)
agent = PyGameRandomAgent(env.action_spec(), args.observation,
env.observation_spec(), args.agent_seed, args.scale)
for _ in range(args.num_episodes):
timestep = env.reset()
# Run single episode.
while True:
# Query PyGame for early termination.
if any(event.type == pygame.QUIT for event in pygame.event.get()):
print('Exit early last score may be truncated:')
agent.print_stats()
return
action = agent.step(timestep)
timestep = env.step(action)
if timestep.last():
# Observe last frame of episode.
agent.step(timestep)
break
# All episodes completed, report per episode.
agent.print_stats()
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--level_name', type=str, default='clean_up', help='Level name to load')
parser.add_argument(
'--observation',
type=str,
default='WORLD.RGB',
help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
parser.add_argument(
'--env_seed', type=int, default=0, help='Environment seed')
parser.add_argument('--agent_seed', type=int, default=0, help='Agent seed')
parser.add_argument(
'--num_episodes', type=int, default=1, help='Number of episodes')
parser.add_argument(
'--scale', type=float, default=1, help='Scale to render screen')
args = parser.parse_args()
_run(args)
if __name__ == '__main__':
main()
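# Example invocation (illustrative; the values shown are simply the defaults
# declared by the argparse flags above):
#
#   python random_agent.py --level_name=clean_up --observation=WORLD.RGB \
#       --num_episodes=1 --scale=1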
|
StarcoderdataPython
|
6543189
|
#!/usr/bin/env python
# encoding: utf-8
# <NAME>, 2016
'''Uses Fypp as Fortran preprocessor (.F90 -> .f90).'''
import re
import os.path
from waflib import Configure, Logs, Task, TaskGen, Tools
try:
import fypp
except ImportError:
fypp = None
Tools.ccroot.USELIB_VARS['fypp'] = set([ 'DEFINES', 'INCLUDES' ])
FYPP_INCPATH_ST = '-I%s'
FYPP_DEFINES_ST = '-D%s'
FYPP_LINENUM_FLAG = '-n'
################################################################################
# Configure
################################################################################
def configure(conf):
fypp_check(conf)
fypp_add_user_flags(conf)
@Configure.conf
def fypp_add_user_flags(conf):
'''Import user settings for Fypp.'''
conf.add_os_flags('FYPP_FLAGS', dup=False)
@Configure.conf
def fypp_check(conf):
'''Check for Fypp.'''
conf.start_msg('Checking for fypp module')
if fypp is None:
conf.fatal('Python module \'fypp\' could not be imported.')
version = fypp.VERSION
    version_regexp = re.compile(r'^(?P<major>\d+)\.(?P<minor>\d+)'
                                r'(?:\.(?P<patch>\d+))?$')
match = version_regexp.search(version)
if not match:
conf.fatal('cannot parse fypp version string')
version = (match.group('major'), match.group('minor'))
conf.env['FYPP_VERSION'] = version
conf.end_msg('found (version %s.%s)' % version)
################################################################################
# Build
################################################################################
class fypp_preprocessor(Task.Task):
ext_in = [ '.F90' ]
ext_out = [ '.f90' ]
color = 'CYAN'
def keyword(self):
return 'Processing'
def run(self):
opts = fypp.FyppOptions()
argparser = fypp.get_option_parser()
args = [FYPP_LINENUM_FLAG]
args += self.env.FYPP_FLAGS
args += [FYPP_DEFINES_ST % ss for ss in self.env['DEFINES']]
args += [FYPP_INCPATH_ST % ss for ss in self.env['INCLUDES']]
opts = argparser.parse_args(args, namespace=opts)
infile = self.inputs[0].abspath()
outfile = self.outputs[0].abspath()
if Logs.verbose:
Logs.debug('runner: fypp.Fypp %r %r %r'
% (args, infile, outfile))
tool = fypp.Fypp(opts)
tool.process_file(infile, outfile)
return 0
def scan(self):
parser = FyppIncludeParser(self.generator.includes_nodes)
nodes, names = parser.parse(self.inputs[0])
if Logs.verbose:
Logs.debug('deps: deps for %r: %r; unresolved: %r'
% (self.inputs, nodes, names))
return (nodes, names)
TaskGen.feature('fypp')(Tools.ccroot.propagate_uselib_vars)
TaskGen.feature('fypp')(Tools.ccroot.apply_incpaths)
@TaskGen.extension('.F90')
def fypp_preprocess_F90(self, node):
'Preprocess the .F90 files with Fypp.'
f90node = node.change_ext('.f90')
self.create_task('fypp_preprocessor', node, [ f90node ])
if 'fc' in self.features:
self.source.append(f90node)
################################################################################
# Helper routines
################################################################################
class FyppIncludeParser(object):
'''Parser for include directives in files preprocessed by Fypp.
It can not handle conditional includes.
'''
    # Include file pattern; the opening and closing quote must match (backreference).
INCLUDE_PATTERN = re.compile(r'^\s*#:include\s*(["\'])(?P<incfile>.+?)\1',
re.MULTILINE)
def __init__(self, incpaths):
'''Initializes the parser.
        :param incpaths: Include paths to consider when resolving include files.
        :type incpaths: list
'''
# Nodes still to be processed
self._waiting = []
# Files we have already processed
self._processed = set()
# List of dependent nodes
self._dependencies = []
# List of unresolved dependencies
self._unresolved = set()
# Paths to consider when checking for includes
self._incpaths = incpaths
def parse(self, node):
        '''Parses the includes in a given node.
        :return: Tuple with two elements: list of dependent nodes and list of
            unresolved dependencies.
'''
self._waiting = [ node, ]
# self._waiting is eventually extended during _process() -> iterate
while self._waiting:
curnode = self._waiting.pop(0)
self._process(curnode)
return (self._dependencies, list(self._unresolved))
def _process(self, node):
incfiles = self._get_include_files(node)
for incfile in incfiles:
if incfile in self._processed:
continue
self._processed.add(incfile)
incnode = self._find_include_node(node, incfile)
if incnode:
self._dependencies.append(incnode)
self._waiting.append(incnode)
else:
self._unresolved.add(incfile)
def _get_include_files(self, node):
txt = node.read()
matches = self.INCLUDE_PATTERN.finditer(txt)
incs = [ match.group('incfile') for match in matches ]
return incs
def _find_include_node(self, node, filename):
for incpath in self._incpaths:
incnode = incpath.find_resource(filename)
if incnode:
break
else:
incnode = node.parent.find_resource(filename)
return incnode
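# A small standalone check of the include pattern above (illustrative only;
# the file content and helper name are made up for the sketch):
def _include_pattern_demo():
    text = '#:include "common.fypp"\n#:include \'macros.fypp\'\n'
    # -> ['common.fypp', 'macros.fypp']
    return [m.group('incfile')
            for m in FyppIncludeParser.INCLUDE_PATTERN.finditer(text)]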
|
StarcoderdataPython
|
8196845
|
import os
from dotenv import load_dotenv
import psycopg2
load_dotenv()
DB_NAME = os.getenv("DB_NAME", default="OH_NO!")
DB_USER = os.getenv("DB_USER", default="OH_NO!")
DB_PW = os.getenv("DB_PW", default="<PASSWORD>!")
DB_HOST = os.getenv("DB_HOST", default="OH_NO!")
CSV_FILEPATH = "titanic.csv"
conn = psycopg2.connect(dbname=DB_NAME, user=DB_USER,
password=DB_PW, host=DB_HOST)
cursor = conn.cursor()
def query_execute(cursor, query):
cursor.execute(query)
return cursor.fetchall()
SURVIVOR_NUM = """
SELECT
COUNT(survived)
FROM titanic_queries
WHERE survived = True;
"""
survivor_count = query_execute(cursor, SURVIVOR_NUM)
print(f"There were {survivor_count} survivors from the Titanic")
PCLASS_NUM = """
SELECT
COUNT(pclass)
FROM titanic_queries
GROUP BY pclass;
"""
pclass_count = query_execute(cursor, PCLASS_NUM)
print("1, 2, 3:", pclass_count)
PCLASS_SORTED_SURVIVORS = """
SELECT
COUNT(pclass),
COUNT(survived)
FROM titanic_queries
WHERE survived = true
GROUP BY pclass;
"""
PCLASS_SORTED_DEATHS = """
SELECT
COUNT(pclass),
COUNT(survived)
FROM titanic_queries
WHERE survived = false
GROUP BY pclass;
"""
sorted_survivors = query_execute(cursor, PCLASS_SORTED_SURVIVORS)
sorted_deaths = query_execute(cursor, PCLASS_SORTED_DEATHS)
print("Survivors/Deaths by Class: 1, 2, 3:", sorted_survivors, sorted_deaths)
AVERAGE_AGE = """
SELECT
AVG(age),
survived
FROM titanic_queries
GROUP BY survived;
"""
avg_age = query_execute(cursor, AVERAGE_AGE)
print(avg_age)
conn.commit()
cursor.close()
conn.close()
|
StarcoderdataPython
|
4984283
|
#!/usr/bin/env python
#-*- coding: utf-8 -*
#
# Copyright 2012 msx.com
# by <EMAIL>
# 2012-4-14
#
# Sputnik DBObject Cache
#
# ToDoList:
#
import redis
class VectorCache:
def __init__(self):
pass
class ViewCache:
def __init__(self):
pass
class ViewMetadataTable:
def __init__(self):
pass
class DataModifyTable:
def __init__(self):
pass
class SpuDBObjectCache:
cache_conf = None
redis = None
@classmethod
def init_dbobject_cache(cls, conf):
"""
{
'enable' : True,
'db': 8,
'host': 'localhost',
'port': 6379
}
"""
cls.cache_conf = conf
pool = redis.ConnectionPool(
host = cls.cache_conf['host'],
port = cls.cache_conf['port'],
db = cls.cache_conf['db'])
cls.redis = redis.Redis(connection_pool=pool)
def __init__(self):
pass
def _get_view(self):
pass
def _get_view_list(self):
pass
def _modify(self, type, dbobject):
pass
def set_cache(self, dbobject):
"""
set dbobject or dbobject list to cache
"""
pass
def get_cache(self, dbobject):
"""
get dbobject from cache
"""
pass
def insert_modify(self, dbobject):
pass
def update_modify(self, dbobject):
pass
def delete_modify(self, dbobject):
pass
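# A minimal initialization sketch mirroring the config dict documented in
# init_dbobject_cache above (illustrative only; host/port/db values are just
# examples and the helper name is hypothetical):
def _init_cache_demo():
    SpuDBObjectCache.init_dbobject_cache({
        'enable': True,
        'db': 8,
        'host': 'localhost',
        'port': 6379,
    })
    # the class-level redis client is now available for cache operations
    return SpuDBObjectCache.redis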
|
StarcoderdataPython
|
6598190
|
from __future__ import annotations
import itertools
import threading
import traceback
from datetime import datetime, timedelta
from time import sleep
import keyboard as kb
# functions to be run, you can change these!
from waiting import wait
import foe_desktops
from foe_bot_army import processArmy
from foe_bot_gold import goldCollector
from foe_bot_goods import processGoods
from foe_bot_idle import processIdleBuildings
from foe_bot_loot import lootCollector2
from foe_bot_social import initSocialProcesses, processSocial
from foe_bot_supplies import processSupplies
from foe_bot_zoom import zoomOut
from foe_control import ydiff1, pressEsc, pressButton
from foe_pics import *
from foe_pool import initPool
from foe_utils import randDur, checkIfPaused, randSleepSec, lock
collectGold = True # collect gold from buildings.
collectArmy = True # collect gold from buildings.
collectSupplies = True # collect supplies from buildings.
restartIdleBuildings = True # restart any idle building.
collectGoods = True # collect goods from buildings other than supplies and gold.
collectSocial = True # automatically aid other people and accept friend requests.
doZoomOut = True # automatically zoom out
collectGuild = True # collect guild if full
doUnstuck = True # reboot if session expired
doSwitchBots = True # switch virtual screens to another accounts
rebootSomeTime = True # reboot game some times
doCollectLoot2 = True # collect in-game loot
minimumTimeOnDesktop = 120 # minimum amount of time to spend on one desktop, sec
pyautogui.FAILSAFE = False
pyautogui.PAUSE = 0
class GameState:
games = []
def __init__(self):
self.lastRebooted = datetime.now()
@classmethod
def needToReboot(cls) -> bool:
game_state = cls.getCurrentGameState()
if game_state is None:
return False
return game_state.lastRebooted + timedelta(hours=1) < datetime.now()
@classmethod
def getCurrentGameState(cls) -> GameState:
global currentDesktop
if currentDesktop == 0 or currentDesktop > len(cls.games) - 1:
return None
return cls.games[currentDesktop]
@classmethod
def rebooted(cls):
game_state = cls.getCurrentGameState()
if game_state is None:
return
game_state.lastRebooted = datetime.now()
def initGamesState():
for i in range(1, foe_desktops.numberOfDesktops):
GameState.games.append(GameState())
initGamesState()
socialProcesses = []
def processGuild():
guild = findGuild()
if guild is None:
randSleepSec(60, 120)
return
else:
pressEsc()
with lock:
checkIfPaused()
x = guild.left
y = guild.top + guild.height + 1
leftRegion = (x + 3, y, 8, 7)
rightRegion = (x + guild.width / 2 + 2, y, 8, 7)
leftScreen = pyautogui.screenshot(region=leftRegion)
rightScreen = pyautogui.screenshot(region=rightRegion)
found = pyautogui.locate(leftScreen, rightScreen, confidence=0.8)
if found:
logging.info("Found full guild")
pyautogui.moveTo(guild.left, guild.top + guild.height + ydiff1,
duration=randDur())
pyautogui.click()
guildGet = findPic('guildGet')
tries = 10
while guildGet is None and tries > 0:
tries = tries - 1
randSleepSec(1, 3)
guildGet = findPic('guildGet')
pressButton(guildGet, False)
else:
logging.debug("Guild is not full")
pressEsc()
randSleepSec(60, 180)
def reboot():
with lock:
checkIfPaused()
activateWindow()
pyautogui.press('f5')
GameState.rebooted()
sleep(1)
def activateWindow():
pyautogui.moveTo(pyautogui.size().width / 2, 15)
pyautogui.click()
def unstuck():
if findPic('sessionExpired') is not None:
pressButton(findPic('rebootNow'), True)
randSleepSec(5, 10)
playBtn = findPic('play')
if playBtn is not None:
pressButton(playBtn, True)
worldBtn = findPic('world')
if worldBtn is not None:
pressButton(worldBtn, True)
randSleepSec(5, 10)
eventsPanel = findPic('events')
if eventsPanel is not None:
pressEsc()
randSleepSec(5, 10)
if findPic('visitUnavailable') is not None:
pressButton(findPic('ok'), True)
returnToCity = findPic('returnToCity')
if returnToCity is not None:
pressButton(returnToCity, False)
if findPic('cannotHelp') is not None:
reboot()
randSleepSec(5, 15)
currentDesktop = 1
def switchSlave():
windows = foe_desktops.getGameWindows()
foe_desktops.hideAll()
for window in itertools.cycle(windows):
with lock:
checkIfPaused()
foe_desktops.show(window)
initSocialProcesses()
sleep(minimumTimeOnDesktop)
with lock:
foe_desktops.hide(window)
def startBot(botFunction, toggle):
if not toggle:
return
botName = botFunction.__name__
logging.info("Starting bot " + botName)
thread = threading.Thread(name=botName, target=safeInfiniteLoop,
args=(botFunction,))
thread.setDaemon(True)
thread.start()
return thread
def rebooter():
wait(GameState.needToReboot)
reboot()
def safeInfiniteLoopFactory(func):
return lambda: safeInfiniteLoop(func)
def safeInfiniteLoop(func):
while True:
try:
logging.info("Bot iteration")
checkIfPaused()
func()
except Exception as e:
logging.error(traceback.format_exc())
logging.error(e)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(threadName)s:%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if doSwitchBots:
foe_desktops.moveToFirstDesktop()
startBot(goldCollector, collectGold)
startBot(processSupplies, collectSupplies)
startBot(processIdleBuildings, restartIdleBuildings)
startBot(processGoods, collectGoods)
startBot(processSocial, collectSocial)
startBot(processArmy, collectArmy)
startBot(zoomOut, doZoomOut)
startBot(processGuild, collectGuild)
startBot(unstuck, doUnstuck)
startBot(switchSlave, doSwitchBots)
startBot(rebooter, rebootSomeTime)
initPool()
startBot(lootCollector2, doCollectLoot2)
while not kb.is_pressed('end'):
sleep(1)
logging.info("Bye!")
|
StarcoderdataPython
|
11274463
|
<filename>app/migration.py
from datetime import datetime
import requests
import pandas as pd
import pandas_gbq
from config import config_eu, config_us
from google.cloud import bigquery
from typing import List
def operation_refine_city_data_appendbq(project_id:str, destination_tableid:str, newly_arrived: pd.DataFrame, *args, **kwargs):
district = pandas_gbq.read_gbq(
f"""
SELECT district
FROM `{project_id}.crime_statistics_polisenapi.dim_district`
""", project_id=project_id)
details_list = newly_arrived['details'] + ' ' + newly_arrived['summary'] + ' ' + newly_arrived['name']
newly_arrived['location_details'] = [extract_location_details(detail, district=district) for detail in details_list]
newly_arrived['location_details'] = newly_arrived['location_details'] + ' ' + newly_arrived['location_name'] + ' ' + 'Sweden'
pandas_gbq.to_gbq(newly_arrived, f'crime_statistics_polisenapi.{destination_tableid}', project_id=project_id, if_exists='append')
print(f'{newly_arrived.shape[0]} rows added to table: crime_statistics_polisenapi.{destination_tableid}')
def move_tables(project_id: str, config_source: dict=config_us, config_destination: dict=config_eu,
table_ids:List[str]=['raw', 'cities_refined_en', 'cities_refined','dim_district' ]):
source_dataset_id = config_source['dataset_id']
destination_dataset_id = config_destination['dataset_id']
for table_id in table_ids:
df = pandas_gbq.read_gbq(
f"""
SELECT *
FROM `{project_id}.{source_dataset_id}.{table_id}`
""")
pandas_gbq.to_gbq(df, f'{destination_dataset_id}.{table_id}', project_id=project_id, location=config_destination['location'],
if_exists='replace')
def main():
bq_client = bigquery.Client()
project_id = bq_client.project
move_tables(project_id=project_id)
if __name__=='__main__':
main()
|
StarcoderdataPython
|
1847642
|
return "the end"
x = 2 + 2
|
StarcoderdataPython
|
3538161
|
<reponame>starkyller/dissertacao<filename>backend/hmobiweb/apps/monitoring_solutions/api/views.py
from rest_framework import generics
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from ..models import (
MonitoringCategory,
SolutionObjective,
Solution,
)
from .serializers import (
MonitoringCategorySerializer,
SolutionObjectiveSerializer,
SolutionSerializer,
SolutionDetailSerializer,
)
class MonitoringCategoryListAPIView(generics.ListAPIView):
"""
get:
Return a list of all the existing monitoring categories.
"""
queryset = MonitoringCategory.objects.all()
serializer_class = MonitoringCategorySerializer
class MonitoringCategoryDetailAPIView(generics.RetrieveAPIView):
"""
get:
Return an existing monitoring category.
"""
lookup_field = 'slug'
queryset = MonitoringCategory.objects.all()
serializer_class = MonitoringCategorySerializer
class SolutionObjectiveListAPIView(generics.ListAPIView):
"""
get:
Return a list of all the existing solution objectives.
"""
lookup_field = 'slug'
queryset = SolutionObjective.objects.all()
serializer_class = SolutionObjectiveSerializer
class SolutionObjectiveDetailAPIView(generics.RetrieveAPIView):
"""
get:
    Return an existing detailed solution objective.
"""
lookup_field = 'slug'
queryset = SolutionObjective.objects.all()
serializer_class = SolutionObjectiveSerializer
class SolutionListAPIView(generics.ListAPIView):
"""
get:
Return a list of all the existing solutions.
"""
queryset = Solution.objects.all()
serializer_class = SolutionSerializer
class SolutionDetailAPIView(generics.RetrieveAPIView):
"""
get:
Return a detailed solution.
"""
lookup_field = 'slug'
queryset = Solution.objects.all()
serializer_class = SolutionDetailSerializer
|
StarcoderdataPython
|
4879699
|
import fileinput
from collections import defaultdict
from utils import parse_nums, memoize
@memoize
def resolve(r):
if type(rules[r]) == str:
return [rules[r]]
matches = []
for subrule in rules[r]:
submatches = ['']
for n in subrule:
new = []
for m in resolve(n):
for existing in submatches:
new.append(existing + m)
submatches = new
matches.extend(submatches)
return matches
rules = defaultdict(list)
messages = []
for line in fileinput.input():
line = line.strip()
nums = parse_nums(line)
if nums:
parts = line.split(": ")[1].split(" | ")
r = nums[0]
for p in parts:
if '"' in p:
rules[r] = p[1]
else:
rules[r].append([int(x) for x in p.split(' ')])
elif line:
messages.append(line)
pl = len(resolve(42)[0])
part_1 = 0
part_2 = 0
for line in messages:
if line in resolve(0):
part_1 += 1
orig_line = line
a = 0
b = 0
while line[:pl] in resolve(42):
line = line[pl:]
a += 1
while line[:pl] in resolve(31):
line = line[pl:]
b += 1
if a > b and b > 0 and not line:
print orig_line
part_2 += 1
print "Part 1:", part_1
print "Part 2:", part_2
|
StarcoderdataPython
|
8155308
|
<reponame>yvanlvA/Possible_Web_Server_Frame<filename>WebServer/possible/Context/Cookie.py
#_*_ coding:utf-8 _*_
import http.cookies
class Cookie(object):
def __init__(self):
self.sCookie = http.cookies.SimpleCookie()
self.reqCookieDic = {}
def CookieLoad(self, rewdata):
if rewdata == "" or rewdata == None:
return
cTeList = []
if ';' in rewdata:
cTeList = rewdata.split('; ')
else:
cTeList.append(rewdata)
for item in cTeList:
key, value = item.split('=')
self.reqCookieDic[key] = value
def GetCookie(self, key):
return self.reqCookieDic.get(key, None)
def RetCookie(self):
set_cookieList = self.sCookie.output().splitlines()
ret = set()
for item in set_cookieList:
key, value = item.split(': ')
ret.add((key, value))
return ret
def main():
c = Cookie()
#sessionid=88245be7fbf549c13b42075d; check="gAAAAABcpfvM8KrsntMb4jxihY="; user=gAAAAABcpfv7B_pfTcuWFvCPUs60
c.CookieLoad("sessionid=88245be7fbf549c13b42075d; check=gAAAAABcpfvM8KrsntMb4jxihY=; user=gAAAAABcpfv7B_pfTcuWFvCPUs60")
print(c.reqCookieDic)
print(c.GetCookie("sessionid"))
'''
c.sCookie["sessionid"] = "1111111"
c.sCookie["sessionid"]["path"] = "/"
c.sCookie["222"] = "22222222"
c.sCookie["222"]["path"] = "/"
c.sCookie["222"]['max-age'] = 123
print(c.RetCookie())
    expires   # date string specifying when the cookie expires
    path      # path the cookie applies to
    comment   # tells the server how this cookie is used, UTF-8
    domain    # the cookie is only sent to the specified domain
    max-age   # lifetime in seconds
    secure    # if set, the cookie is only sent over HTTPS (SSL) connections
    version   # mandatory cookie version field, though rarely used in practice :D
    httponly  # with HttpOnly set, JavaScript cannot read the cookie, which helps mitigate XSS attacks
from http import cookies
c = cookies.SimpleCookie()
c["supar"] = "netWork"
c["supar"]["path"] = "/"
c.output()
'Set-Cookie: supar=netWork; Path=/'
c["supar"]["max-age"] = "360"
c.output()
'Set-Cookie: supar=netWork; Max-Age=360; Path=/'
c["supar"]["domain"] = "yvan.top"
c.output()
'Set-Cookie: supar=netWork; Domain=yvan.top; Max-Age=360; Path=/'
'''
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
9618698
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020 - KMEE
import os
import sys
from os import path
from xmldiff import main
from lxml import etree as etree_
sys.path.append(path.join(path.dirname(__file__), '..', 'nfselib'))
from nfselib.ginfes.v3_01 import (
servico_enviar_lote_rps_envio,
servico_consultar_nfse_rps_envio,
)
def parsexml_(infile, parser=None, keep_signature=False, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
parser = etree_.ETCompatXMLParser()
doc = etree_.parse(infile, parser=parser, **kwargs)
root = doc.getroot()
# remove Signature element before XML comparison
if not keep_signature:
for child in root:
            if child.tag in ["{http://www.w3.org/2000/09/xmldsig#}Signature",
                             "{http://www.w3.org/2000/09/xmldsig#}ds:Signature"]:
root.remove(child)
subtree = etree_.ElementTree(root)
return subtree
def parse(inFilename, supermod):
parser = None
doc = parsexml_(inFilename, parser)
rootNode = doc.getroot()
rootTag, rootClass = supermod.get_root_tag(rootNode)
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
return rootObj
def execute_test(path, servico):
for filename in os.listdir(path):
subtree = parsexml_('%s/%s' % (path, filename,))
inputfile = 'input.xml'
subtree.write(inputfile, encoding='utf-8')
obj = parse(inputfile, servico)
outputfile = 'output.xml'
with open(outputfile, 'w') as f:
obj.export(f, 0, namespaceprefix_='')
diff = main.diff_files(inputfile, outputfile)
print(diff)
assert len(diff) == 0
def test_enviar_lote_rps_envio():
xml_path = 'tests/nfse/ginfes/enviar_lote_rps_envio'
execute_test(xml_path, servico_enviar_lote_rps_envio)
def test_consultar_nfse_rps_envio():
xml_path = 'tests/nfse/ginfes/consultar_nfse_rps_envio'
execute_test(xml_path, servico_consultar_nfse_rps_envio)
|
StarcoderdataPython
|
5169063
|
# -*-coding:utf-8-*-
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def image2example(image, label):
return tf.train.Example(features=tf.train.Features(feature={
"image/encoded": bytes_feature(image),
"image/label": int64_feature(label)
}))
def image2tfrecord(image_paths, tfrecord_path):
with tf.python_io.TFRecordWriter(tfrecord_path) as tf_writer:
for idx, ele in enumerate(image_paths):
with tf.gfile.Open(ele, "rb") as f:
image_raw = f.read()
example = image2example(image_raw, idx)
tf_writer.write(example.SerializeToString())
def tfrecord2example():
return {
"image/encoded": tf.FixedLenFeature([], dtype=tf.string),
"image/label": tf.FixedLenFeature([], dtype=tf.int64)
}
if __name__ == "__main__":
image1_path = "../test_image/image1.jpg"
image2_path = "../test_image/image2.jpg"
tfrecord_path = "../test_image/image1.tfrecords"
image2tfrecord([image1_path, image2_path], tfrecord_path)
tf_reader = tf.TFRecordReader()
"""
tf.train.string_input_producer() 输入为文件列表, 记得加 []
该函数创建输入文件队列, 返回 A queue with the output strings.
A `QueueRunner` for the Queue is added to the current `Graph`'s `QUEUE_RUNNER` collection.
然后可以通过 tf.train.batch() tf.train.shuffle_batch() 可以通过 num_threads
指定多个线程同时执行入队操作, tf.train.shuffle_batch() 的入队操作就是数据读取和预处理的过程
多个线程会同时读取一个文件的不同样例并进行预处理
"""
file_queue = tf.train.string_input_producer([tfrecord_path])
    # key is the file path plus record offset, e.g. "b'../test_image/image1.tfrecords:0'"
key, image_raw = tf_reader.read(file_queue)
features_map = tfrecord2example()
features = tf.parse_single_example(image_raw, features=features_map)
label = features["image/label"]
image = features["image/encoded"]
print('after features["image/encoded"]: ', image.shape)
image = tf.image.decode_jpeg(image, channels=3)
print("after tf.image.decode_jpeg: ", image.shape)
image = tf.image.resize_image_with_crop_or_pad(image, 400, 400)
    # tf.train.batch() requires image to have a fully defined shape, so it is set during preprocessing
image, label = tf.train.batch([image, label], batch_size=2, num_threads=2)
with tf.Session() as sess:
# print(sess.run([key]))
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for _ in range(1):
image, label = sess.run([image, label])
print("label: ", label)
print("after sess.run: ", image.shape)
for ele in range(1):
                # only one dimension may be -1,
                # e.g. ele = np.reshape(image[ele, :, :, :], [-1, -1, 3]) would be invalid
ele = np.reshape(image[ele, :, :, :], [400, -1, 3])
print(ele.shape)
plt.imshow(ele)
# plt.axis("off")
plt.show()
coord.request_stop()
coord.join(threads)
|
StarcoderdataPython
|
1663609
|
<reponame>AABur/python-project-lvl2
# -*- coding:utf-8 -*-
import pytest
from gendiff.loader import GendiffFileError, collect_data
@pytest.mark.parametrize(
'file_path',
[
('tests/fixtures/wrong_ext.ttt'),
('tests/fixtures/wrong_json.json'),
('tests/fixtures/wrong_yaml.yaml'),
('file_not_exists'),
],
)
def test_wrong_file(file_path):
with pytest.raises(GendiffFileError):
assert collect_data(file_path)
|
StarcoderdataPython
|
6017
|
# @Time : 2022/1/26 23:07
# @Author : zhaoyu
# @Site :
# @File : __init__.py.py
# @Software: PyCharm
# @Note : xx
|
StarcoderdataPython
|
1616350
|
<filename>classify.py<gh_stars>0
import spacy
import pandas as pd
import numpy as np
import math
# import random
from collections import Counter, defaultdict
import sys
import re
import os
import dataManagment as dm
nlp = spacy.load('en')
import spacy.parts_of_speech as pos_t
VERB = 'VERB'
nsubj = 'nsubj'
dobj = 'dobj'
NN = 'NOUN'
NNS = 'NNS'
ADJ = 'ADJ'
sessions = set()
# this should be replaced with a entity recognition
directions = ["up", "down", "side", "left", "right", "in", "out", "forward", "backward", "north", "south", "east", "west"]
def isDirection(text):
if text in directions:
return True
else:
return False
class Ideas(object):
def __init__(self, doc_):
self.ideas = []
# start with verbs
for word in doc_:
if word.pos_ == VERB:
self.ideas.append(Idea(word))
for idea in self.ideas:
idea.removeBlockWords()
idea.sortTokensToArray()
def __str__(self):
my_string = ""
for idea in self.ideas:
my_string = "{0} *** {1}".format(my_string, str(idea))
# idea should contain noun chunks i think
return my_string
class Idea(object):
def __init__(self, verb):
self.words = set()
self.wordsArray=[]
self.verb = verb
self.words.add(verb)
self.addToken(verb)
def addToken(self, token):
for child in token.children:
if child.pos != pos_t.VERB:
self.addToken(child)
self.words.add(child)
def sortTokensToArray(self):
self.wordsArray = sorted(self.words, key=lambda Token: Token.i)
# run before sort
# removed stop words that are not directions
def removeBlockWords(self):
nonBlockedWords=set()
for token in self.words:
if notStopWord(token):
nonBlockedWords.add(token)
self.words = nonBlockedWords
def __str__(self):
return str(self.wordsArray)
# doc = nlp(u"the rain goes up into the clouds and then comes back down")
# ideas = Ideas(doc)
# print(ideas)
def getIntents(text):
doc=nlp(text)
print("doc")
print(doc)
conseptsPresent=Ideas(doc)
classifications = []
info = {}
info['response']=[]
info["category"]=[]
for idea in conseptsPresent.ideas:
# print(idea)
classifications.append(classify_baysian(idea.wordsArray, catagories, likelihood))
info['response'].append(str(idea))
info['category'].append(classify_baysian(idea.wordsArray, catagories, likelihood))
# save learning data as JSON
for i in range(len(info['response'])):
        entry = dm.LearingEntry(info['category'][i], info['response'][i], info['response'][i])
updateLearingFile("Training_test/learning.json" , entry)
return(classifications)
def read_training_file(fpath):
catagories = Counter()
likelihood =defaultdict(Counter)
training_data = dm.loadData(fpath)
for entry in training_data:
doc =nlp(entry["phrase"])
catagories[entry["classification"]] += 1
for word in doc:
if notStopWord(word):
likelihood[entry["classification"]][word.lemma_] +=1
return (catagories, likelihood)
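# The training file read above is expected to be a JSON list of objects with
# "phrase" and "classification" keys (a hedged sketch; the example phrases and
# category names below are invented):
#
#   [
#       {"phrase": "the rain goes up into the clouds", "classification": "water_cycle"},
#       {"phrase": "move the robot forward", "classification": "movement"}
#   ]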
def printDict(dict):
print(len(dict))
for key, value in dict.items():
print(key, value)
def notStopWord(token):
return not token.is_stop or isDirection(token.lemma_)
# return the class that maxamizes postereor with min probobility
def classify_baysian(doc, priors, likelihood):
# print("************************************************")
# printDict(priors)
if len(doc) < 1:
return "garbage"
min_prob = 1E-9
max_class = (-1E6, '')
for catagory in priors:
p=priors[catagory]
n=float(sum(likelihood[catagory].values()))
for token in doc:
p = p * max(min_prob,likelihood[catagory][token.lemma_] )/ n
if p > max_class[0]:
max_class=(p, catagory)
return max_class[1]
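# A tiny self-contained sketch of the inputs classify_baysian expects
# (illustrative only: FakeToken stands in for a spaCy token, and the categories
# and counts are made up):
def _classify_demo():
    from collections import Counter, defaultdict, namedtuple
    FakeToken = namedtuple('FakeToken', 'lemma_')
    priors = Counter({'movement': 2, 'weather': 1})
    likelihood = defaultdict(Counter)
    likelihood['movement'].update(['move', 'forward'])
    likelihood['weather'].update(['rain'])
    doc = [FakeToken('move'), FakeToken('forward')]
    return classify_baysian(doc, priors, likelihood)  # -> 'movement'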
def updateLearingFile(fpath, entry):
currentData = dm.loadData(fpath)
currentData.append(entry.getJSON())
dm.saveData(fpath, currentData)
def is_non_zero_file(fpath):
return True if os.path.isfile(fpath) and os.path.getsize(fpath) > 0 else False
def train_modle():
training_file = "Training_test/training.json"
global catagories
global likelihood
(catagories, likelihood) = read_training_file(training_file)
load_responces("Training_test/nodes.json")
def load_responces(fpath):
# csvFile = pd.read_csv(filename, low_memory=False, encoding='ISO-8859-1')
global nodes
nodes = set()
loadedNodes = dm.loadData(fpath)
for node in loadedNodes:
        nodes.add(Node(node))
for node in nodes:
print(node.name)
class Session(object):
def __init__(self, sessionId,baseNode):
self.id=sessionId
self.nodesActivated=[]
self.sortedNodes= sorted(nodes, key=lambda Node: Node.numberOfCalls)
# def nodeAvailible(self, inputContext):
# return self.nodesActivated[-1].
def forceNode(self, actionIntent, decition):
if "yesno" in actionIntent:
print("decition ", decition)
if "yes" in decition:
print("got here yes!!!!!!!!!!!!!!!!!!!!!!!")
if self.activateNode(getNode(self.nodesActivated[-1].yes_force)):
return self.getCurrentBOTResponce()
else:
print("error: could not add forced yes")
if "no" in decition:
if self.activateNode(getNode(self.nodesActivated[-1].no_force)):
return self.getCurrentBOTResponce()
else:
print("error: could not add forced no")
else:
if "restart" in decition:
self.nodesActivated=[]
if self.activateNode(getNode(actionIntent)):
return self.getCurrentBOTResponce()
else:
print("error: could not add foced node")
def nextNode(self, intents):
# self.nodesActivated
# self.wordsArray = sorted(self.words, key=lambda Token: Token.i)
        for node in self.sortedNodes:  # search the ordered list of least-used responses
            if node.name in intents:  # if the node is in the list of intents
                if self.activateNode(node):  # try to add the node to the current session
                    return self.getCurrentBOTResponce()  # if added, return the response
# for intent in intents:
# if self.activateNode(getNode(str(intent))):
# return self.getCurrentBOTResponce()
# not found
return "defalt text responce"
def activateNode(self, node):
if self.isContextAvailable(node.input_context):
self.nodesActivated.append(node)
# self.sortedNodes = sorted(self.nodesActivated, key=lambda Token: Token.i
self.sortedNodes = sorted(nodes, key=lambda Node: Node.numberOfCalls)
for node in self.sortedNodes:
print(node.name," ",node.numberOfCalls)
return True
else:
return False
def printHistory(self):
histString=''
for node in self.nodesActivated:
histString = "{0} > {1}".format(histString, node.name)
print(histString)
def getCurrentBOTResponce(self):
callResponceIndex = self.nodesActivated[-1].getCallNumberInctement()
print(callResponceIndex)
        return self.nodesActivated[-1].responses[callResponceIndex]
def isContextAvailable(self, input_contexts):
if len(self.nodesActivated) == 0: #first welcome node
return True
# if "pass_through" in input_contexts:
# if len(self.nodesActivated)<2:
# print("not sure how we got here with less then 2 activate nodes")
# return self.nodesActivated[-2].isContextAvailableNode(input_contexts)
# else:
return self.nodesActivated[-1].isContextAvailableNode(input_contexts)
def currentContext(self):
return self.nodesActivated[-1].output_context
class Node(object):
# def __init__(self, name, responce,input_context,output_context):
def __init__(self, nodeLoadedInfo):
# csvFile["category"][i], csvFile["reply"][i],
# csvFile["input_context"][i],csvFile["output_context"][i]
self.name = nodeLoadedInfo["classification"]
self.numberOfCalls=0
        self.input_context = nodeLoadedInfo["input_context"]
        self.output_context = nodeLoadedInfo["output_context"]
self.responses = []
for responce in nodeLoadedInfo["response"]:
self.responses.append(Responce(responce))
# self.yes_force = nodeLoadedInfo["yes_force"]
# self.no_force = nodeLoadedInfo["no_force"]
# this should indicate if we have gone through them all which it does not right now ********
def getCallNumberInctement(self):
currentCallIndex = self.numberOfCalls
        print(self.responses)
        self.numberOfCalls = (self.numberOfCalls + 1) % len(self.responses)
return currentCallIndex
# check
def isContextAvailableNode(self, input_contexts):
for input_context in input_contexts:
if input_context in self.output_context:
return True
return False
# self.availibleNodes=2
class Responce(object):
def __init__(self, responceLoaded):
self.text = responceLoaded["text"]
self.input_context = responceLoaded["input_context"]
self.output_context = responceLoaded["output_context"]
self.decisions = set()
for decision in responceLoaded["decision"]:
self.decisions.add(Decision(decision))
class Decision(object):
def __init__(self, loadedDecision):
self.name = loadedDecision["name"]
self.destination = loadedDecision["node"]
def getNode(category):
for node in nodes:
if node.name == category:
return node
print(category, " is unclasified")
for node in nodes:
if node.name == "unknown":
return node
# should never get here
return False
def startSession(ID, node):
session = Session(ID, node)
sessions.add(session)
return session
def getSession(ID):
for session in sessions:
if ID == session.id:
return session
# if not found
return startSession(ID, getNode("base"))
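# Minimal usage sketch (assumes the training/node JSON files referenced above exist,
# that nodes.json defines a "base" node, and that `nlp` is a loaded spaCy model):
#
#     train_modle()
#     session = getSession("demo-user")
#     intents = getIntents("the rain goes up into the clouds")
#     print(session.nextNode(intents))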
|
StarcoderdataPython
|
11200448
|
<gh_stars>0
import logging
from common.asserts import assert_overflowing
from common.components.hero import assert_h1_spacing, assert_subtext_spacing
from common.components.para_blocks import *
from . import mobile_browser as browser
logger = logging.getLogger(__name__)
def test_hero_section(browser):
section_class = 'fyle-design-system'
assert_h1_spacing(browser, section_class, 16)
assert_subtext_spacing(browser, section_class, 0)
def test_para_block_spacing(browser):
assert_para_blocks(browser, para_width=440, image_width=440)
def test_page_overflow(browser):
assert_overflowing(browser)
|
StarcoderdataPython
|
5181209
|
import pickle as pkl
import numpy as np
import numpy.linalg as linalg
# import scipy.linalg as linalg
import scipy.stats as stats
import pandas as pd
import copy as cp
def getPeaksAndBWs(strf,dt=5,df=1/6, discard_thresh=0.05):
original_strf= strf
strf=np.maximum(original_strf,0)
l2_norm_pos = np.sum(strf[:]**2)
[u,s,v] = linalg.svd(strf)
f1 = u[:,0]
t1 = v.T[:,0]
abs_max_f1_val = np.max(np.abs(f1))
abs_max_f1_ix = np.argmax(np.abs(f1))
abs_max_t1_val = np.max(np.abs(t1))
abs_max_t1_ix = np.argmax(np.abs(t1))
pos_peaks_ix = np.argwhere(np.abs(t1)>0.1*abs_max_t1_val)
if len(pos_peaks_ix)>1:
pos_first_peak_ix = pos_peaks_ix[-1]
else:
pos_first_peak_ix = pos_peaks_ix
f_pos_peak = (abs_max_f1_ix)*df
f_pos_bw = np.sum(np.abs(f1)>0.5*abs_max_f1_val)*df
t_pos_peak = (len(t1) - abs_max_t1_ix)*dt*-1
t_pos_bw = np.sum(np.abs(t1)>0.5*abs_max_t1_val)*dt
#Inhibition:
strf=np.minimum(original_strf,0)
l2_norm_neg = np.sum(strf[:]**2)
[u,s,v] = linalg.svd(strf)
f1 = u[:,0]
t1 = v.T[:,0]
abs_max_f1_val = np.max(np.abs(f1))
abs_max_f1_ix = np.argmax(np.abs(f1))
abs_max_t1_val = np.max(np.abs(t1))
abs_max_t1_ix = np.argmax(np.abs(t1))
neg_peaks_ix = np.argwhere(np.abs(t1)>0.1*abs_max_t1_val)
if len(neg_peaks_ix)>1:
neg_first_peak_ix = neg_peaks_ix[-1]
else:
neg_first_peak_ix = neg_peaks_ix
f_neg_peak = (abs_max_f1_ix)*df
f_neg_bw = np.sum(np.abs(f1)>0.5*abs_max_f1_val)*df
t_neg_peak = (len(t1) - abs_max_t1_ix)*dt*-1
t_neg_bw = np.sum(np.abs(t1)>0.5*abs_max_t1_val)*dt
discard_pos = False
discard_neg = False
flip_pos_neg = False
if l2_norm_neg<discard_thresh*l2_norm_pos:
discard_neg = True
f_neg_bw = 0
t_neg_bw = 0
elif l2_norm_pos<discard_thresh*l2_norm_neg:
discard_pos = True
f_pos_bw = 0
t_pos_bw = 0
if (neg_first_peak_ix>pos_first_peak_ix and not discard_neg) or discard_pos:
# print('flip_pos_neg = True')
flip_pos_neg = True
discard_neg = discard_pos
f_peak = [f_neg_peak, f_pos_peak]
f_bw = [f_neg_bw, f_pos_bw]
t_peak = [t_neg_peak, t_pos_peak]
t_bw = [t_neg_bw, t_pos_bw]
else:
f_peak = [f_pos_peak,f_neg_peak]
f_bw = [f_pos_bw,f_neg_bw]
t_peak = [t_pos_peak,t_neg_peak]
t_bw = [t_pos_bw,t_neg_bw]
# flags = [flip_pos_neg, discard_neg]
return [f_peak,f_bw, t_peak,t_bw, flip_pos_neg, discard_neg]
def flip_neg_weights(weights,n_h = 40, dt = 5,dF = 1/6):
numweights = weights.shape[0]
mf_peak = np.empty([numweights,2])
mf_bw = np.empty([numweights,2])
mt_bw = np.empty([numweights,2])
mt_peak = np.empty([numweights,2])
m_pow = np.empty([numweights, n_h])
flip_pos_neg = np.empty([numweights])
discard_neg = np.empty([numweights])
for ii in np.arange(numweights):
        # normalize the weight so that all are in the same range
this_weight = weights[ii,:,:]
this_weight_norm = this_weight/np.max(np.abs(this_weight[:]))
[mf_peak[ii,:],mf_bw[ii,:], mt_peak[ii,:],mt_bw[ii,:], flip_pos_neg[ii], discard_neg[ii]] = getPeaksAndBWs(this_weight_norm,dt,dF)
if flip_pos_neg[ii]:
this_weight = -this_weight
weights[ii,:,:] = this_weight
return weights
def quantify_strfs(weights,n_h = 40, dt = 5,dF = 1/6):
numweights = weights.shape[0]
mf_peak = np.empty([numweights,2])
mf_bw = np.empty([numweights,2])
mt_bw = np.empty([numweights,2])
mt_peak = np.empty([numweights,2])
m_pow = np.empty([numweights, n_h])
flip_pos_neg = np.empty([numweights])
discard_neg = np.empty([numweights])
# Get measures for real and model data
for ii in np.arange(numweights):
        # normalize the weight so that all are in the same range
this_weight = cp.deepcopy(weights[ii,:,:])
if np.max(np.abs(this_weight[:]))>0:
this_weight /=np.max(np.abs(this_weight[:]))
[mf_peak[ii,:],mf_bw[ii,:], mt_peak[ii,:],mt_bw[ii,:], flip_pos_neg[ii], discard_neg[ii]] = getPeaksAndBWs(this_weight,dt,dF)
m_pow[ii,:] = np.sum(this_weight**2, axis=0)
mf_peak_pos = mf_peak[:,0]
mf_bw_pos = mf_bw[:,0]
mt_peak_pos = mt_peak[:,0]
mt_bw_pos = mt_bw[:,0]
mf_peak_neg = mf_peak[np.logical_not(discard_neg),1]
mf_bw_neg = mf_bw[:,1]
# mf_bw_neg = mf_bw[np.logical_not(discard_neg),1]
mt_peak_neg = mt_peak[np.logical_not(discard_neg),1]
# mt_bw_neg = mt_bw[np.logical_not(discard_neg),1]
mt_bw_neg = mt_bw[:,1]
return [mf_peak_pos, mf_peak_neg, mf_bw_pos, mf_bw_neg, mt_peak_pos, mt_peak_neg, mt_bw_pos, mt_bw_neg, m_pow]
def add_mean_ks(this_pd):
# temp_pd = this_pd.copy()
colnames = []
# keys = this_pd.colnames
# print(this_pd.keys().unique())
for colname in this_pd.columns:
# print(colname)
# print(('peak' in colname and 'pos'in colname) )
# if 'ks' in colname:
if 'ks' in colname and ('bw' in colname):# or ('peak' in colname and 'pos' in colname)):
colnames.append(colname)
print(colnames)
this_pd['mean_ks'] = 0
# tempp = this_pd[colnames]
n_measures = 0
for colname in colnames:
# print(this_pd[colname])
this_pd['mean_ks'] += this_pd[colname]
n_measures += 1
this_pd['mean_ks'] /= n_measures
return this_pd
def compare_real_model_distributions(mstrfs, rstrfs, pd_entry):
[mf_peak_pos, mf_peak_neg, mf_bw_pos, mf_bw_neg, mt_peak_pos, mt_peak_neg, mt_bw_pos, mt_bw_neg, m_pow] = quantify_strfs(mstrfs)
[rf_peak_pos, rf_peak_neg, rf_bw_pos, rf_bw_neg, rt_peak_pos, rt_peak_neg, rt_bw_pos, rt_bw_neg, r_pow] = quantify_strfs(rstrfs, n_h=38)
#Exclude any entries where bw=0
mf_ix = [mf_bw_neg>0] #and [mf_bw_pos>0]
rf_ix = [rf_bw_neg>0] #and [rf_bw_pos>0]
mt_ix = [mt_bw_neg>0] #and [mt_bw_pos>0]
rt_ix = [rt_bw_neg>0] #and [rt_bw_pos>0]
mf_bw_pos = mf_bw_pos[mf_ix]
rf_bw_pos = rf_bw_pos[rf_ix]
mt_bw_pos = mt_bw_pos[mt_ix]
rt_bw_pos = rt_bw_pos[rt_ix]
mf_bw_neg = mf_bw_neg[mf_ix]
rf_bw_neg = rf_bw_neg[rf_ix]
mt_bw_neg = mt_bw_neg[mt_ix]
rt_bw_neg = rt_bw_neg[rt_ix]
ks_t_bw = np.zeros([2])
ks_f_bw = np.zeros([2])
ks_t_peak = np.zeros([2])
ks_f_peak = np.zeros([2])
[ks_t_bw[0],p] = stats.ks_2samp(mt_bw_pos,rt_bw_pos)
[ks_t_bw[1],p] = stats.ks_2samp(mt_bw_neg,rt_bw_neg)
[ks_t_peak[0],p] =stats.ks_2samp((mt_bw_pos-mt_bw_neg)/(mt_bw_pos+mt_bw_neg),(rt_bw_pos-rt_bw_neg)/(rt_bw_pos+rt_bw_neg))
[ks_t_peak[1],p] =stats.ks_2samp((mt_bw_pos-mt_bw_neg),(rt_bw_pos-rt_bw_neg))
# [ks_t_peak[0],p] =stats.ks_2samp((mt_bw_pos-mt_bw_neg),(rt_bw_pos-rt_bw_neg))
# [ks_t_peak[1],p] =stats.ks_2samp((mt_bw_pos-mt_bw_neg),(rt_bw_pos-rt_bw_neg))
# [ks_t_peak[1],p] =stats.ks_2samp(mt_peak_neg,rt_peak_neg)
[ks_f_bw[0],p] = stats.ks_2samp(mf_bw_pos,rf_bw_pos)
[ks_f_bw[1],p] = stats.ks_2samp(mf_bw_neg,rf_bw_neg)
# [ks_f_peak[0],p] = stats.ks_2samp(mf_peak_pos,rf_peak_pos)
# [ks_f_peak[1],p] = stats.ks_2samp(mf_peak_neg,rf_peak_neg)
[ks_f_peak[0],p] =stats.ks_2samp((mf_bw_pos-mf_bw_neg)/(mf_bw_pos+mf_bw_neg),(rf_bw_pos-rf_bw_neg)/(rf_bw_pos+rf_bw_neg))
[ks_f_peak[1],p] =stats.ks_2samp((mf_bw_pos-mf_bw_neg),(rf_bw_pos-rf_bw_neg))
# [ks_f_peak[0],p] =stats.ks_2samp((mf_bw_pos-mf_bw_neg),(rf_bw_pos-rf_bw_neg))
# [ks_f_peak[1],p] =stats.ks_2samp((mf_bw_pos-mf_bw_neg),(rf_bw_pos-rf_bw_neg))
ks_t_peak[np.isnan(ks_t_peak)] = 1
ks_t_bw[np.isnan(ks_t_bw)] = 1
ks_f_bw[np.isnan(ks_f_bw)] = 1
ks_f_peak[np.isnan(ks_f_peak)] = 1
if pd_entry is not None:
pd_entry['f_peak_pos'] = mf_peak_pos
pd_entry['f_peak_neg'] = mf_peak_neg
pd_entry['f_bw_pos'] = mf_bw_pos
pd_entry['f_bw_neg'] = mf_bw_neg
pd_entry['t_peak_pos'] = mt_peak_pos
pd_entry['t_peak_neg'] = mt_peak_neg
pd_entry['t_bw_pos'] = mt_bw_pos
pd_entry['t_bw_neg'] = mt_bw_neg
pd_entry['t_pow'] = m_pow
pd_entry['ks_f_peak_pos'] = ks_f_peak[0]
pd_entry['ks_f_peak_neg'] = ks_f_peak[1]
pd_entry['ks_t_peak_pos'] = ks_t_peak[0]
pd_entry['ks_t_peak_neg'] = ks_t_peak[1]
pd_entry['ks_t_bw_pos'] = ks_t_bw[0]
pd_entry['ks_t_bw_neg'] = ks_t_bw[1]
pd_entry['ks_f_bw_pos'] = ks_f_bw[0]
pd_entry['ks_f_bw_neg'] = ks_f_bw[1]
return pd_entry
# print(ks_t_bw)
return [ks_t_peak, ks_t_bw, ks_f_bw]
def compare_real_model_populations(this_pd, rstrfs, display=1, keep_prop=0.01):
n_h = 40
num_freq = 32
out_pd = this_pd.copy()
ii= 0
for entry_loc,this_entry in this_pd.iterrows():
pth = this_entry['results_path']
pred_net = pkl.load(open(pth, 'rb'))
if not isinstance(pred_net, dict):
network_params = pred_net.network_params
cost_history = pred_net.cost_history
else:
network_params = pred_net['network_params']
cost_history = pred_net['cost_history']
weights = network_params[0].T
l2_norm = np.sum(weights**2,axis=1)
keep_ix = l2_norm>keep_prop*max(l2_norm)
mstrfs = weights[keep_ix,:]
num_mstrfs = mstrfs.shape[0]
mstrfs =np.reshape(mstrfs,[num_mstrfs, n_h, num_freq])
mstrfs = np.rollaxis(mstrfs,2,1)
if ii == 0:
temp = compare_real_model_distributions(mstrfs, rstrfs, this_entry)
for cname in temp.index:
if cname not in out_pd.columns:
temp[cname] = None
else:
temp = temp.drop(cname)
out_pd = out_pd.assign(**temp)
this_entry = compare_real_model_distributions(mstrfs, rstrfs, this_entry)
out_pd.loc[entry_loc,this_entry.index.tolist()] = this_entry
ii+=1
return pd.DataFrame(out_pd)
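# Minimal usage sketch (the DataFrame column names, the pickled network layout and the
# real-STRF array shape are assumptions inferred from the functions above):
#
#     models_pd = pd.read_pickle("model_runs.pkl")   # one row per trained model, incl. 'results_path'
#     rstrfs = np.load("real_strfs.npy")             # shape (n_cells, n_freq, n_time)
#     results = compare_real_model_populations(models_pd, rstrfs, keep_prop=0.01)
#     results = add_mean_ks(results)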
|
StarcoderdataPython
|
3587458
|
import requests
import json
def test_healthcheck():
# Setup
url = 'http://localhost:5000'
headers = {'Content-Type': 'application/json' }
# Action
resp = requests.get(url, headers=headers)
# Check
assert resp.status_code == 200
resp_body = resp.json()
assert resp_body['code'] == 1
def test_hr_weekly():
# Setup
url = 'http://localhost:5000/hr/weekly'
headers = {'Content-Type': 'application/json' }
params = {'until': 1585699200}
# Action
resp = requests.get(url, headers=headers, params=params)
# Check
assert resp.status_code == 200
resp_body = resp.json()
assert isinstance(resp_body, list)
assert len(resp_body) > 0
assert resp_body[0] == ['Time','Value']
def test_bp_weekly():
# Setup
url = 'http://localhost:5000/bp/weekly'
headers = {'Content-Type': 'application/json' }
params = {'until': 1585699200}
# Action
resp = requests.get(url, headers=headers, params=params)
# Check
assert resp.status_code == 200
resp_body = resp.json()
assert isinstance(resp_body, list)
assert len(resp_body) > 0
assert resp_body[0] == ['Time','Diastolic','Systolic']
def test_steps_monthly():
# Setup
url = 'http://localhost:5000/steps/monthly'
headers = {'Content-Type': 'application/json' }
params = {'until': 1585699200}
# Action
resp = requests.get(url, headers=headers, params=params)
# Check
assert resp.status_code == 200
resp_body = resp.json()
assert isinstance(resp_body, list)
assert len(resp_body) > 0
assert resp_body[0] == ['Time','Steps']
assert isinstance(resp_body[1], list)
def test_weight_all_data():
# Setup
url = 'http://localhost:5000/weight'
headers = {'Content-Type': 'application/json' }
params = {'until': 1590969600}
# Action
resp = requests.get(url, headers=headers, params=params)
# Check
assert resp.status_code == 200
resp_body = resp.json()
assert isinstance(resp_body, list)
assert len(resp_body) == 192
assert resp_body[0] == ['Time','Weight']
assert isinstance(resp_body[1], list)
def test_weight_until_april_1():
# Setup
url = 'http://localhost:5000/weight'
headers = {'Content-Type': 'application/json' }
params = {'until': 1585699200}
# Action
resp = requests.get(url, headers=headers, params=params)
# Check
assert resp.status_code == 200
resp_body = resp.json()
assert isinstance(resp_body, list)
assert len(resp_body) == 154
assert resp_body[0] == ['Time','Weight']
assert isinstance(resp_body[1], list)
|
StarcoderdataPython
|
3406530
|
<filename>resnet/data_loader.py
import numpy as np
from os.path import join
class Loader:
X_train = None
y_train = None
X_valid = None
y_valid = None
X_test = None
y_test = None
X_seq_train = None
y_seq_train = None
X_seq_valid = None
y_seq_valid = None
X_seq_test = None
y_seq_test = None
def load_pretrain(self, fold=0):
path = './20_fold_data'
npz = np.load(join(path,str(fold)+'.npz'))
self.X_train = npz['X_train']
self.y_train = npz['y_train']
self.X_valid = npz['X_valid']
self.y_valid = npz['y_valid']
self.X_test = npz['X_test']
self.y_test = npz['y_test']
def load_finetune(self, fold=0 ):
path = './20_fold_data'
npz = np.load(join(path,str(fold)+'.npz'))
self.X_seq_train = npz['X_seq_train']
self.y_seq_train = npz['y_seq_train']
self.X_seq_valid = npz['X_seq_valid']
self.y_seq_valid = npz['y_seq_valid']
self.X_seq_test = npz['X_seq_test']
self.y_seq_test = npz['y_seq_test']
if __name__ == '__main__':
d = Loader()
d.load_pretrain()
print(d.X_train.shape)
d.load_finetune()
print(d.y_seq_test.shape)
|
StarcoderdataPython
|
6626342
|
<gh_stars>10-100
#!/usr/bin/env python
import unittest
import sys
import shutil
import os
import io
import re
import gzip
import numpy
if "DEBUG" in sys.argv:
sys.path.insert(0, "..")
sys.path.insert(0, "../../")
sys.path.insert(0, ".")
sys.argv.remove("DEBUG")
import metax.Formats as Formats
from M00_prerequisites import ProcessPrerequisites
class Dummy(object):
pass
def buildDummyArgs(root):
dummy = Dummy()
dummy.verbosity = 10
dummy.dosage_folder = os.path.join(root, "dosage_set_1")
dummy.snp_list = os.path.join(root, "snp.txt.gz")
dummy.output_folder = os.path.join(root, "intermediate/filtered")
dummy.file_pattern = "set_(.*)"
dummy.population_group_filters = ["HERO"]
dummy.individual_filters = ["ID.*"]
dummy.input_format = Formats.IMPUTE
dummy.output_format = Formats.PrediXcan
return dummy
def setupDataForArgs(args, root):
if os.path.exists(root):
shutil.rmtree(root)
shutil.copytree("tests/_td/dosage_set_1", os.path.join(root,"dosage_set_1"))
shutil.copy("tests/_td/snp.txt.gz", root)
def cleanUpDataForArgs(root):
shutil.rmtree(root)
class TestM00(unittest.TestCase):
def testProcessPrerequisitesnoArgConstructor(self):
with self.assertRaises(AttributeError):
dummy = Dummy()
p = ProcessPrerequisites(dummy)
def testProcessPrerequisitesConstructor(self):
dummy = buildDummyArgs("_test")
setupDataForArgs(dummy, "_test")
p = ProcessPrerequisites(dummy)
self.assertEqual(p.dosage_folder, "_test/dosage_set_1")
self.assertEqual(p.snp_list, "_test/snp.txt.gz")
self.assertEqual(p.output_folder, "_test/intermediate/filtered")
self.assertEqual(p.population_group_filters, ["HERO"])
# Previous check below looked at memory locations, which failed under
# some conditions even though the expressions were effectively identical
self.assertEqual([x.pattern for x in p.individual_filters], ["ID.*"])
self.assertEqual(p.chromosome_in_name_regex,re.compile("set_(.*)"))
self.assertEqual(p.samples_input, "_test/dosage_set_1/set.sample")
self.assertEqual(p.samples_output, "_test/intermediate/filtered/set.sample")
cleanUpDataForArgs("_test")
def testProcessPrerequisitesRun(self):
dummy = buildDummyArgs("_test")
setupDataForArgs(dummy, "_test")
p = ProcessPrerequisites(dummy)
try:
p.run()
except:
self.assertEqual(False, True, "Prerequisites should have run without error")
with open(p.samples_output) as f:
expected_lines = ["ID POP GROUP SEX",
"ID1 K HERO male",
"ID2 K HERO female",
"ID3 K HERO female"]
for i,expected_line in enumerate(expected_lines):
actual_line = f.readline().strip()
self.assertEqual(actual_line, expected_line)
path = os.path.join(p.output_folder, "set_chr1.dosage.gz")
with io.TextIOWrapper(gzip.open(path), newline="") as f:
expected_lines = ["chr1 rs2 2 G A 0.166666666667 0 1 0",
"chr1 rs3 3 G A 0.333333333333 0 1 1",
"chr1 rs4 4 G A 1.0 2 2 2"]
for i,expected_line in enumerate(expected_lines):
actual_line = f.readline().strip()
actual_comps = actual_line.split(" ")
expected_comps = expected_line.split(" ")
self.assertEqual(actual_comps[0:5], expected_comps[0:5])
                numpy.testing.assert_almost_equal(float(actual_comps[6]), float(expected_comps[6]))
self.assertEqual(actual_comps[7:], expected_comps[7:])
cleanUpDataForArgs("_test")
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
1780446
|
<gh_stars>1-10
import sys
def printc(*s, color="grey", hl=None, bg=None, file=sys.stderr):
"""
Prints some text with some color, using Terminal escape sequences
>>> printc("Hello world", color="blue")
\033[1;34mHello world\033[1;m
>>> printc("Hello world", color="blue", hl=True)
\033[1;44mHello world\033[1;m
"""
colors = {
'grey': 30,
'black': 31,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'purple': 35,
'cyan': 36,
}
if color == "grey":
hl = True
code = colors.get(color)
text = ' '.join(str(x) for x in s)
if code:
hl = 1 if hl else 0
if bg:
code += 10
file.write("\r\033[{hl};{color}m{text}\033[1;m\n".format(
hl=hl, text=text, color=code))
else:
file.write(text + '\n')
file.flush()
|
StarcoderdataPython
|
308801
|
import sqlite3
import subprocess as sp
"""
database code
"""
def create_table():
conn = sqlite3.connect('testdb.sqlite')
cursor = conn.cursor()
query = '''
CREATE TABLE IF NOT EXISTS student(
id INTEGER PRIMARY KEY,
roll INTEGER,
name TEXT,
phone TEXT
)
'''
cursor.execute(query)
conn.commit()
conn.close()
def add_student(roll,name,phone):
conn = sqlite3.connect('testdb.sqlite')
cursor = conn.cursor()
query = '''
INSERT INTO student( roll, name, phone )
VALUES ( ?,?,? )
'''
cursor.execute(query,(roll,name,phone))
conn.commit()
conn.close()
def get_students():
conn = sqlite3.connect('testdb.sqlite')
cursor = conn.cursor()
query = '''
SELECT roll, name, phone
FROM student
'''
cursor.execute(query)
all_rows = cursor.fetchall()
conn.commit()
conn.close()
return all_rows
def get_student_by_roll(roll):
conn = sqlite3.connect('testdb.sqlite')
cursor = conn.cursor()
query = '''
SELECT roll, name, phone
FROM student
WHERE roll = {}
''' .format(roll)
cursor.execute(query)
all_rows = cursor.fetchall()
conn.commit()
conn.close()
return all_rows
def update_student(roll,name,phone):
conn = sqlite3.connect('testdb.sqlite')
cursor = conn.cursor()
query = '''
UPDATE student
SET name = ?, phone = ?
WHERE roll = ?
'''
cursor.execute(query,(name,phone,roll))
conn.commit()
conn.close()
def delete_student(roll):
conn = sqlite3.connect('testdb.sqlite')
cursor = conn.cursor()
query = '''
DELETE
FROM student
WHERE roll = {}
''' .format(roll)
cursor.execute(query)
all_rows = cursor.fetchall()
conn.commit()
conn.close()
return all_rows
create_table()
"""
main code
"""
def add_data(id_,name,phone):
add_student(id_,name,phone)
def get_data():
return get_students()
def show_data():
students = get_data()
for student in students:
print(student)
def show_data_by_id(id_):
students = get_student_by_roll(id_)
if not students:
print("No data found at roll",id_)
else:
print (students)
def select():
sp.call('clear',shell=True)
sel = input("1. Add data\n2.Show Data\n3.Search\n4.Update\n5.Delete\n6.Exit\n\n")
if sel=='1':
sp.call('clear',shell=True)
id_ = int(input('id: '))
name = input('Name: ')
phone = input('phone: ')
add_data(id_,name,phone)
elif sel=='2':
sp.call('clear',shell=True)
show_data()
input("\n\npress enter to back:")
elif sel=='3':
sp.call('clear',shell=True)
id__ = int(input('Enter Id: '))
show_data_by_id(id__)
input("\n\npress enter to back:")
elif sel=='4':
sp.call('clear',shell=True)
id__ = int(input('Enter Id: '))
show_data_by_id(id__)
print()
name = input('Name: ')
phone = input('phone: ')
update_student(id__,name,phone)
input("\n\nYour data has been updated \npress enter to back:")
elif sel=='5':
sp.call('clear',shell=True)
id__ = int(input('Enter Id: '))
show_data_by_id(id__)
delete_student(id__)
input("\n\nYour data has been deleted \npress enter to back:")
else:
        return 0
    return 1
while select():
pass
|
StarcoderdataPython
|
1741468
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of debugging TensorFlow runtime errors using tfdbg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
import numpy as np
import tensorflow
from tensorflow.python import debug as tf_debug
tf = tensorflow.compat.v1
def main(_):
sess = tf.Session()
# Construct the TensorFlow network.
ph_float = tf.placeholder(tf.float32, name="ph_float")
x = tf.transpose(ph_float, name="x")
v = tf.Variable(np.array([[-2.0], [-3.0], [6.0]], dtype=np.float32), name="v")
m = tf.constant(
np.array([[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]]),
dtype=tf.float32,
name="m")
y = tf.matmul(m, x, name="y")
z = tf.matmul(m, v, name="z")
if FLAGS.debug:
config_file_path = (tempfile.mktemp(".tfdbg_config")
if FLAGS.use_random_config_path else None)
sess = tf_debug.LocalCLIDebugWrapperSession(
sess,
ui_type=FLAGS.ui_type,
config_file_path=config_file_path)
if FLAGS.error == "shape_mismatch":
print(sess.run(y, feed_dict={ph_float: np.array([[0.0], [1.0], [2.0]])}))
elif FLAGS.error == "uninitialized_variable":
print(sess.run(z))
elif FLAGS.error == "no_error":
print(sess.run(y, feed_dict={ph_float: np.array([[0.0, 1.0, 2.0]])}))
else:
raise ValueError("Unrecognized error type: " + FLAGS.error)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--error",
type=str,
default="shape_mismatch",
help="""\
Type of the error to generate (shape_mismatch | uninitialized_variable |
no_error).\
""")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--debug",
type="bool",
nargs="?",
const=True,
default=False,
help="Use debugger to track down bad values during training")
parser.add_argument(
"--use_random_config_path",
type="bool",
nargs="?",
const=True,
default=False,
help="""If set, set config file path to a random file in the temporary
directory.""")
FLAGS, unparsed = parser.parse_known_args()
with tf.Graph().as_default():
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
StarcoderdataPython
|
182297
|
import numpy as np
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.model_selection import KFold
from sklearn.feature_selection import SelectPercentile, mutual_info_classif
import sys
from pathlib import Path
sys.path[0] = str(Path(sys.path[0]).parent)
from metrics import metrics, meanMetrics, stdMetrics, printMetrics
import time
def select_features(classifier, n_features, fwd, fltg):
sfs = SFS(classifier,
k_features=n_features,
forward=fwd,
floating=fltg,
verbose=1,
scoring='accuracy',
cv=10,
n_jobs=-1)
return sfs
def select_features_number(classifier, number_features, fwd, fltg, X, Y):
tiempo_i = time.time()
Errores = np.ones(10)
Metrics = np.zeros((10,5))
j = 0
kf = KFold(n_splits=10)
clf = classifier
sf = select_features(clf, number_features, fwd, fltg)
sf = sf.fit(X, Y)
X_sf = sf.transform(X)
for train_index, test_index in kf.split(X_sf):
X_train, X_test = X_sf[train_index], X_sf[test_index]
y_train, y_test = Y[train_index], Y[test_index]
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
Errores[j] = 1-metrics(y_test,y_pred)[0]
Metrics[j,:] = metrics(y_test, y_pred)
j+=1
print("\nError de validación aplicando SFS: "+str(np.mean(Errores))+"+/-"+str(np.std(Errores)))
print("\nEficiencia en validación aplicando SFS: "+str((1-np.mean(Errores))*100)+"%")
print("\nTiempo total de ejecución: "+str(time.time()-tiempo_i)+" segundos.")
MetricsMean = meanMetrics(Metrics)
MetricsStd = stdMetrics(Metrics)
printMetrics(MetricsMean)
print("\nDesviaciones Estandard")
printMetrics(MetricsStd)
return sf
def select_features_filter_percentage(classifier, percentage, X, Y):
tiempo_i = time.time()
Errores = np.ones(10)
Metrics = np.zeros((10,5))
j = 0
kf = KFold(n_splits=10)
filter_method = SelectPercentile(mutual_info_classif, percentile=percentage)
filter_method.fit(X,Y)
X_sf = filter_method.transform(X)
for train_index, test_index in kf.split(X_sf):
X_train, X_test = X_sf[train_index], X_sf[test_index]
y_train, y_test = Y[train_index], Y[test_index]
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
Metrics[j,:] = metrics(y_test, y_pred)
Errores[j] = 1-metrics(y_test,y_pred)[0]
j+=1
print("\nError de validación aplicando at "+str(percentage)+"%: "+str(np.mean(Errores))+"+/-"+str(np.std(Errores)))
print("\nEficiencia en validación aplicando at "+str(percentage)+"%: "+str((1-np.mean(Errores))*100)+"%")
print("\nTiempo total de ejecución: "+str(time.time()-tiempo_i)+" segundos.")
MetricsMean = meanMetrics(Metrics)
MetricsStd = stdMetrics(Metrics)
printMetrics(MetricsMean)
print("\nDesviaciones Estandard")
printMetrics(MetricsStd)
return filter_method
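# Minimal usage sketch (the classifier choice and the X/Y arrays are assumptions):
#
#     from sklearn.neighbors import KNeighborsClassifier
#     clf = KNeighborsClassifier(n_neighbors=5)
#     sfs = select_features_number(clf, number_features=10, fwd=True, fltg=False, X=X, Y=Y)
#     filt = select_features_filter_percentage(clf, percentage=50, X=X, Y=Y)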
|
StarcoderdataPython
|
1602325
|
<filename>bazel/rules/library_rule.bzl
# Copyright 2018-present Open Networking Foundation
# SPDX-License-Identifier: Apache-2.0
def stratum_cc_library(name, deps = None, srcs = None, data = None,
hdrs = None, copts = None, defines = None,
include_prefix = None, includes = None,
strip_include_prefix = None, testonly = None,
textual_hdrs = None, visibility = None,
arches = None):
if arches and arches != ["x86"] and arches != ["host"]:
fail("Stratum does not currently support non-x86 architectures")
alwayslink = 0
if srcs:
if type(srcs) == "select":
alwayslink = 1
elif [s for s in srcs if not s.endswith(".h")]:
alwayslink = 1
native.cc_library(
name = name,
deps = deps,
srcs = srcs,
data = data,
hdrs = hdrs,
alwayslink = alwayslink,
copts = copts,
defines = defines,
include_prefix = include_prefix,
includes = includes,
strip_include_prefix = strip_include_prefix,
testonly = testonly,
textual_hdrs = textual_hdrs,
visibility = visibility,
)
|
StarcoderdataPython
|
8105984
|
#!/usr/bin/python3
class mppt(object):
def __init__(self):
self.__panelVoltage = 0
self.__panelCurrent = 0
self.__batteryVoltage = 48
self.__batteryInputCurrent = 0
@property
def panelVoltage(self):
return self.__panelVoltage
@property
def panelCurrent(self):
return self.__panelCurrent
@property
def batteryVoltage(self):
return self.__batteryVoltage
@property
def batteryInputCurrent(self):
return self.__batteryInputCurrent
|
StarcoderdataPython
|
3520629
|
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import math
import numpy as np
from Simple_FISs import split_vector_given
def get_plot_points(fuzzy_system):
def to_x_y_vectors(centers, widths):
x_values = []
y_values = []
for center, width in zip(centers, widths):
x_values.extend([[center - width, center, center + width]])
y_values.extend([[0, 1, 0]])
return x_values, y_values
ga_vector = fuzzy_system[-1]
if fuzzy_system[0] == '2_1':
split_2_1_vector = split_vector_given(fuzzy_system[3], fuzzy_system[4], 0, ga_vector, 3)
centers_1, widths_1, centers_2, widths_2, centers_3, widths_3, output_centers, output_widths, rules = split_2_1_vector
elif fuzzy_system[0] == '3_1':
split_3_1_vector = split_vector_given(fuzzy_system[4], fuzzy_system[5], fuzzy_system[6], ga_vector, 3)
centers_1, widths_1, centers_2, widths_2, centers_3, widths_3, output_centers, output_widths, rules = split_3_1_vector
x_values_1, y_values_1 = to_x_y_vectors(centers_1, widths_1)
x_values_2, y_values_2 = to_x_y_vectors(centers_2, widths_2)
x_values_3, y_values_3 = to_x_y_vectors(centers_3, widths_3)
x_values_out, y_values_out = to_x_y_vectors(output_centers, output_widths)
return x_values_1, y_values_1, x_values_2, y_values_2, x_values_3, y_values_3, x_values_out, y_values_out, rules
def view_layer_mfs(fuzzy_systems_layer, layer_num):
""" Populate a figure with all membership function graphs from that layer"""
def populate_subplot(n_rows, n_cols, i1, i2, x_values, y_values, title, two_one=True):
""" Add functions to single subplot"""
def add_formatted_title(title):
""" Move title to two lines if too long and replace '_' with ' ' """
title = title.replace("_", " ")
title = list(title)
if len(title) > 20:
title_first = title[:20]
i = 0
save_i_to_insert = 0
for char in title_first[::-1]:
if char == ' ':
save_i_to_insert = i
i += 1
title.insert(save_i_to_insert, '\n')
return "".join(title)
colors = ['g', 'c', 'b', 'm', 'k']
the_subplot = plt.subplot2grid((n_rows, n_cols), (i1, i2))
if two_one and i2 == 3:
the_subplot = plt.subplot2grid((n_rows, n_cols), (i1, 2), colspan=2)
for x_value, y_value, color in zip(x_values, y_values, colors):
the_subplot.plot(x_value, y_value, color)
the_subplot.set_title(add_formatted_title(title), fontsize=9.0)
the_subplot.set_ylim([0, 1.1])
n_row = len(fuzzy_systems_layer)
n_col = 4
fig = plt.figure(figsize=(n_col * 3, n_row * 2.5))
fig.suptitle(('Layer ' + str(layer_num) + ' Membership Functions'))
fig.subplots_adjust(left=.05, right=.95, top=.8, hspace=0.7)
low_patch = mlines.Line2D([], [], color='g', markersize=15, label='low')
med_patch = mlines.Line2D([], [], color='c', markersize=15, label='medium')
hgh_patch = mlines.Line2D([], [], color='b', markersize=15, label='high')
i = 0
layer_rule_tables = []
for fuzzy_system in fuzzy_systems_layer:
x_values_1, y_values_1, x_values_2, y_values_2, x_values_3, y_values_3, x_values_out, y_values_out, rules = get_plot_points(
fuzzy_system)
if fuzzy_system[0] == '2_1':
layer_rule_tables.append(display_rules_single_fis(rules, fuzzy_system))
populate_subplot(n_row, n_col, i, 0, x_values_1, y_values_1, fuzzy_system[1])
populate_subplot(n_row, n_col, i, 1, x_values_2, y_values_2, fuzzy_system[2])
if x_values_3:
populate_subplot(n_row, n_col, i, 2, x_values_3, y_values_3, fuzzy_system[3])
two_one_fuzzy_system = False
else:
two_one_fuzzy_system = True
populate_subplot(n_row, n_col, i, 3, x_values_out, y_values_out, 'Layer output', two_one=two_one_fuzzy_system)
if i == 0:
plt.legend(handles=[low_patch, med_patch, hgh_patch], bbox_to_anchor=(0., 1.02, 1., .102), loc='lower center',
ncol=3, borderaxespad=1.5)
#plt.legend(handles=[low_patch, med_patch, hgh_patch], loc='upper left')
i += 1
return layer_rule_tables
# Now for the rules - still needs a good amount of work
def display_rules_single_fis(rules, fuzzy_system_descriptors):
number_of_mfs_in1 = fuzzy_system_descriptors[3]
number_of_mfs_in2 = fuzzy_system_descriptors[4]
basic_descriptors = ['low', 'medium', 'high'] #{'3': , '5': ['very low', 'low', 'medium', 'high', 'very high']}
rule_strings = []
for rule in rules:
try:
rule = basic_descriptors[int(math.ceil(rule) - 1)]
except IndexError:
rule = 'high'
rule_strings.append(rule)
rule_table = [['mf2: low', 'mf2: medium', 'mf2: high']]
loc_list = 0
for i1 in np.arange(0, number_of_mfs_in1):
rule_table.append(rule_strings[loc_list: loc_list+number_of_mfs_in2])
loc_list += number_of_mfs_in2
return rule_table
#
# number_mfs_inord = [['2_1', 'mobility_retail_and_recreation', 'mobility_grocery_and_pharmacy', 3, 3, chromosome[0: 27]],
# ['2_1', 'mobility_parks', 'mobility_residential', 3, 3, chromosome[27: 54]],
# ['2_1', 'mobility_transit_stations', 'mobility_workplaces', 3, 3, chromosome[54: 81]],
# ['3_1', 'lam 1', 'lam 2', 'lam 3', 3, 3, 3, chromosome[81: 132]],
# ['2_1', 'lam 4', 'new_confirmed cases', 3, 5, chromosome[132: 169]]]
#
# # Layer 1
# layer_1_rules = view_layer_mfs(number_mfs_inord[0: 3], 1)
# plt.show()
#
# # Layer 2
# view_layer_mfs(number_mfs_inord[3: 4], 2)
# plt.show()
#
# # Layer 3
# view_layer_mfs(number_mfs_inord[4: 5], 3)
# plt.show()
|
StarcoderdataPython
|
3220315
|
from enum import IntFlag
from typing import cast, List, Tuple, Iterable, TextIO
from itertools import takewhile
from qcodes import VisaInstrument, InstrumentChannel, ChannelList
from qcodes.utils.validators import Enum, Numbers
from qcodes.instrument.group_parameter import GroupParameter, Group
def read_curve_file(curve_file: TextIO) -> dict:
"""
Read a curve file with extension .330
The file format of this file is shown in test_lakeshore_file_parser.py
in the test module
The output is a dictionary with keys: "metadata" and "data".
The metadata dictionary contains the first n lines of the curve file which
are in the format "item: value". The data dictionary contains the actual
curve data.
"""
def split_data_line(line: str, parser: type = str) -> List[str]:
return [parser(i) for i in line.split(" ") if i != ""]
def strip(strings: Iterable[str]) -> Tuple:
return tuple(s.strip() for s in strings)
lines = iter(curve_file.readlines())
# Meta data lines contain a colon
metadata_lines = takewhile(lambda s: ":" in s, lines)
# Data from the file is collected in the following dict
file_data = dict()
# Capture meta data
parsed_lines = [strip(line.split(":")) for line in metadata_lines]
file_data["metadata"] = {key: value for key, value in parsed_lines}
# After meta data we have a data header
header_items = strip(split_data_line(next(lines)))
# After that we have the curve data
data = [
split_data_line(line, parser=float)
for line in lines if line.strip() != ""
]
file_data["data"] = dict(
zip(header_items, zip(*data))
)
return file_data
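# Example usage (illustrative; the exact .330 header items are an assumption based on
# the parsing logic above and the keys used later by `upload_curve_from_file`):
#
#     with open("curve.330") as f:
#         curve = read_curve_file(f)
#     curve["metadata"]["Sensor Model"]    # e.g. a sensor model string
#     curve["data"]["Temperature (K)"]     # tuple of temperature points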
def get_sanitize_data(file_data: dict) -> dict:
"""
Data as found in the curve files are slightly different then
the dictionary as expected by the 'upload_curve' method of the
driver
"""
data_dict = dict(file_data["data"])
# We do not need the index column
del data_dict["No."]
# Rename the 'Units' column to the appropriate name
# Look up under the 'Data Format' entry to find what units we have
data_format = file_data['metadata']['Data Format']
# This is a string in the form '4 (Log Ohms/Kelvin)'
data_format_int = int(data_format.split()[0])
correct_name = Model_325_Curve.valid_sensor_units[data_format_int - 1]
# Rename the column
data_dict[correct_name] = data_dict["Units"]
del data_dict["Units"]
return data_dict
class Status(IntFlag):
sensor_units_overrang = 128
sensor_units_zero = 64
temp_overrange = 32
temp_underrange = 16
invalid_reading = 1
class Model_325_Curve(InstrumentChannel):
valid_sensor_units = ["mV", "V", "Ohm", "log Ohm"]
temperature_key = "Temperature (K)"
def __init__(self, parent: 'Model_325', index: int) -> None:
self._index = index
name = f"curve_{index}"
super().__init__(parent, name)
self.add_parameter(
"serial_number",
parameter_class=GroupParameter
)
self.add_parameter(
"format",
val_mapping={
f"{unt}/K": i+1 for i, unt in enumerate(self.valid_sensor_units)
},
parameter_class=GroupParameter
)
self.add_parameter(
"limit_value",
parameter_class=GroupParameter
)
self.add_parameter(
"coefficient",
val_mapping={
"negative": 1,
"positive": 2
},
parameter_class=GroupParameter
)
self.add_parameter(
"curve_name",
parameter_class=GroupParameter
)
Group(
[
self.curve_name, self.serial_number, self.format,
self.limit_value, self.coefficient
],
set_cmd=f"CRVHDR {self._index}, {{curve_name}}, "
f"{{serial_number}}, {{format}}, {{limit_value}}, "
f"{{coefficient}}",
get_cmd=f"CRVHDR? {self._index}"
)
def get_data(self) -> dict:
curve = [
float(a) for point_index in range(1, 200)
for a in self.ask(f"CRVPT? {self._index}, {point_index}").split(",")
]
d = {self.temperature_key: curve[1::2]}
sensor_unit = self.format().split("/")[0]
d[sensor_unit] = curve[::2]
return d
@classmethod
def validate_datadict(cls, data_dict: dict) -> str:
"""
A data dict has two keys, one of which is 'Temperature (K)'. The other
contains the units in which the curve is defined and must be one of:
'mV', 'V', 'Ohm' or 'log Ohm'
This method validates this and returns the sensor unit encountered in
the data dict
"""
if cls.temperature_key not in data_dict:
raise ValueError(f"At least {cls.temperature_key} needed in the "
f"data dictionary")
sensor_units = [i for i in data_dict.keys() if i != cls.temperature_key]
if len(sensor_units) != 1:
raise ValueError(
"Data dictionary should have one other key, other then "
"'Temperature (K)'"
)
sensor_unit = sensor_units[0]
if sensor_unit not in cls.valid_sensor_units:
raise ValueError(
f"Sensor unit {sensor_unit} invalid. This needs to be one of "
f"{', '.join(cls.valid_sensor_units)}"
)
data_size = len(data_dict[cls.temperature_key])
if data_size != len(data_dict[sensor_unit]) or data_size > 200:
raise ValueError("The length of the temperature axis should be "
"the same as the length of the sensor axis and "
"should not exceed 200 in size")
return sensor_unit
def set_data(self, data_dict: dict, sensor_unit: str = None) -> None:
"""
        Set the curve data according to the values found in the dictionary.
Args:
data_dict (dict): See `validate_datadict` to see the format of this
dictionary
sensor_unit (str): If None, the data dict is validated and the
units are extracted.
"""
if sensor_unit is None:
sensor_unit = self.validate_datadict(data_dict)
temperature_values = data_dict[self.temperature_key]
sensor_values = data_dict[sensor_unit]
for value_index, (temperature_value, sensor_value) in \
enumerate(zip(temperature_values, sensor_values)):
cmd_str = f"CRVPT {self._index}, {value_index + 1}, " \
f"{sensor_value:3.3f}, {temperature_value:3.3f}"
self.write(cmd_str)
class Model_325_Sensor(InstrumentChannel):
"""
A single sensor of a Lakeshore 325.
Args:
parent (Model_325): The instrument this heater belongs to
name (str)
inp (str): Either "A" or "B"
"""
def __init__(self, parent: 'Model_325', name: str, inp: str) -> None:
if inp not in ["A", "B"]:
raise ValueError("Please either specify input 'A' or 'B'")
super().__init__(parent, name)
self._input = inp
self.add_parameter(
'temperature',
get_cmd='KRDG? {}'.format(self._input),
get_parser=float,
label='Temperature',
unit='K'
)
self.add_parameter(
'status',
get_cmd='RDGST? {}'.format(self._input),
get_parser=lambda status: self.decode_sensor_status(int(status)),
label='Sensor_Status'
)
self.add_parameter(
"type",
val_mapping={
"Silicon diode": 0,
"GaAlAs diode": 1,
"100 Ohm platinum/250": 2,
"100 Ohm platinum/500": 3,
"1000 Ohm platinum": 4,
"NTC RTD": 5,
"Thermocouple 25mV": 6,
"Thermocouple 50 mV": 7,
"2.5 V, 1 mA": 8,
"7.5 V, 1 mA": 9
},
parameter_class=GroupParameter
)
self.add_parameter(
"compensation",
vals=Enum(0, 1),
parameter_class=GroupParameter
)
Group(
[self.type, self.compensation],
set_cmd=f"INTYPE {self._input}, {{type}}, {{compensation}}",
get_cmd=f"INTYPE? {self._input}"
)
self.add_parameter(
"curve_index",
set_cmd=f"INCRV {self._input}, {{}}",
get_cmd=f"INCRV? {self._input}",
get_parser=int,
vals=Numbers(min_value=1, max_value=35)
)
@staticmethod
def decode_sensor_status(sum_of_codes: int) -> str:
total_status = Status(sum_of_codes)
if sum_of_codes == 0:
return 'OK'
status_messages = [st.name.replace('_', ' ') for st in Status
if st in total_status]
return ", ".join(status_messages)
@property
def curve(self) -> Model_325_Curve:
parent = cast(Model_325, self.parent)
return Model_325_Curve(parent, self.curve_index())
class Model_325_Heater(InstrumentChannel):
"""
Heater control for the Lakeshore 325.
Args:
parent (Model_325): The instrument this heater belongs to
name (str)
loop (int): Either 1 or 2
"""
def __init__(self, parent: 'Model_325', name: str, loop: int) -> None:
if loop not in [1, 2]:
raise ValueError("Please either specify loop 1 or 2")
super().__init__(parent, name)
self._loop = loop
self.add_parameter(
"control_mode",
get_cmd=f"CMODE? {self._loop}",
set_cmd=f"CMODE {self._loop},{{}}",
val_mapping={
"Manual PID": "1",
"Zone": "2",
"Open Loop": "3",
"AutoTune PID": "4",
"AutoTune PI": "5",
"AutoTune P": "6"
}
)
self.add_parameter(
"input_channel",
vals=Enum("A", "B"),
parameter_class=GroupParameter
)
self.add_parameter(
"unit",
val_mapping={
"Kelvin": "1",
"Celsius": "2",
"Sensor Units": "3"
},
parameter_class=GroupParameter
)
self.add_parameter(
'powerup_enable',
val_mapping={True: 1, False: 0},
parameter_class=GroupParameter
)
self.add_parameter(
"output_metric",
val_mapping={
"current": "1",
"power": "2",
},
parameter_class=GroupParameter
)
Group(
[self.input_channel, self.unit, self.powerup_enable,
self.output_metric],
set_cmd=f"CSET {self._loop}, {{input_channel}}, {{unit}}, "
f"{{powerup_enable}}, {{output_metric}}",
get_cmd=f"CSET? {self._loop}"
)
self.add_parameter(
'P',
vals=Numbers(0, 1000),
get_parser=float,
parameter_class=GroupParameter
)
self.add_parameter(
'I',
vals=Numbers(0, 1000),
get_parser=float,
parameter_class=GroupParameter
)
self.add_parameter(
'D',
vals=Numbers(0, 1000),
get_parser=float,
parameter_class=GroupParameter
)
Group(
[self.P, self.I, self.D],
set_cmd=f'PID {self._loop}, {{P}}, {{I}}, {{D}}',
get_cmd=f'PID? {self._loop}'
)
if self._loop == 1:
valid_output_ranges = Enum(0, 1, 2)
else:
valid_output_ranges = Enum(0, 1)
self.add_parameter(
'output_range',
vals=valid_output_ranges,
set_cmd=f'RANGE {self._loop}, {{}}',
get_cmd=f'RANGE? {self._loop}',
val_mapping={
"Off": '0',
"Low (2.5W)": '1',
"High (25W)": '2'
}
)
self.add_parameter(
'setpoint',
vals=Numbers(0, 400),
get_parser=float,
set_cmd=f'SETP {self._loop}, {{}}',
get_cmd=f'SETP? {self._loop}'
)
self.add_parameter(
"ramp_state",
vals=Enum(0, 1),
parameter_class=GroupParameter
)
self.add_parameter(
"ramp_rate",
vals=Numbers(0, 100 / 60 * 1E3),
unit="mK/s",
parameter_class=GroupParameter,
get_parser=lambda v: float(v) / 60 * 1E3, # We get values in K/min,
set_parser=lambda v: v * 60 * 1E-3 # Convert to K/min
)
Group(
[self.ramp_state, self.ramp_rate],
set_cmd=f"RAMP {self._loop}, {{ramp_state}}, {{ramp_rate}}",
get_cmd=f"RAMP? {self._loop}"
)
self.add_parameter(
"is_ramping",
get_cmd=f"RAMPST? {self._loop}"
)
self.add_parameter(
"resistance",
get_cmd=f"HTRRES? {self._loop}",
set_cmd=f"HTRRES {self._loop}, {{}}",
val_mapping={
25: 1,
50: 2,
},
label='Resistance',
unit="Ohm"
)
self.add_parameter(
"heater_output",
get_cmd=f"HTR? {self._loop}",
get_parser=float,
label='Heater Output',
unit="%"
)
class Model_325(VisaInstrument):
"""
Lakeshore Model 325 Temperature Controller Driver
"""
def __init__(self, name: str, address: str, **kwargs) -> None:
super().__init__(name, address, terminator="\r\n", **kwargs)
sensors = ChannelList(
self, "sensor", Model_325_Sensor, snapshotable=False)
for inp in ['A', 'B']:
sensor = Model_325_Sensor(self, 'sensor_{}'.format(inp), inp)
sensors.append(sensor)
self.add_submodule('sensor_{}'.format(inp), sensor)
sensors.lock()
self.add_submodule("sensor", sensors)
heaters = ChannelList(
self, "heater", Model_325_Heater, snapshotable=False)
for loop in [1, 2]:
heater = Model_325_Heater(self, 'heater_{}'.format(loop), loop)
heaters.append(heater)
self.add_submodule('heater_{}'.format(loop), heater)
heaters.lock()
self.add_submodule("heater", heaters)
curves = ChannelList(
self, "curve", Model_325_Curve, snapshotable=False
)
for curve_index in range(1, 35):
curve = Model_325_Curve(self, curve_index)
curves.append(curve)
self.add_submodule("curve", curves)
self.connect_message()
def upload_curve(
self, index: int, name: str, serial_number: str, data_dict: dict
) -> None:
"""
Upload a curve to the given index
Args:
index (int): The index to upload the curve to. We can only use
indices reserved for user defined curves, 21-35
name (str)
serial_number (str)
data_dict (dict): A dictionary containing the curve data
"""
if index not in range(21, 36):
raise ValueError("index value should be between 21 and 35")
sensor_unit = Model_325_Curve.validate_datadict(data_dict)
curve = self.curve[index - 1]
curve.curve_name(name)
curve.serial_number(serial_number)
curve.format(f"{sensor_unit}/K")
curve.set_data(data_dict, sensor_unit=sensor_unit)
def upload_curve_from_file(self, index: int, file_path: str) -> None:
"""
Upload a curve from a curve file. Note that we only support
curve files with extension .330
"""
if not file_path.endswith(".330"):
raise ValueError("Only curve files with extension .330 are supported")
with open(file_path, "r") as curve_file:
file_data = read_curve_file(curve_file)
data_dict = get_sanitize_data(file_data)
name = file_data["metadata"]["Sensor Model"]
serial_number = file_data["metadata"]["Serial Number"]
self.upload_curve(index, name, serial_number, data_dict)
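# Minimal usage sketch (the VISA address and the curve file path are assumptions):
#
#     lakeshore = Model_325("lakeshore_325", "GPIB0::12::INSTR")
#     print(lakeshore.sensor_A.temperature())
#     lakeshore.heater_1.setpoint(4.2)
#     lakeshore.upload_curve_from_file(21, "my_sensor_curve.330")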
|
StarcoderdataPython
|
9641261
|
<filename>EmergencyServices/migrations/0004_unsafeareas.py
# Generated by Django 3.1.1 on 2020-12-07 19:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('EmergencyServices', '0003_auto_20201121_1631'),
]
operations = [
migrations.CreateModel(
name='UnsafeAreas',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('city', models.CharField(blank=True, max_length=250, null=True)),
('state', models.CharField(blank=True, max_length=250, null=True)),
('flag', models.IntegerField(blank=True, default=0, null=True)),
('time', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
StarcoderdataPython
|