# This file is part of the GBI project.
# Copyright (C) 2013 Omniscale GmbH & Co. KG <http://omniscale.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from werkzeug.exceptions import NotFound
from flask import (
render_template, Blueprint, flash, redirect, url_for,
request, current_app, session,
)
from flask_babel import gettext as _
from flask_login import login_user, logout_user, login_required, current_user
from gbi_server.forms.user import (
LoginForm, NewUserForm, RemoveUserForm, RecoverSetForm,
EditAddressForm, EditPasswordForm, RecoverRequestForm
)
from gbi_server.extensions import db
from gbi_server.model import User, WMTS, EmailVerification
from gbi_server.lib.helper import send_mail
from gbi_server.lib.couchdb import init_user_boxes
user = Blueprint("user", __name__, template_folder="../templates")
@user.route("/")
def home():
if current_user.is_anonymous:
layers = WMTS.query.filter_by(is_public=True).all()
else:
layers = WMTS.query.all()
return render_template('index.html', user=current_user, layers=layers,)
@user.route("/user", methods=["GET"])
@login_required
def index():
return render_template("user/index.html", user=current_user)
@user.route("/login", methods=["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.by_email(form.data['email'])
if not user or not user.check_password(form.data['password']):
flash(_("user or passwort is not correct"), 'error')
pass # fall through
elif user and not user.verified:
return redirect(url_for('.verify_wait', id=user.id))
elif user and not user.active:
flash(_("account not activated"), 'error')
else:
login_user(user)
session['authproxy_token'] = user.authproxy_token
user.update_last_login()
db.session.commit()
flash(_("Logged in successfully."), 'success')
return redirect(request.args.get("next") or url_for(".home"))
# else: update form with errors
return render_template("user/login.html", form=form)
@user.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for(".home"))
@user.route("/user/new", methods=["GET", "POST"])
def new():
form = NewUserForm()
form.federal_state.choices = []
form.federal_state.choices.append(('', _('Please select')))
for state in current_app.config['FEDERAL_STATES']:
form.federal_state.choices.append(state)
form.title.choices = current_app.config['SALUTATIONS']
form.type.choices = []
    # add choices for which account types are possible
if current_app.config['FEATURE_CUSTOMER_USERS']:
form.type.choices.append((User.Type.CUSTOMER, _('customer')))
if current_app.config['FEATURE_CONSULTANT_USERS']:
form.type.choices.append((User.Type.CONSULTANT, _('consultant')))
form.type.choices.append((User.Type.SERVICE_PROVIDER, _('service_provider')))
if form.validate_on_submit():
user = User(form.data['email'], form.data['password'])
user.set_user_data(form.data)
user.type = form.data.get('type')
        # no user type is activated automatically; all must be activated by an admin
user.active = False
        # send verification mail to check the user's email address
verify = EmailVerification.verify(user)
db.session.add(user)
db.session.add(verify)
db.session.commit()
send_mail(
_("Email verification mail subject"),
render_template(
"user/verify_mail.txt",
user=user,
verify=verify,
_external=True
),
[user.email]
)
couch_url = current_app.config.get('COUCH_DB_URL')
if user.is_service_provider or user.is_customer:
# create couch document and area boxes
# and initialize security
init_user_boxes(user, couch_url)
return redirect(url_for(".verify_wait", id=user.id))
return render_template(
"user/new.html",
form=form,
customer_id=User.Type.CUSTOMER,
service_provider_id=User.Type.SERVICE_PROVIDER,
)
@user.route("/user/remove", methods=["GET", "POST"])
@login_required
def remove():
form = RemoveUserForm()
if form.validate_on_submit():
user = current_user
db.session.delete(user)
logout_user()
db.session.commit()
flash(_("Account removed"), 'success')
return redirect(url_for(".home"))
return render_template("user/remove.html", form=form)
@user.route("/user/<id>/send_verify_mail")
def send_verifymail(id):
user = User.by_id(id)
if not user or user.verified:
raise NotFound()
verify = EmailVerification.verify(user)
db.session.add(verify)
db.session.commit()
send_mail(
_("Email verification mail subject"),
render_template("user/verify_mail.txt", user=user, verify=verify, _external=True),
[user.email]
)
flash(_('email verification was sent successfully'), 'success')
return redirect(url_for(".login"))
@user.route("/user/<id>/verify_wait")
def verify_wait(id):
user = User.by_id(id)
if not user or user.verified:
raise NotFound()
return render_template("user/verify_wait.html", user_id=id)
@user.route("/user/<uuid>/verify")
def verify(uuid):
verify = EmailVerification.by_hash(uuid)
if not verify or not verify.is_verify:
return render_template(
"errors/404.html",
error_msg=_('infotext verify not possible')
)
user = verify.user
user.verified = True
db.session.delete(verify)
db.session.commit()
send_mail(
_("Activate user subject"),
render_template("admin/user_activate_mail.txt", user=user, _external=True),
[member.email for member in User.all_admins()]
)
flash(_("Email verified"), 'success')
return redirect(url_for(".login"))
@user.route("/user/recover", methods=["GET", "POST"])
def recover():
form = RecoverRequestForm()
if form.validate_on_submit():
user = User.by_email(form.data['email'])
recover = EmailVerification.recover(user)
db.session.add(recover)
db.session.commit()
send_mail(
_("Password recover mail subject"),
render_template("user/recover_mail.txt", user=user, recover=recover),
[user.email]
)
return redirect(url_for(".recover_sent"))
return render_template("user/recover.html", form=form)
@user.route("/user/recover_sent")
def recover_sent():
return render_template("user/recover_sent.html")
@user.route("/user/<uuid>/recover", methods=["GET", "POST"], endpoint='recover_password')
@user.route("/user/<uuid>/new", methods=["GET", "POST"], endpoint='new_password')
def recover_new_password(uuid):
verify = EmailVerification.by_hash(uuid)
if not verify or not (verify.is_import or verify.is_recover):
return render_template(
"errors/404.html",
error_msg=_('infotext recover not possible')
)
user = verify.user
form = RecoverSetForm()
if form.validate_on_submit():
user.update_password(form.data['password'])
db.session.delete(verify)
db.session.commit()
login_user(user)
return redirect(url_for(".home"))
return render_template("user/password_set.html", user=user, form=form, verify=verify)
@user.route("/user/edit_address", methods=["GET", "POST"])
@login_required
def edit_address():
user = current_user
form = EditAddressForm(request.form, user)
form.federal_state.choices = current_app.config['FEDERAL_STATES']
form.title.choices = current_app.config['SALUTATIONS']
if form.validate_on_submit():
# save user data to database
user.set_user_data(form.data)
db.session.commit()
flash(_('Address changed'), 'success')
return redirect(url_for(".edit_address"))
return render_template("user/edit_address.html", form=form)
@user.route("/user/edit_password", methods=["GET", "POST"])
@login_required
def edit_password():
user = current_user
form = EditPasswordForm(request.form)
if form.validate_on_submit():
if user.check_password(form.data['old_password']):
user.update_password(form.data['password'])
db.session.commit()
flash(_('Password changed'), 'success')
return redirect(url_for(".edit_password"))
else:
flash(_("Old password is not correct"), 'error')
return render_template("user/edit_password.html", user=user, form=form)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.test import TestCase, TransactionTestCase
from django.test import Client
from django.contrib.auth.models import User
from trading.controller import axfd_utils, useraccountinfomanager
from trading.models import *
from tradeex.controllers.crypto_utils import CryptoUtility
#from trading.forms import *
from trading.tests.setuptest import *
from unittest.mock import Mock, MagicMock, patch
import datetime as dt
import sys, traceback, time, json, math
test_data1 = json.load(open('trading/tests/data/trx_test_data1.json'))
test_data2 = json.load(open('trading/tests/data/trx_test_data2.json'))
test_data_cny_pass1 = json.load(open('trading/tests/data/trx_test_cny_wallet_1.json'))
class AccountCronJobTestCase(TransactionTestCase):
fixtures = ['fixture_for_account_cronjob.json']
def setUp(self):
try:
User.objects.get(username='[email protected]')
except User.DoesNotExist:
setup_test()
@patch.object(CryptoUtility, 'listtransactions_impl')
@patch.object(axfd_utils.AXFundUtility, 'listtransactions')
def test_update_account_from_trx(self, mock_listtransactions,mock_listtransactions_impl):
mock_listtransactions.return_value = test_data1
mock_listtransactions_impl.return_value = test_data_cny_pass1
cnywallet = Wallet.objects.get(cryptocurrency__currency_code='CNY')
axfwallet = Wallet.objects.get(cryptocurrency__currency_code='AXFund')
operator = User.objects.get(username='admin')
# set user's wallet to test address
updated = UserWallet.objects.filter(user__username='taozhang',
wallet__cryptocurrency__currency_code='AXFund').update(
wallet_addr='AGqfmz49fVFpdoKRfaw2zN7CikWAhUsYdE',
lastupdated_at = dt.datetime.utcnow())
if not updated:
self.fail('Did not find userwallet for taozhang')
taozhang = User.objects.get(username='taozhang')
print ('taozhang\'s userid is {0}'.format(taozhang.id))
updated = UserWallet.objects.filter(user__username='yingzhou',
wallet__cryptocurrency__currency_code='AXFund').update(
wallet_addr='AboGeCuvs8U8nGGuoe9awZzhUDHTkuiG4Y',
lastupdated_at = dt.datetime.utcnow())
if not updated:
self.fail('Did not find userwallet for yingzhou')
UserWallet.objects.create(
user = taozhang,
wallet = cnywallet,
wallet_addr = 'PBfMvKuNtJH5yodb13n5FfE7UggNCLh7YP',
created_by = operator,
lastupdated_by = operator
).save()
yingzhou = User.objects.get(username='yingzhou')
print ('yingzhou\'s userid is {0}'.format(yingzhou.id))
UserWallet.objects.create(
user = yingzhou,
wallet = cnywallet,
wallet_addr = 'PXZCvnATCuvNcJheKsg9LGe5Asf9a5xeEd',
created_by = operator,
lastupdated_by = operator
).save()
#with patch('controller.axfd_utils.axfd_listtransactions') as mock:
# instance = mock.return_value
# instance.method.return_value = test_data
c = Client()
response = c.get('/trading/account/cron/update_receive/')
self.assertEqual(200, response.status_code)
user1_wallet = UserWallet.objects.get(user__username='taozhang',
wallet__cryptocurrency__currency_code = 'AXFund')
print ('about to test user_Wallet {0} user {1}'.format(
user1_wallet.id, user1_wallet.user.id
))
user2_wallet = UserWallet.objects.get(user__username='yingzhou',
wallet__cryptocurrency__currency_code = 'AXFund')
self.assertEqual(100+1-1.2-0.0001, user1_wallet.balance)
self.assertEqual(1.0 + 2.0 + 1.0 + 0.0001 * 3, user1_wallet.locked_balance)
self.assertEqual(100+1-1.2-0.0001 - (1.0 + 2.0 + 1.0 + 0.0001 * 3), user1_wallet.available_balance)
self.assertEqual(round(1.2 - 1.0 - 0.0001, 8), user2_wallet.balance)
        self.assertEqual(round(1.0 + 1.0 + 0.0001 * 2, 8), user2_wallet.locked_balance)
try:
trans_receive_1 = UserWalletTransaction.objects.get(
reference_wallet_trxId='e8392e991eaa06fc4e37a32c713d69f56b4f14ff823c1adee7b43dc1f98e3b63'
)
self.assertEqual(2, trans_receive_1.user_wallet.user.id)
self.assertEqual('PROCESSED', trans_receive_1.status)
self.assertTrue(math.fabs(trans_receive_1.units - 100.0) < 0.00000001)
self.assertTrue(math.fabs(trans_receive_1.balance_end - trans_receive_1.balance_begin - 100.0)<0.00000001)
self.assertEqual(trans_receive_1.locked_balance_begin, trans_receive_1.locked_balance_end)
self.assertTrue(math.fabs(trans_receive_1.available_to_trade_end - trans_receive_1.available_to_trade_begin - 100.0) < 0.0000001)
self.assertEqual('CREDIT', trans_receive_1.balance_update_type)
except UserWalletTransaction.DoesNotExist:
self.fail('Could not find userwallettransaction for txid e8392e991eaa06fc4e37a32c713d69f56b4f14ff823c1adee7b43dc1f98e3b63')
# test pending redeem transaction for user1
try:
trans1 = UserWalletTransaction.objects.get(
reference_wallet_trxId='cbe71c7c0e27227cb2684d8eefcc8a169145fafe9f1c76a7be79de04b7d0c820',
transaction_type = 'REDEEM'
)
self.assertEqual(2, trans1.user_wallet.user.id)
self.assertEqual('PENDING', trans1.status)
self.assertTrue(math.fabs(trans1.units - 1.0) < 0.00000001)
self.assertEqual(trans1.balance_begin, trans1.balance_end)
self.assertEqual(trans1.locked_balance_begin + 1.0, trans1.locked_balance_end)
self.assertEqual(trans1.available_to_trade_begin - 1.0, trans1.available_to_trade_end)
self.assertEqual('DEBT', trans1.balance_update_type)
except UserWalletTransaction.DoesNotExist:
self.fail('Could not find redeem userwallettransaction for txid cbe71c7c0e27227cb2684d8eefcc8a169145fafe9f1c76a7be79de04b7d0c820')
# test pending redeem fee transaction
try:
trans1 = UserWalletTransaction.objects.get(
reference_wallet_trxId='cbe71c7c0e27227cb2684d8eefcc8a169145fafe9f1c76a7be79de04b7d0c820',
transaction_type = 'REDEEMFEE'
)
self.assertEqual(2, trans1.user_wallet.user.id)
self.assertEqual('PENDING', trans1.status)
self.assertTrue(math.fabs(trans1.units - 0.0001) < 0.00000001)
self.assertEqual(trans1.balance_begin, trans1.balance_end)
self.assertEqual(trans1.locked_balance_begin + 0.0001, trans1.locked_balance_end)
self.assertEqual(trans1.available_to_trade_begin - 0.0001, trans1.available_to_trade_end)
self.assertEqual('DEBT', trans1.balance_update_type)
except UserWalletTransaction.DoesNotExist:
self.fail('Could not find redeem fee userwallettransaction for txid cbe71c7c0e27227cb2684d8eefcc8a169145fafe9f1c76a7be79de04b7d0c820')
# test pending redeem transaction for user2
try:
trans1 = UserWalletTransaction.objects.get(
reference_wallet_trxId='6027fed2199003b34ceb910bd7e1f42914e0c1ea2153d9766e77cfa31cb9255e',
transaction_type = 'REDEEM'
)
self.assertEqual(3, trans1.user_wallet.user.id)
self.assertEqual('PROCESSED', trans1.status)
self.assertTrue(math.fabs(trans1.units - 1.0) < 0.00000001)
self.assertEqual(trans1.balance_begin - 1.0, trans1.balance_end)
self.assertEqual(trans1.locked_balance_begin, trans1.locked_balance_end)
self.assertEqual(trans1.available_to_trade_begin - 1.0, trans1.available_to_trade_end)
self.assertEqual('DEBT', trans1.balance_update_type)
except UserWalletTransaction.DoesNotExist:
self.fail('Could not find redeem userwallettransaction for txid 6027fed2199003b34ceb910bd7e1f42914e0c1ea2153d9766e77cfa31cb9255e')
# test pending redeem fee transaction
try:
trans1 = UserWalletTransaction.objects.get(
reference_wallet_trxId='6027fed2199003b34ceb910bd7e1f42914e0c1ea2153d9766e77cfa31cb9255e',
transaction_type = 'REDEEMFEE'
)
self.assertEqual(3, trans1.user_wallet.user.id)
self.assertEqual('PROCESSED', trans1.status)
self.assertTrue(math.fabs(trans1.units - 0.0001) < 0.00000001)
self.assertEqual(trans1.balance_begin - 0.0001, trans1.balance_end)
self.assertEqual(trans1.locked_balance_begin, trans1.locked_balance_end)
self.assertEqual(trans1.available_to_trade_begin - 0.0001, trans1.available_to_trade_end)
self.assertEqual('DEBT', trans1.balance_update_type)
except UserWalletTransaction.DoesNotExist:
self.fail('Could not find redeem fee userwallettransaction for txid 6027fed2199003b34ceb910bd7e1f42914e0c1ea2153d9766e77cfa31cb9255e')
        # test that there are 3 pending redeem and 0 pending receive trans for user1
self.assertEqual(3, len(UserWalletTransaction.objects.filter(user_wallet__user__id = 2, user_wallet__wallet__id = axfwallet.id, transaction_type = 'REDEEM', status='PENDING')))
self.assertEqual(0, len(UserWalletTransaction.objects.filter(user_wallet__user__id = 2, user_wallet__wallet__id = axfwallet.id, transaction_type = 'DEPOSITE', status='PENDING')))
        # test that there are 2 pending redeem and 0 pending receive trans for user2
        self.assertEqual(2, len(UserWalletTransaction.objects.filter(user_wallet__user__id = 3, user_wallet__wallet__id = axfwallet.id, transaction_type = 'REDEEM', status='PENDING')))
        self.assertEqual(0, len(UserWalletTransaction.objects.filter(user_wallet__user__id = 3, user_wallet__wallet__id = axfwallet.id, transaction_type = 'DEPOSITE', status='PENDING')))
# validate the cny transaction
self.validate_cny_first_run()
mock_listtransactions.return_value = test_data2
c = Client()
response = c.get('/trading/account/cron/update_receive/')
self.assertEqual(200, response.status_code)
user1_wallet = UserWallet.objects.get(user__username='taozhang',
wallet__cryptocurrency__currency_code = 'AXFund')
user2_wallet = UserWallet.objects.get(user__username='yingzhou',
wallet__cryptocurrency__currency_code = 'AXFund')
self.assertEqual(100 + 1.0 - 5.2 - 0.0001 * 4, user1_wallet.balance)
self.assertEqual(0, user1_wallet.locked_balance)
self.assertEqual(100 + 1.0 - 5.2 - 0.0001 * 4, user1_wallet.available_balance)
self.assertEqual(1.2 - 1 -1 -1 - 0.0001 * 3, user2_wallet.balance)
self.assertEqual(0, user2_wallet.locked_balance)
self.assertEqual(1.2 - 1 -1 -1 - 0.0001 * 3, user2_wallet.available_balance)
try:
trans1 = UserWalletTransaction.objects.get(
reference_wallet_trxId='e8392e991eaa06fc4e37a32c713d69f56b4f14ff823c1adee7b43dc1f98e3b63'
)
            # verify it is the same as the receive trans read in the first scan,
            # proving that nothing happened to this committed trans
            self.assertEqual(trans_receive_1.user_wallet.user.id, trans1.user_wallet.user.id)
            self.assertEqual(trans_receive_1.status, trans1.status)
            self.assertEqual(trans_receive_1.units, trans1.units)
            self.assertEqual(trans_receive_1.balance_begin, trans1.balance_begin)
            self.assertEqual(trans_receive_1.balance_end, trans1.balance_end)
            self.assertEqual(trans_receive_1.locked_balance_begin, trans1.locked_balance_begin)
            self.assertEqual(trans_receive_1.locked_balance_end, trans1.locked_balance_end)
            self.assertEqual(trans_receive_1.available_to_trade_begin, trans1.available_to_trade_begin)
            self.assertEqual(trans_receive_1.available_to_trade_end, trans1.available_to_trade_end)
            self.assertEqual(trans_receive_1.balance_update_type, trans1.balance_update_type)
            self.assertEqual(trans_receive_1.lastupdated_at, trans1.lastupdated_at)
            self.assertEqual(trans_receive_1.lastupdated_by, trans1.lastupdated_by)
except UserWalletTransaction.DoesNotExist:
self.fail('Could not find userwallettransaction for txid e8392e991eaa06fc4e37a32c713d69f56b4f14ff823c1adee7b43dc1f98e3b63')
#Test there is 0 pending transaction
self.assertEqual(0, len(UserWalletTransaction.objects.filter(user_wallet__user__id = 2, user_wallet__wallet__id = axfwallet.id, status='PENDING')))
self.assertEqual(0, len(UserWalletTransaction.objects.filter(user_wallet__user__id = 3, user_wallet__wallet__id = axfwallet.id, status='PENDING')))
# save the trans, prepare to compare it with the trans after another run
lookup = {}
all_trans = UserWalletTransaction.objects.all()
for trans in all_trans:
lookup[trans.id] = trans
user_wallet_1 = UserWallet.objects.get(user__id=2, wallet__cryptocurrency__currency_code = 'AXFund')
user_wallet_2 = UserWallet.objects.get(user__id=3, wallet__cryptocurrency__currency_code = 'AXFund')
# rerun should not make any problem
mock_listtransactions.return_value = test_data2
c = Client()
response = c.get('/trading/account/cron/update_receive/')
self.assertEqual(200, response.status_code)
user1_wallet = UserWallet.objects.get(user__username='taozhang',
wallet__cryptocurrency__currency_code = 'AXFund')
user2_wallet = UserWallet.objects.get(user__username='yingzhou',
wallet__cryptocurrency__currency_code = 'AXFund')
self.assertEqual(user_wallet_1.balance, user1_wallet.balance)
self.assertEqual(user_wallet_1.locked_balance, user1_wallet.locked_balance)
self.assertEqual(user_wallet_1.available_balance, user1_wallet.available_balance)
self.assertEqual(user_wallet_1.lastupdated_by, user1_wallet.lastupdated_by)
self.assertEqual(user_wallet_1.lastupdated_at, user1_wallet.lastupdated_at)
self.assertEqual(user_wallet_2.balance, user2_wallet.balance)
self.assertEqual(user_wallet_2.locked_balance, user2_wallet.locked_balance)
self.assertEqual(user_wallet_2.available_balance, user2_wallet.available_balance)
self.assertEqual(user_wallet_2.lastupdated_by, user2_wallet.lastupdated_by)
self.assertEqual(user_wallet_2.lastupdated_at, user2_wallet.lastupdated_at)
wallet_trans = UserWalletTransaction.objects.all()
for trans in wallet_trans:
old_trans = lookup[trans.id]
self.assertEqual(old_trans.user_wallet.user.id, trans.user_wallet.user.id)
self.assertEqual(old_trans.lastupdated_at, trans.lastupdated_at)
self.assertEqual(old_trans.lastupdated_by, trans.lastupdated_by)
self.assertEqual(old_trans.units, trans.units)
def validate_cny_first_run(self):
cnywallet = Wallet.objects.get(cryptocurrency__currency_code = 'CNY')
user1_wallet = UserWallet.objects.get(user__username='taozhang',
wallet__cryptocurrency__currency_code = 'CNY')
user2_wallet = UserWallet.objects.get(user__username='yingzhou',
wallet__cryptocurrency__currency_code = 'CNY')
self.assertEqual(100.0, user1_wallet.balance)
self.assertEqual(0.02 + 0.01, user1_wallet.locked_balance)
self.assertEqual(100.0 - 0.02 - 0.01, user1_wallet.available_balance)
user1_wallet_trans = UserWalletTransaction.objects.filter(
user_wallet__id = user1_wallet.id).order_by('-lastupdated_at')
self.assertEqual(3, len(user1_wallet_trans))
debt_count = 0
for tran in user1_wallet_trans:
if tran.balance_update_type == 'CREDIT':
self.assertEqual('PROCESSED', tran.status)
self.assertEqual('DEPOSIT', tran.transaction_type)
self.assertEqual(0, tran.balance_begin)
self.assertEqual(100.0, tran.balance_end)
self.assertEqual(0, tran.locked_balance_begin)
self.assertEqual(0, tran.locked_balance_end)
self.assertEqual(0, tran.available_to_trade_begin)
self.assertEqual(100.0, tran.available_to_trade_end)
elif tran.balance_update_type == 'DEBT':
debt_count = debt_count + 1
self.assertEqual('PENDING', tran.status)
if tran.transaction_type == 'REDEEM':
self.assertEqual(100.0, tran.balance_begin)
self.assertEqual(100.0, tran.balance_end)
self.assertEqual(0, tran.locked_balance_begin)
self.assertEqual(0.02, tran.locked_balance_end)
self.assertEqual(100.0, tran.available_to_trade_begin)
self.assertEqual(99.98, tran.available_to_trade_end)
elif tran.transaction_type == 'REDEEMFEE':
self.assertEqual(100.0, tran.balance_begin)
self.assertEqual(100.0, tran.balance_end)
self.assertEqual(0.02, tran.locked_balance_begin)
self.assertEqual(0.03, tran.locked_balance_end)
self.assertEqual(99.98, tran.available_to_trade_begin)
self.assertEqual(99.97, tran.available_to_trade_end)
else:
self.fail('Unexpected transaction type {0}'.format(tran.transaction_type))
self.assertEqual(2, debt_count)
        # verify user2's (id 3) transactions: there is just one confirmed
        # redeem and one confirmed deposit; there is no trace of the
        # first deposit
self.assertEqual(-1.0 - 0.01 + 2.0, user2_wallet.balance)
self.assertEqual(0, user2_wallet.locked_balance)
self.assertEqual(-1.0 - 0.01 + 2.0, user2_wallet.available_balance)
user2_wallet_trans = UserWalletTransaction.objects.filter(
user_wallet__id = user2_wallet.id).order_by('-lastupdated_at')
self.assertEqual(4, len(user2_wallet_trans))
for tran in user2_wallet_trans:
print('Tran {0}: type:{1}:{2} balance: {3}-{4} locked: {5}-{6} available: {7}-{8} status: {9}'.format(
tran.id, tran.balance_update_type, tran.transaction_type,
tran.balance_begin, tran.balance_end,
tran.locked_balance_begin, tran.locked_balance_end,
tran.available_to_trade_begin, tran.available_to_trade_end,
tran.status
))
if tran.balance_update_type == 'CREDIT':
if tran.status == 'PROCESSED':
self.assertEqual('DEPOSIT', tran.transaction_type)
self.assertEqual(-1.01, tran.balance_begin)
self.assertEqual(0.99, tran.balance_end)
self.assertEqual(0, tran.locked_balance_begin)
self.assertEqual(0, tran.locked_balance_end)
self.assertEqual(-1.01, tran.available_to_trade_begin)
self.assertEqual(0.99, tran.available_to_trade_end)
# the pending deposite should have everything as 0
elif tran.status == 'PENDING':
self.assertEqual('DEPOSIT', tran.transaction_type)
self.assertEqual(0, tran.balance_begin)
self.assertEqual(0, tran.balance_end)
self.assertEqual(0, tran.locked_balance_begin)
self.assertEqual(0, tran.locked_balance_end)
self.assertEqual(0, tran.available_to_trade_begin)
self.assertEqual(0, tran.available_to_trade_end)
elif tran.balance_update_type == 'DEBT':
self.assertEqual('PROCESSED', tran.status)
if tran.transaction_type == 'REDEEM':
self.assertEqual(0, tran.balance_begin)
self.assertEqual(-1.0, tran.balance_end)
self.assertEqual(0, tran.locked_balance_begin)
self.assertEqual(0, tran.locked_balance_end)
self.assertEqual(0, tran.available_to_trade_begin)
self.assertEqual(-1.0, tran.available_to_trade_end)
elif tran.transaction_type == 'REDEEMFEE':
self.assertEqual(-1.0, tran.balance_begin)
self.assertEqual(-1.01, tran.balance_end)
self.assertEqual(0, tran.locked_balance_begin)
self.assertEqual(0, tran.locked_balance_end)
self.assertEqual(-1.0, tran.available_to_trade_begin)
self.assertEqual(-1.01, tran.available_to_trade_end)
else:
self.fail('Unexpected transaction type {0}'.format(tran.transaction_type))
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import dataclasses
from logging import Logger
from typing import Any, Dict, List, Optional, Tuple, Type
import torch
from ax.core.search_space import SearchSpaceDigest
from ax.core.types import TCandidateMetadata, TConfig
from ax.models.model_utils import best_in_sample_point
from ax.models.torch.utils import (
_to_inequality_constraints,
pick_best_out_of_sample_point_acqf_class,
predict_from_model,
)
from ax.utils.common.base import Base
from ax.utils.common.constants import Keys
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast, checked_cast_optional, not_none
from botorch.fit import fit_gpytorch_model
from botorch.models.model import Model
from botorch.utils.containers import TrainingData
from gpytorch.kernels import Kernel
from gpytorch.likelihoods.likelihood import Likelihood
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.mlls.marginal_log_likelihood import MarginalLogLikelihood
from torch import Tensor
NOT_YET_FIT_MSG = (
"Underlying BoTorch `Model` has not yet received its training_data. "
"Please fit the model first."
)
logger: Logger = get_logger(__name__)
class Surrogate(Base):
"""
**All classes in 'botorch_modular' directory are under
construction, incomplete, and should be treated as alpha
versions only.**
    Ax wrapper for a BoTorch ``Model``; a subcomponent of ``BoTorchModel``
    that is not meant to be used outside of it.
Args:
botorch_model_class: ``Model`` class to be used as the underlying
BoTorch model.
mll_class: ``MarginalLogLikelihood`` class to use for model-fitting.
model_options: Dictionary of options / kwargs for the BoTorch
``Model`` constructed during ``Surrogate.fit``.
kernel_class: ``Kernel`` class, not yet used. Will be used to
construct custom BoTorch ``Model`` in the future.
kernel_options: Kernel kwargs, not yet used. Will be used to
construct custom BoTorch ``Model`` in the future.
likelihood: ``Likelihood`` class, not yet used. Will be used to
construct custom BoTorch ``Model`` in the future.
"""
botorch_model_class: Type[Model]
mll_class: Type[MarginalLogLikelihood]
model_options: Dict[str, Any]
kernel_class: Optional[Type[Kernel]] = None
_training_data: Optional[TrainingData] = None
_model: Optional[Model] = None
# Special setting for surrogates instantiated via `Surrogate.from_botorch`,
# to avoid re-constructing the underlying BoTorch model on `Surrogate.fit`
# when set to `False`.
_constructed_manually: bool = False
def __init__(
self,
# TODO: make optional when BoTorch model factory is checked in.
# Construction will then be possible from likelihood, kernel, etc.
botorch_model_class: Type[Model],
mll_class: Type[MarginalLogLikelihood] = ExactMarginalLogLikelihood,
model_options: Optional[Dict[str, Any]] = None,
kernel_class: Optional[Type[Kernel]] = None, # TODO: use.
kernel_options: Optional[Dict[str, Any]] = None, # TODO: use.
likelihood: Optional[Type[Likelihood]] = None, # TODO: use.
) -> None:
self.botorch_model_class = botorch_model_class
self.mll_class = mll_class
self.model_options = model_options or {}
# Temporary validation while we develop these customizations.
if likelihood is not None:
raise NotImplementedError("Customizing likelihood not yet implemented.")
if kernel_class is not None or kernel_options:
raise NotImplementedError("Customizing kernel not yet implemented.")
@property
def model(self) -> Model:
if self._model is None:
raise ValueError(
"BoTorch `Model` has not yet been constructed, please fit the "
"surrogate first (done via `BoTorchModel.fit`)."
)
return not_none(self._model)
@property
def training_data(self) -> TrainingData:
if self._training_data is None:
raise ValueError(NOT_YET_FIT_MSG)
return not_none(self._training_data)
@property
def training_data_per_outcome(self) -> Dict[str, TrainingData]:
raise NotImplementedError( # pragma: no cover
"`training_data_per_outcome` is only used in `ListSurrogate`."
)
@property
def dtype(self) -> torch.dtype:
return self.training_data.X.dtype
@property
def device(self) -> torch.device:
return self.training_data.X.device
@classmethod
def from_botorch(
cls,
model: Model,
mll_class: Type[MarginalLogLikelihood] = ExactMarginalLogLikelihood,
) -> Surrogate:
"""Instantiate a `Surrogate` from a pre-instantiated Botorch `Model`."""
surrogate = cls(botorch_model_class=model.__class__, mll_class=mll_class)
surrogate._model = model
# Temporarily disallowing `update` for surrogates instantiated from
# pre-made BoTorch `Model` instances to avoid reconstructing models
# that were likely pre-constructed for a reason (e.g. if this setup
        # doesn't fully allow to construct them).
surrogate._constructed_manually = True
return surrogate
def clone_reset(self) -> Surrogate:
return self.__class__(**self._serialize_attributes_as_kwargs())
def construct(self, training_data: TrainingData, **kwargs: Any) -> None:
"""Constructs the underlying BoTorch ``Model`` using the training data.
Args:
training_data: Training data for the model (for one outcome for
the default `Surrogate`, with the exception of batched
multi-output case, where training data is formatted with just
one X and concatenated Ys).
**kwargs: Optional keyword arguments, expects any of:
- "fidelity_features": Indices of columns in X that represent
fidelity.
"""
if self._constructed_manually:
logger.warning("Reconstructing a manually constructed `Model`.")
if not isinstance(training_data, TrainingData):
raise ValueError( # pragma: no cover
"Base `Surrogate` expects training data for single outcome."
)
input_constructor_kwargs = {**self.model_options, **(kwargs or {})}
self._training_data = training_data
formatted_model_inputs = self.botorch_model_class.construct_inputs(
training_data=self.training_data, **input_constructor_kwargs
)
# pyre-ignore[45]: Py raises informative msg if `model_cls` abstract.
self._model = self.botorch_model_class(**formatted_model_inputs)
def fit(
self,
training_data: TrainingData,
search_space_digest: SearchSpaceDigest,
metric_names: List[str],
candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
state_dict: Optional[Dict[str, Tensor]] = None,
refit: bool = True,
) -> None:
"""Fits the underlying BoTorch ``Model`` to ``m`` outcomes.
        NOTE: ``state_dict`` and ``refit`` keyword arguments control how the
        underlying BoTorch ``Model`` will be fit: whether its parameters will
        be reoptimized and whether it will be warm-started from a given state.
        There are three possibilities:
        * ``fit(state_dict=None)``: fit model from scratch (optimize model
parameters and set its training data used for inference),
* ``fit(state_dict=some_state_dict, refit=True)``: warm-start refit
with a state dict of parameters (still re-optimize model parameters
and set the training data),
* ``fit(state_dict=some_state_dict, refit=False)``: load model parameters
without refitting, but set new training data (used in cross-validation,
for example).
Args:
            training_data: BoTorch ``TrainingData`` container with Xs, Ys, and
possibly Yvars, to be passed to ``Model.construct_inputs`` in
BoTorch.
search_space_digest: A SearchSpaceDigest object containing
                metadata on the features in the training data.
metric_names: Names of each outcome Y in Ys.
candidate_metadata: Model-produced metadata for candidates, in
the order corresponding to the Xs.
state_dict: Optional state dict to load.
refit: Whether to re-optimize model parameters.
"""
if self._constructed_manually:
logger.debug(
"For manually constructed surrogates (via `Surrogate.from_botorch`), "
"`fit` skips setting the training data on model and only reoptimizes "
"its parameters if `refit=True`."
)
else:
self.construct(
training_data=training_data,
metric_names=metric_names,
**dataclasses.asdict(search_space_digest)
)
if state_dict:
# pyre-fixme[6]: Expected `OrderedDict[typing.Any, typing.Any]` for 1st
# param but got `Dict[str, Tensor]`.
self.model.load_state_dict(not_none(state_dict))
if state_dict is None or refit:
mll = self.mll_class(self.model.likelihood, self.model)
fit_gpytorch_model(mll)
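    # For example (illustrative call patterns for the three fitting modes
    # described in the docstring above; `td`, `ssd`, and `sd` are assumed
    # placeholder names for training data, a search space digest, and a
    # state dict):
    #   surrogate.fit(td, ssd, ["metric"])                              # fit from scratch
    #   surrogate.fit(td, ssd, ["metric"], state_dict=sd, refit=True)   # warm-started refit
    #   surrogate.fit(td, ssd, ["metric"], state_dict=sd, refit=False)  # load params, set new data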
def predict(self, X: Tensor) -> Tuple[Tensor, Tensor]:
"""Predicts outcomes given a model and input tensor.
Args:
model: A botorch Model.
X: A ``n x d`` tensor of input parameters.
Returns:
Tensor: The predicted posterior mean as an ``n x o``-dim tensor.
Tensor: The predicted posterior covariance as a ``n x o x o``-dim tensor.
"""
return predict_from_model(model=self.model, X=X)
def best_in_sample_point(
self,
search_space_digest: SearchSpaceDigest,
objective_weights: Optional[Tensor],
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
fixed_features: Optional[Dict[int, float]] = None,
options: Optional[TConfig] = None,
) -> Tuple[Tensor, float]:
"""Finds the best observed point and the corresponding observed outcome
values.
"""
best_point_and_observed_value = best_in_sample_point(
Xs=[self.training_data.X],
# pyre-ignore[6]: `best_in_sample_point` currently expects a `TorchModel`
# or a `NumpyModel` as `model` kwarg, but only uses them for `predict`
# function, the signature for which is the same on this `Surrogate`.
# TODO: When we move `botorch_modular` directory to OSS, we will extend
# the annotation for `model` kwarg to accept `Surrogate` too.
model=self,
bounds=search_space_digest.bounds,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
linear_constraints=linear_constraints,
fixed_features=fixed_features,
options=options,
)
if best_point_and_observed_value is None:
raise ValueError("Could not obtain best in-sample point.")
best_point, observed_value = best_point_and_observed_value
return checked_cast(Tensor, best_point), observed_value
def best_out_of_sample_point(
self,
search_space_digest: SearchSpaceDigest,
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
fixed_features: Optional[Dict[int, float]] = None,
options: Optional[TConfig] = None,
) -> Tuple[Tensor, Tensor]:
"""Finds the best predicted point and the corresponding value of the
appropriate best point acquisition function.
"""
if fixed_features:
# When have fixed features, need `FixedFeatureAcquisitionFunction`
# which has peculiar instantiation (wraps another acquisition fn.),
# so need to figure out how to handle.
# TODO (ref: https://fburl.com/diff/uneqb3n9)
raise NotImplementedError("Fixed features not yet supported.")
options = options or {}
acqf_class, acqf_options = pick_best_out_of_sample_point_acqf_class(
outcome_constraints=outcome_constraints,
seed_inner=checked_cast_optional(int, options.get(Keys.SEED_INNER, None)),
qmc=checked_cast(bool, options.get(Keys.QMC, True)),
)
# Avoiding circular import between `Surrogate` and `Acquisition`.
from ax.models.torch.botorch_modular.acquisition import Acquisition
acqf = Acquisition( # TODO: For multi-fidelity, might need diff. class.
surrogate=self,
botorch_acqf_class=acqf_class,
search_space_digest=search_space_digest,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
linear_constraints=linear_constraints,
fixed_features=fixed_features,
options=acqf_options,
)
candidates, acqf_values = acqf.optimize(
n=1,
search_space_digest=search_space_digest,
inequality_constraints=_to_inequality_constraints(
linear_constraints=linear_constraints
),
fixed_features=fixed_features,
)
return candidates[0], acqf_values[0]
def update(
self,
training_data: TrainingData,
search_space_digest: SearchSpaceDigest,
metric_names: List[str],
candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
state_dict: Optional[Dict[str, Tensor]] = None,
refit: bool = True,
) -> None:
"""Updates the surrogate model with new data. In the base ``Surrogate``,
just calls ``fit`` after checking that this surrogate was not created
via ``Surrogate.from_botorch`` (in which case the ``Model`` comes premade,
constructed manually and then supplied to ``Surrogate``).
NOTE: Expects `training_data` to be all available data,
not just the new data since the last time the model was updated.
Args:
training_data: Surrogate training_data containing all the data the model
should use for inference.
search_space_digest: A SearchSpaceDigest object containing
metadata on the features in the training data.
metric_names: Names of each outcome Y in Ys.
candidate_metadata: Model-produced metadata for candidates, in
the order corresponding to the Xs.
state_dict: Optional state dict to load.
refit: Whether to re-optimize model parameters or just set the training
                data used for inference to new training data.
"""
# NOTE: In the future, could have `incremental` kwarg, in which case
# `training_data` could contain just the new data.
if self._constructed_manually:
raise NotImplementedError(
"`update` not yet implemented for models that are "
"constructed manually, but it is possible to create a new "
"surrogate in the same way as the current manually constructed one, "
"via `Surrogate.from_botorch`."
)
self.fit(
training_data=training_data,
search_space_digest=search_space_digest,
metric_names=metric_names,
candidate_metadata=candidate_metadata,
state_dict=state_dict,
refit=refit,
)
def pareto_frontier(self) -> Tuple[Tensor, Tensor]:
"""For multi-objective optimization, retrieve Pareto frontier instead
of best point.
Returns: A two-tuple of:
- tensor of points in the feature space,
- tensor of corresponding (multiple) outcomes.
"""
raise NotImplementedError(
"Pareto frontier not yet implemented."
) # pragma: no cover
def compute_diagnostics(self) -> Dict[str, Any]:
"""Computes model diagnostics like cross-validation measure of fit, etc."""
return {} # pragma: no cover
def _serialize_attributes_as_kwargs(self) -> Dict[str, Any]:
"""Serialize attributes of this surrogate, to be passed back to it
as kwargs on reinstantiation.
"""
return {
"botorch_model_class": self.botorch_model_class,
"mll_class": self.mll_class,
"model_options": self.model_options,
}
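# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of wrapping a pre-fit BoTorch model via
# `Surrogate.from_botorch` and predicting with it, assuming the standard
# `SingleTaskGP` constructor with `n x d` inputs and `n x 1` outcomes.
if __name__ == "__main__":
    from botorch.models import SingleTaskGP

    train_X = torch.rand(20, 3, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    surrogate = Surrogate.from_botorch(SingleTaskGP(train_X, train_Y))
    mean, cov = surrogate.predict(torch.rand(5, 3, dtype=torch.double))
    print(mean.shape, cov.shape)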
|
class ProjectLocationSet(APIObject,IDisposable,IEnumerable):
"""
    A set that contains project locations.
ProjectLocationSet()
"""
def Clear(self):
"""
Clear(self: ProjectLocationSet)
Removes every project location from the set,rendering it empty.
"""
pass
def Contains(self,item):
"""
Contains(self: ProjectLocationSet,item: ProjectLocation) -> bool
Tests for the existence of a project location within the set.
item: The project location to be searched for.
Returns: The Contains method returns True if the project location is within the set,
otherwise False.
"""
pass
def Dispose(self):
""" Dispose(self: ProjectLocationSet,A_0: bool) """
pass
def Erase(self,item):
"""
Erase(self: ProjectLocationSet,item: ProjectLocation) -> int
Removes a specified project location from the set.
item: The project location to be erased.
Returns: The number of project locations that were erased from the set.
"""
pass
def ForwardIterator(self):
"""
ForwardIterator(self: ProjectLocationSet) -> ProjectLocationSetIterator
Retrieve a forward moving iterator to the set.
Returns: Returns a forward moving iterator to the set.
"""
pass
def GetEnumerator(self):
"""
GetEnumerator(self: ProjectLocationSet) -> IEnumerator
Retrieve a forward moving iterator to the set.
Returns: Returns a forward moving iterator to the set.
"""
pass
def Insert(self,item):
"""
Insert(self: ProjectLocationSet,item: ProjectLocation) -> bool
Insert the specified project location into the set.
item: The project location to be inserted into the set.
Returns: Returns whether the project location was inserted into the set.
"""
pass
def ReleaseManagedResources(self,*args):
""" ReleaseManagedResources(self: APIObject) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: ProjectLocationSet) """
pass
def ReverseIterator(self):
"""
ReverseIterator(self: ProjectLocationSet) -> ProjectLocationSetIterator
Retrieve a backward moving iterator to the set.
Returns: Returns a backward moving iterator to the set.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
IsEmpty=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Test to see if the set is empty.
Get: IsEmpty(self: ProjectLocationSet) -> bool
"""
Size=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the number of project locations that are in the set.
Get: Size(self: ProjectLocationSet) -> int
"""
|
import json
import requests
from ..settings import Settings
class Client:
settings = Settings()
def __init__(self,session=None):
if session is None:
self.session = requests.Session()
else:
self.session = session
self.session.headers.update({'apikey':self.settings.Apikey})
def get(self, path, params=None):
url = self.settings.getUrl(path)
response = self.session.get(url, params=params)
return response.json()
def post(self, path, data, params=None):
url = self.settings.getUrl(path)
headers = {
'Content-Type': 'application/json',
}
data = json.dumps(data)
response = self.session.post(url, data=data, headers=headers, params=params)
return response.json()
def postString(self, path, data, params=None):
url = self.settings.getUrl(path)
headers = {
'Content-Type': 'text/plain',
}
response = self.session.post(url, data=data, headers=headers, params=params)
try:
jdata = response.json()
        except ValueError:
            return None
return jdata
def put(self, path, data, params=None):
url = self.settings.getUrl(path)
headers = {
'Content-Type': 'application/json',
}
data = json.dumps(data)
response = self.session.put(url, data=data, headers=headers, params=params)
return response.json()
def delete(self, path, params=None):
url = self.settings.getUrl(path)
response = self.session.delete(url, params=params)
        return response.json()
|
'''
Page Rank
A PageRank implementation based on the idea that a website is more
important when it is linked to by other important websites, estimated both
by a recursive mathematical formulation and by sampling a random-surfer
probability distribution.
'''
# Importing Libraries
import os
import random
import re
import sys
import math
# Defining Global Constants
DAMPING = 0.85
SAMPLES = 100000
def main():
if len(sys.argv) != 2:
sys.exit("Usage: python pagerank.py corpus")
corpus = crawl(sys.argv[1])
ranks = sample_pagerank(corpus, DAMPING, SAMPLES)
print(f"PageRank Results from Sampling (n = {SAMPLES})")
for page in sorted(ranks):
print(f" {page}: {ranks[page]:.4f}")
ranks = iterate_pagerank(corpus, DAMPING)
print(f"PageRank Results from Iteration")
for page in sorted(ranks):
print(f" {page}: {ranks[page]:.4f}")
def crawl(directory):
"""
Parse a directory of HTML pages and check for links to other pages.
Return a dictionary where each key is a page, and values are
a list of all other pages in the corpus that are linked to by the page.
"""
pages = dict()
# Extract all links from HTML files
for filename in os.listdir(directory):
if not filename.endswith(".html"):
continue
with open(os.path.join(directory, filename)) as f:
contents = f.read()
links = re.findall(r"<a\s+(?:[^>]*?)href=\"([^\"]*)\"", contents)
pages[filename] = set(links) - {filename}
# Only include links to other pages in the corpus
for filename in pages:
pages[filename] = set(
link for link in pages[filename]
if link in pages
)
return pages
def transition_model(corpus, page, damping_factor):
"""
Return a probability distribution over which page to visit next,
given a current page.
With probability `damping_factor`, choose a link at random
linked to by `page`. With probability `1 - damping_factor`, choose
a link at random chosen from all pages in the corpus.
"""
distribution = dict()
if corpus[page]:
for link in corpus:
distribution[link] = (1-damping_factor) / len(corpus)
if link in corpus[page]:
distribution[link] += damping_factor / len(corpus[page])
else:
# If page has no outgoing links then choose randomly among all pages
for link in corpus:
distribution[link] = 1 / len(corpus)
return distribution
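# For example (illustrative): with corpus
# {"1.html": {"2.html", "3.html"}, "2.html": {"3.html"}, "3.html": {"2.html"}},
# transition_model(corpus, "1.html", 0.85) returns
# {"1.html": 0.05, "2.html": 0.475, "3.html": 0.475}:
# each page gets (1 - 0.85) / 3 = 0.05, and the two pages linked from
# "1.html" each add 0.85 / 2 = 0.425.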
def sample_pagerank(corpus, damping_factor, n):
"""
Return PageRank values for each page by sampling `n` pages
according to transition model, starting with a page at random.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
pagerank = dict()
sample = None
random.seed()
for page in corpus:
pagerank[page] = 0
for step in range(n):
if sample is None:
            # First sample generated by choosing a page at random
sample = random.choices(list(corpus.keys()), k=1)[0]
else:
# Next sample generated from the previous one based on its transition model
model = transition_model(corpus, sample, damping_factor)
population, weights = zip(*model.items())
sample = random.choices(population, weights=weights, k=1)[0]
pagerank[sample] += 1
# Normalize the results
for page in corpus:
pagerank[page] /= n
return pagerank
def iterate_pagerank(corpus, damping_factor):
"""
Return PageRank values for each page by iteratively updating
PageRank values until convergence.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
pagerank = dict()
newrank = dict()
# Assign initial values for pagerank
for page in corpus:
pagerank[page] = 1 / len(corpus)
repeat = True
while repeat:
# Calculate new rank values based on all of the current rank values
for page in pagerank:
total = float(0)
for possible_page in corpus:
# We consider each possible page that links to current page
if page in corpus[possible_page]:
total += pagerank[possible_page] / len(corpus[possible_page])
# A page that has no links is interpreted as having one link for every page (including itself)
if not corpus[possible_page]:
total += pagerank[possible_page] / len(corpus)
newrank[page] = (1 - damping_factor) / len(corpus) + damping_factor * total
repeat = False
# If any of the values changes by more than the threshold, repeat process
for page in pagerank:
if not math.isclose(newrank[page], pagerank[page], abs_tol=0.001):
repeat = True
# Assign new values to current values
pagerank[page] = newrank[page]
return pagerank
if __name__ == "__main__":
main() |
# Author: Kimia Nadjahi
# Some parts of this code are taken from https://github.com/skolouri/swgmm
import numpy as np
import ot
# import HilbertCode_caller
# import swapsweep
def mOT(x, y, k, m):
    """Averaged mini-batch OT: draw k pairs of size-m mini-batches from x and
    y and return the exact OT cost averaged over all k*k mini-batch pairs."""
n = x.shape[0]
if k < int(n / m):
inds1 = np.split(np.random.permutation(n)[: int(n / m) * m], int(n / m))
inds2 = np.split(np.random.permutation(n)[: int(n / m) * m], int(n / m))
inds1 = list(np.array(inds1)[np.random.choice(len(inds1), k, replace=False)])
inds2 = list(np.array(inds2)[np.random.choice(len(inds2), k, replace=False)])
else:
num_permute = int(k / int(n / m)) + 1
inds1 = []
inds2 = []
for _ in range(num_permute):
inds1_p = np.split(np.random.permutation(n), int(n / m))
inds2_p = np.split(np.random.permutation(n), int(n / m))
inds1 += inds1_p
inds2 += inds2_p
inds1 = list(np.array(inds1)[np.random.choice(len(inds1), k, replace=False)])
inds2 = list(np.array(inds2)[np.random.choice(len(inds2), k, replace=False)])
# C = ot.dist(x, y)
cost = 0
for i in range(k):
for j in range(k):
M = ot.dist(x[inds1[i]], y[inds2[j]])
C = ot.emd([], [], M)
cost += np.sum(C * M)
return cost / (k**2)
def BoMbOT(x, y, k, m):
    """Batch-of-mini-batches OT (BoMb-OT): compute the k-by-k matrix of OT
    costs between mini-batch pairs, then solve an outer OT problem over that
    cost matrix instead of averaging it uniformly."""
n = x.shape[0]
if k < int(n / m):
inds1 = np.split(np.random.permutation(n)[: int(n / m) * m], int(n / m))
inds2 = np.split(np.random.permutation(n)[: int(n / m) * m], int(n / m))
inds1 = list(np.array(inds1)[np.random.choice(len(inds1), k, replace=False)])
inds2 = list(np.array(inds2)[np.random.choice(len(inds2), k, replace=False)])
else:
num_permute = int(k / int(n / m)) + 1
inds1 = []
inds2 = []
for _ in range(num_permute):
inds1_p = np.split(np.random.permutation(n), int(n / m))
inds2_p = np.split(np.random.permutation(n), int(n / m))
inds1 += inds1_p
inds2 += inds2_p
inds1 = list(np.array(inds1)[np.random.choice(len(inds1), k, replace=False)])
inds2 = list(np.array(inds2)[np.random.choice(len(inds2), k, replace=False)])
# C = ot.dist(x, y)
big_C = np.zeros((k, k))
for i in range(k):
for j in range(k):
M = ot.dist(x[inds1[i]], y[inds2[j]])
C = ot.emd([], [], M)
big_C[i, j] = np.sum(C * M)
pi = ot.emd([], [], big_C)
return np.sum(pi * big_C)
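# --- Illustrative usage sketch (not part of the original file) ---
# Compare the two estimators on small random point clouds; requires the POT
# package (`pip install pot`), which provides `ot.dist` and `ot.emd`.
if __name__ == "__main__":
    np.random.seed(0)
    x = np.random.randn(100, 2)
    y = np.random.randn(100, 2) + 1.0
    # k = 5 mini-batch pairs of size m = 20 each
    print("mOT   :", mOT(x, y, k=5, m=20))
    print("BoMbOT:", BoMbOT(x, y, k=5, m=20))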
|
from unittest import TestCase
from apps.algorithms.mean import Mean
from apps.algorithms.standart_deviation import StandartDeviation
from apps.algorithms.z_value import ZValue
__author__ = 'cenk'
class ZValueTest(TestCase):
def setUp(self):
pass
def test_algorithm_with_list(self):
data_list = [1, 2, 3, 4, 5]
standart_deviation = StandartDeviation()
standart_deviation_value = standart_deviation.calculate(data_list)
mean = Mean()
mean_value = mean.calculate(data_list)
        print(standart_deviation_value, mean_value)
z_value = ZValue()
z1 = z_value.calculate(88, mean=100, standart_deviation=10)
z2 = z_value.calculate(112, mean=100, standart_deviation=10)
z3 = z_value.calculate(5, mean=100, standart_deviation=10)
        print(z1, z2, z3)
def test_get_decimals(self):
z_value = ZValue()
z_value.calculate(88, mean=100, standart_deviation=10)
z_value.find_from_table()
def test_algorithm_with_tuple(self):
mean = Mean()
data_list = [("a", 1), ("b", 2), ("c", 3), ( "d", 4), ("e", 5)]
self.assertEquals(3, mean.calculate(data_list, is_tuple=True, index=1))
data_list = [("a", "a", 1), ("b", "b", 2), ("c", "c", 3), ("d", "d", 4), ("e", "e", 5)]
self.assertEquals(3.0, mean.calculate(data_list, is_tuple=True, index=2)) |
from regression_tests import *
class TestUpxDetectionSegfault(Test):
settings = TestSettings(
tool='fileinfo',
input='sample.ex',
args='--json'
)
# Ensures that PE files without sections do not crash fileinfo upon
# execution.
# See https://github.com/avast/retdec/issues/821
def test_file_analysed_correctly(self):
self.assertTrue(self.fileinfo.succeeded)
|
from typing import Callable
import numpy as np
from pandas import DataFrame
from .converter import ConverterAbs, interpret
from .utils import INTER_STR_SEP
class VarReplace(ConverterAbs):
"""Replaces the variable with some formula
Transform all occurrences of the variable varname in the model with the
binary formula, and add extra constraint if required. This is an abstract
class which can be used for various integer encodings. If is_regexp is set
to True, then all appropriate variables should be replaced.
.. note::
Variables varname disappear from the model, including its list of
variables.
:param varname: variable to be replaced
:param is_regexp: flag deciding if varname is regular expression
"""
def __init__(self, varname: str, is_regexp: bool) -> None:
self.varname = varname
self.is_regexp = is_regexp
super().__init__()
class VarOneHot(VarReplace):
"""Replace integer variables with one-hot encoding
Replaces integer variables with one-hot encoding, and add constraint that
sum of new added bits is equal to one. For variable lb <= y <= ub
the encoding creates ub-lb+1 binary variables. The limits of y needs to be
finite integer numbers. If is_regexp is set to True, then all bounded
integer variables are replaced.
:param varname: the replaced integer variable
:param is_regexp: flag deciding if varname is regular expression
"""
def __init__(self, varname: str, is_regexp: bool) -> None:
super().__init__(varname, is_regexp)
@interpret.register
def interpret_varonehot(samples: DataFrame, converter: VarOneHot) -> DataFrame:
for name in converter.data["bounds"].keys():
lb, ub = converter.data["bounds"][name]
names = [f"{name}{INTER_STR_SEP}OH_{i}" for i in range(ub - lb + 1)]
samples["feasible_tmp"] = samples.apply(lambda row: sum(row[n] for n in names) == 1, axis=1)
def set_var_value(row):
return lb + [row[n] for n in names].index(1) if row["feasible_tmp"] else np.nan
samples[name] = samples.apply(
set_var_value,
axis=1,
)
samples["feasible"] &= samples.pop("feasible_tmp")
for n in names:
samples.pop(n)
return samples
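# For example (illustrative): with bounds 1 <= y <= 3, interpretation reads
# the one-hot columns f"y{INTER_STR_SEP}OH_0" .. f"y{INTER_STR_SEP}OH_2"; a
# sampled row (0, 1, 0) is feasible and decoded as y = 1 + 1 = 2, while
# (1, 1, 0) violates the one-hot constraint and is marked infeasible.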
class VarBinary(VarReplace):
"""Replace integer variables with binary encoding
Replaces integer variables with binary encoding. For variable lb <= y <= ub
the encoding creates approximately log(ub-lb+1) binary variables. The limits
of y needs to be finite integer numbers. If is_regexp is set to True, then
all bounded integer variables are replaced.
:param varname: the replaced integer variable
:param is_regexp: flag deciding if varname is a regular expression
"""
def __init__(self, varname: str, is_regexp: bool) -> None:
super().__init__(varname, is_regexp)
def _binary_encoding_coeff(lb: int, ub: int):
span_size = ub - lb + 1
is_power_of_two = span_size and (not (span_size & (span_size - 1)))
if is_power_of_two:
bit_no = span_size.bit_length() - 1
vals = [2 ** i for i in range(bit_no)]
else:
bit_no = span_size.bit_length()
vals = [2 ** i for i in range(bit_no - 1)]
vals.append(ub - lb - sum(vals))
return vals
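# For example: lb=0, ub=7 spans 8 values (a power of two) and yields
# coefficients [1, 2, 4], while lb=0, ub=5 spans 6 values and yields
# [1, 2, 2]; in both cases every integer in [lb, ub] is representable as
# lb + sum(vals[i] * b_i) for some assignment of the bits b_i.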
@interpret.register
def interpret_binary(samples: DataFrame, converter: VarBinary) -> DataFrame:
for name in converter.data["bounds"].keys():
lb, ub = converter.data["bounds"][name]
vals = _binary_encoding_coeff(lb, ub)
bnames = [f"{name}{INTER_STR_SEP}BIN_{i}" for i in range(len(vals))]
samples[name] = lb + sum(val * samples[bname] for val, bname in zip(vals, bnames))
for n in bnames:
samples.pop(n)
return samples
class VarPracticalBinary(VarReplace):
"""Replace integer variables with practical binary encoding
TODO: fill
:param varname: the replaced integer variable
:param is_regexp: flag deciding if varname is a regular expression
:param ub: allowed upper bound
"""
def __init__(self, varname: str, is_regexp: bool, ub: int) -> None:
assert ub > 1
super().__init__(varname, is_regexp)
@interpret.register
def interpret_varpracticalbinary(samples: DataFrame, converter: VarPracticalBinary) -> DataFrame:
raise NotImplementedError()
class TrivialIntToBit(VarReplace):
"""Replace integer with binary variable
Replaces integer variables y with binary variable lb + b, where
lb <= y <= lb+1 is assumed. lb should be finite integer number. If is_regexp
is set to True, then all integer variables satisfying the constraint above
are replaced.
:param varname: the replaced integer variable
:param is_regexp: flag deciding if varname is regular expression
    :param optional: if set to True, converts only integer variables with
appropriate bounds
"""
def __init__(self, varname: str, is_regexp: bool) -> None:
super().__init__(varname, is_regexp)
@interpret.register
def interpret_trivialinttobit(samples: DataFrame, converter: TrivialIntToBit) -> DataFrame:
    for name, lb in converter.data["lb"].items():
        name_new = f"{name}{INTER_STR_SEP}itb"
        samples[name] = samples.pop(name_new) + lb
return samples
class BitToSpin(VarReplace):
"""Replace binary variable with spin variable
    Replaces binary variable b with spin variable s. The formula is
    b = (1-s)/2 if reversed is set to False, or b = (1+s)/2 otherwise. If
    is_regexp is set to True, then all binary variables are replaced.
:param varname: the replaced binary variable
:param is_regexp: flag deciding if varname is regular expression
:param reversed: the flag denoting which formula is used for replacement
"""
def __init__(self, varname: str, is_regexp: bool, reversed: bool) -> None:
super().__init__(varname, is_regexp)
self.reversed = reversed
@interpret.register
def interpret_bittospin(samples: DataFrame, converter: BitToSpin) -> DataFrame:
for name in converter.data["varnames"]:
        name_new = f"{name}{INTER_STR_SEP}bts"
        # recover the bit with the same formula that replaced it:
        # b = (1 - s) / 2 by default, b = (1 + s) / 2 when reversed
        if converter.reversed:
            samples[name] = (1 + samples.pop(name_new)) / 2
        else:
            samples[name] = (1 - samples.pop(name_new)) / 2
return samples
class SpinToBit(VarReplace):
"""Replace spin variable with bit variable
    Replaces spin variable s with bit variable b. The formula is s = 1-2*b
    if reversed is set to False, or s = 2*b-1 otherwise. If is_regexp is set
    to True, then all spin variables are replaced.
:param varname: the replaced spin variable
:param is_regexp: flag deciding if varname is regular expression
:param reversed: the flag denoting which formula is used for replacement
"""
def __init__(self, varname: str, is_regexp: bool, reversed: bool) -> None:
super().__init__(varname, is_regexp)
self.reversed = reversed
@interpret.register
def interpret_spintobit(samples: DataFrame, converter: SpinToBit) -> DataFrame:
for name in converter.data["varnames"]:
        name_new = f"{name}{INTER_STR_SEP}stb"
        # recover the spin with the same formula that replaced it:
        # s = 1 - 2*b by default, s = 2*b - 1 when reversed
        if converter.reversed:
            samples[name] = 2 * samples.pop(name_new) - 1
        else:
            samples[name] = 1 - 2 * samples.pop(name_new)
return samples
class IntSetValue(VarReplace):
"""Set value to a variable
    Replaces each occurrence of the variable with the provided value. If
    is_regexp is set to True, then all matching variables are replaced.
    The value must be within the bounds of the variable.
:param varname: the replaced variable
:param is_regexp: flag deciding if varname is regular expression
:param value: the new value of the integers
"""
def __init__(self, varname: str, is_regexp: bool, value: int) -> None:
super().__init__(varname, is_regexp)
self.value = value
@interpret.register
def interpret_intsetvalue(samples: DataFrame, converter: IntSetValue) -> DataFrame:
raise NotImplementedError()
class ReplaceVarWithEq(VarReplace):
"""Replace a variable with expression based on a given constraint
    Given an equality constraint of the form a*x + P(y) == R(z) and a
    variable x, removes x and replaces each occurrence of x with
    (R(z) - P(y))/a, provided z and y are sets of variables not including x.
    This operation is always correct if x is unbounded; otherwise it may
    lead to a nonequivalent model. The constraint is removed after being
    used.
    This operation may increase or reduce the number of qubits, depending on
    the encoding scheme used.
:param varname: the replaced variable
:param is_regexp: flag deciding if varname is regular expression
:param replace_scheme: a function which provides constraint name to be used
for a given variable.
"""
def __init__(self, varname: str, is_regexp: bool, replace_scheme: Callable) -> None:
# theoretically replace_scheme could be a dict, but then it will not be
# extendible to larger models
self.replace_scheme = replace_scheme
super().__init__(varname, is_regexp)
@interpret.register
def interpret_replacevarwitheq(samples: DataFrame, converter: ReplaceVarWithEq) -> DataFrame:
raise NotImplementedError()
|
# Advent of Code 2019, day 16, part 2. The message offset (the first seven
# digits of the input) lies in the second half of the repeated signal, so for
# every relevant position the FFT pattern degenerates to a suffix of ones:
# one phase is just a running suffix sum taken modulo 10. This avoids the
# infeasible O(n^2) pattern multiplication over the full 6.5M-digit signal.
with open("input.txt", "r") as f:
    digits = [int(char) for char in f.read().strip()]
data = digits * 10000
offset = int("".join(str(d) for d in data[:7]))
assert offset > len(data) // 2, "suffix-sum shortcut only valid in the second half"
tail = data[offset:]
for _ in range(100):
    total = 0
    # walk backwards keeping a running sum: new[i] = sum(old[i:]) % 10
    for i in range(len(tail) - 1, -1, -1):
        total += tail[i]
        tail[i] = total % 10
print("".join(str(d) for d in tail[:8]))
|
##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
IOIDAllocator implementations.
"""
from __future__ import absolute_import
import six
import abc
@six.add_metaclass(abc.ABCMeta)
class AbstractOIDAllocator(object):
@abc.abstractmethod
def set_min_oid(self, cursor, oid):
raise NotImplementedError()
@abc.abstractmethod
def new_oids(self, cursor):
raise NotImplementedError()
# All of these allocators allocate 16 OIDs at a time. In the sequence
# or table, value (n) represents (n * 16 - 15) through (n * 16). So,
# value 1 represents OID block 1-16, 2 represents OID block 17-32,
# and so on. The _oid_range_around helper method returns a list
# around this number sorted in the proper way.
    # Note that range(n * 16 - 15, n * 16 + 1) sorted in descending order
    # is the same as range(n * 16, n * 16 - 16, -1).
if isinstance(range(1), list):
# Py2
def _oid_range_around(self, n):
return range(n * 16, n * 16 - 16, -1)
else:
        def _oid_range_around(self, n):
            # the range is already in descending order; list() materializes
            # it so callers can index and slice
            return list(range(n * 16, n * 16 - 16, -1))
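# Illustrative check (not part of the original module) of the block
# arithmetic described above: sequence value 1 covers OIDs 16 down to 1,
# and value 2 covers OIDs 32 down to 17.
assert list(range(1 * 16, 1 * 16 - 16, -1)) == list(range(16, 0, -1))
assert list(range(2 * 16, 2 * 16 - 16, -1)) == list(range(32, 16, -1))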
|
"""
System-level calls to external tools, directory creation, etc.
Authors:
Thomas A. Hopf
"""
import os
from os import path
import tempfile
import subprocess
import urllib.request
import shutil
import requests
class ResourceError(Exception):
"""
Exception for missing resources (files, URLs, ...)
"""
class ExternalToolError(Exception):
"""
Exception for failing external calculations
"""
def run(cmd, stdin=None, check_returncode=True,
working_dir=None, shell=False, env=None):
"""
Run external program as subprocess.
Parameters
----------
cmd : str or list of str
Command (and optional command line arguments)
stdin : str or byte sequence, optional (default: None)
Input to be sent to STDIN of the process
check_returncode : bool, optional (default=True)
Verify if call had returncode == 0, otherwise raise
ExternalToolError
working_dir : str, optional (default: None)
Change to this directory before running command
shell : bool, optional (default: False)
Invoke shell when calling subprocess (default: False)
env : dict, optional (default: None)
Use this environment for executing the subprocess
Returns
-------
int
Return code of process
stdout
Byte string with stdout output
stderr
Byte string of stderr output
Raises
------
ExternalToolError
"""
try:
with subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, universal_newlines=True,
cwd=working_dir, shell=shell, env=env
) as proc:
(stdout, stderr) = proc.communicate(stdin)
return_code = proc.returncode
if check_returncode and return_code != 0:
raise ExternalToolError(
"Call failed:\ncmd={}\nreturncode={}\nstdout={}\nstderr={}".format(
cmd, return_code, stdout, stderr
)
)
return return_code, stdout, stderr
except (OSError, ValueError) as e:
raise ExternalToolError(
"Call to external tool failed and did not return: {}".format(cmd)
) from e
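# Usage sketch (illustrative; assumes a POSIX system with `echo` on PATH):
#
#     return_code, stdout, stderr = run(["echo", "hello"])
#     assert return_code == 0 and stdout.strip() == "hello"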
def valid_file(file_path):
"""
Verify if a file exists and is not empty.
Parameters
----------
file_path : str
Path to file to check
Returns
-------
bool
True if file exists and is non-zero size,
False otherwise.
"""
try:
return os.stat(file_path).st_size > 0
except (OSError, TypeError):
# catch TypeError for nonsense paths, e.g. None
return False
def verify_resources(message, *args):
"""
Verify if a set of files exists and is not empty.
Parameters
----------
message : str
Message to display with raised ResourceError
*args : List of str
Path(s) of file(s) to be checked
Raises
------
ResourceError
If any of the resources does not exist or is empty
"""
invalid = [str(f) for f in args if not valid_file(f)]
if len(invalid) > 0:
raise ResourceError(
"{}:\n{}".format(message, ", ".join(invalid))
)
else:
return True
def create_prefix_folders(prefix):
"""
    Create a directory tree contained in a prefix.
    Parameters
    ----------
    prefix : str
        Prefix containing directory tree
"""
dirname = path.dirname(prefix)
if dirname != "":
makedirs(dirname)
def makedirs(directories):
"""
Create directory subtree, some or all of the folders
may already exist.
Parameters
----------
directories : str
Directory subtree to create
"""
os.makedirs(directories, exist_ok=True)
def insert_dir(prefix, *dirs, rootname_subdir=True):
"""
Create new path by inserting additional
directories into the folder tree of prefix
    (but keeping the filename prefix at the end).
Parameters
----------
prefix : str
Prefix of path that should be extended
*dirs : str
Add these directories at the end of path
rootname_subdir : bool, optional (default: True)
Given /my/path/prefix,
* if True, creates structure like
/my/path/prefix/*dirs/prefix
* if False, creates structure like
/my/path/*dirs/prefix
Returns
-------
str
Extended path
"""
base_dir, rootname = path.split(prefix)
if rootname_subdir:
return path.join(prefix, *dirs, rootname)
else:
return path.join(base_dir, *dirs, rootname)
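# Illustrative behaviour (assumes POSIX path separators):
#
#     insert_dir("/my/path/prefix", "align")
#     # -> "/my/path/prefix/align/prefix"
#     insert_dir("/my/path/prefix", "align", rootname_subdir=False)
#     # -> "/my/path/align/prefix"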
def temp():
"""
Create a temporary file
Returns
-------
str
Path of temporary file
"""
    handle, name = tempfile.mkstemp()
    os.close(handle)  # close the low-level handle so the descriptor is not leaked
return name
def tempdir():
"""
Create a temporary directory
Returns
-------
str
Path of temporary directory
"""
return tempfile.mkdtemp()
def write_file(file_path, content):
"""
Writes content to output file
Parameters
----------
file_path : str
Path of output file
content : str
Content to be written to file
"""
with open(file_path, "w") as f:
f.write(content)
def get(url, output_path=None, allow_redirects=False):
"""
Download external resource
Parameters
----------
url : str
URL of resource that should be downloaded
output_path: str, optional
Save contents of URL to this file
(only for text files)
allow_redirects: bool
Allow redirects by server or not
Returns
-------
r : requests.models.Response
Response object, use r.text to access text,
r.json() to decode json, and r.content for
raw bytestring
Raises
------
ResourceError
"""
try:
r = requests.get(url, allow_redirects=allow_redirects)
if r.status_code != requests.codes.ok:
raise ResourceError(
"Invalid status code ({}) for URL: {}".format(
r.status_code, url
)
)
if output_path is not None:
try:
write_file(output_path, r.text)
except IOError as e:
raise ResourceError(
"Could not save to file: {}".format(output_path)
) from e
return r
except requests.exceptions.RequestException as e:
raise ResourceError() from e
def get_urllib(url, output_path):
"""
Download external resource to file using urllib.
This function is intended for cases where get()
implemented using requests can not be used, e.g.
for download from an FTP server.
Parameters
----------
url : str
URL of resource that should be downloaded
    output_path : str
        Save contents of URL to this file (written in binary mode)
"""
with urllib.request.urlopen(url) as r, open(output_path, 'wb') as f:
shutil.copyfileobj(r, f)
|
import pytest
from elkconfdparser import errors
from elkconfdparser import parser
class TestDropStack:
@pytest.mark.parametrize(
"input_root, input_stack, expected_root, expected_stack",
[
({}, [], {}, []),
({}, [1], {}, [1]),
({}, [2, 1], {1: [2]}, []),
({1: [8]}, [2, 1], {1: [8, 2]}, []),
({1: [8], 3: [9]}, [2, 1], {1: [8, 2], 3: [9]}, []),
],
)
def testTwoOperands(self, input_root, input_stack, expected_root, expected_stack):
assert parser._drop_stack(input_root, input_stack) is None
assert input_root == expected_root
assert input_stack == expected_stack
@pytest.mark.parametrize(
"input_root, input_stack, expected_root, expected_stack",
[
({}, [3, 2, 1], {1: [2]}, [3]),
],
)
def testMultipleOperands(self, input_root, input_stack, expected_root, expected_stack):
with pytest.raises(errors.StackNotEmptyException, match=r'.*operands left.*'):
parser._drop_stack(input_root, input_stack)
assert input_root == expected_root
assert input_stack == expected_stack
class TestParse:
@pytest.mark.parametrize(
"test_input, expected",
[
("", {}),
(" ", {}),
(" \n ", {}),
("a{}", {'a': [{}]}),
("aa{}", {'aa': [{}]}),
(" a{} ", {'a': [{}]}),
(" a { } ", {'a': [{}]}),
(" \na\n \n{\n \n}\n ", {'a': [{}]}),
('a{b=>"c"}', {'a': [{'b': ['c']}]}),
('\na\n\n{\nb\n=>\n"c"\n}\n', {'a': [{'b': ['c']}]}),
],
)
def testSectionDetected(self, test_input, expected):
assert parser.parse(test_input) == expected
@pytest.mark.parametrize(
"test_input, expected",
[
('b=>"c"', [{'b': ['c']}]),
],
)
def testSectionValueDetection(self, test_input, expected):
test_input = f"a {{ {test_input} }}"
assert parser.parse(test_input)['a'] == expected
@pytest.mark.parametrize(
"test_input, expected",
[
('"the string"', ['the string']),
('"\\"quoted string\\""', ['"quoted string"']),
('"middle \\"quoted\\" string"', ['middle "quoted" string']),
('"unpair \\"\\"\\" string"', ['unpair """ string']),
],
)
def testSectionAttrValue(self, test_input, expected):
test_input = f"a {{ b => {test_input} }}"
assert parser.parse(test_input)['a'][0]['b'] == expected
def testReal(self):
test_input = """
aa {
bb {
cc => "dd"
ee => "ff"
}
gg {
hh => "the string 1"
jj => "\\"with\\" quotes 1"
}
gg {
hh => "the string 2"
jj => "\\"with\\" quotes 2"
}
}
"""
expected = {
"aa": [
{
"bb": [
{
"cc": ["dd"],
"ee": ["ff"],
}
],
"gg": [
{
"hh": ["the string 1"],
"jj": ["\"with\" quotes 1"],
},
{
"hh": ["the string 2"],
"jj": ["\"with\" quotes 2"],
}
]
}
]
}
assert parser.parse(test_input) == expected
|
from itertools import chain, islice, starmap, cycle
from operator import add
def drop(n,xs): return islice(xs,n,None)
def take(n,xs): return list(islice(xs,n))
class iself:
def __init__(self, xs_ctor, nvp_max=4):
self.xs_ctor=xs_ctor; self.nvp_max=nvp_max
def __iter__(self):
buf = []
ref_ivp = [0]; n = self.nvp_max
xs = self.xs_ctor(iself._vpiter(buf, ref_ivp))
xz = iter(xs)
while True:
i = ref_ivp[0]
for _ in range(n):
buf.append(next(xz))
for i1 in range(i, i+n): yield buf[i1]
ref_ivp[0] += n
@staticmethod
def _vpiter(buf, ref_ivp):
        i = ref_ivp[0] - 2
while True:
try: yield buf[i]; i += 1
except IndexError: break
fibs = iself(lambda fibs: chain([1,1], starmap(add, zip(fibs, drop(1,fibs)))))
def on_each(op, xs):
for x in xs: op(x); yield x
def sdbg(s,xs): return on_each(lambda x: print(s,x), xs)
class iself1:
def __init__(self,xs_ctor,nvp_max):
self.xs_ctor=xs_ctor; self.nvp_max=nvp_max
def __iter__(self):
n = self.nvp_max
buf = [None for _ in range(n)]
ri = [0]
xz = iter(self.xs_ctor(iself1._vp(buf, ri)))
while True:
i = ri[0]
buf[i] = next(xz)
yield buf[i]
ri[0] += 1
if ri[0] == n: ri[0] = 0
@staticmethod
    def _vp(buf, ri):  # create this inside __iter__ each time, or only once?
i = ri[0]
while True:
try: yield buf[i]; i+=1
except IndexError: i=0
# Even though I only half understood this, the guess turned out right...
# thinking of drop(1) as "selecting" b; a ring buffer works too, right?
fibs = iself1(
lambda s: chain([1,1], starmap(add, zip(sdbg("a",s), sdbg("c",drop(1,s)))) ),
2
)
fibns = take(int(input("n?")), fibs)
def zip_next(xs): return iself1(lambda s: zip(xs, drop(1, xs)), 1)
print(fibns)
fibns = fibns[1:]
print(list(zip_next(fibns)))
if all(starmap(lambda a,b: a<b, zip_next(fibns))):
print("是单调递增序列")
|
def facto_iterative(n):
fac = 1
for i in range(n):
fac = fac * (i+1)
return fac
number = int(input("Enter some value\n"))
print(facto_iterative(number))
def facto_recursive(n):
    if n <= 1:
        return 1
    else:
        return n * facto_recursive(n - 1)
number = int(input("Enter some value\n"))
print(facto_recursive(number))
def fibonacci(n):
if n==0:
return 0
elif n==1:
return 1
else:
return fibonacci(n-1)+fibonacci(n-2)
number = int(input("Enter some value\n"))
print(fibonacci(number)) |
from django.conf.urls import url, include
from website.views import index
urlpatterns = [
url(r'^$', index, name='index')
]
|
#!/usr/bin/env python
"""Pipeline script for data pre-processing."""
import os
import glob
import numpy as np
from ugali.analysis.pipeline import Pipeline
import ugali.preprocess.pixelize
import ugali.preprocess.maglims
from ugali.utils.logger import logger
components = ['pixelize','density','maglims','simple','split']
defaults = ['pixelize','density','simple']
def run(self):
# The three mask options are (semi-)mutually exclusive
if np.in1d(['maglims','simple','split'],self.opts.run).sum() > 1:
raise Exception("Too many 'mask' run options.")
if 'pixelize' in self.opts.run:
# Pixelize the raw catalog data
logger.info("Running 'pixelize'...")
rawdir = self.config['data']['dirname']
rawfiles = sorted(glob.glob(os.path.join(rawdir,'*.fits')))
x = ugali.preprocess.pixelize.pixelizeCatalog(rawfiles,self.config)
if 'density' in self.opts.run:
        # Pixelize the object density
logger.info("Running 'density'...")
x = ugali.preprocess.pixelize.pixelizeDensity(self.config,nside=512,force=self.opts.force)
if 'maglims' in self.opts.run:
# Calculate magnitude limits
logger.info("Running 'maglims'...")
maglims = ugali.preprocess.maglims.Maglims(self.config)
x = maglims.run(force=self.opts.force)
if 'simple' in self.opts.run:
# Calculate simple magnitude limits
logger.info("Running 'simple'...")
#ugali.preprocess.maglims.simple_maglims(self.config,force=self.opts.force)
maglims = ugali.preprocess.maglims.Maglims(self.config)
x = maglims.run(simple=True,force=self.opts.force)
if 'split' in self.opts.run:
# Split up a pre-existing maglim map
logger.info("Running 'split'...")
ugali.preprocess.maglims.split(self.config,'split',force=self.opts.force)
Pipeline.run = run
Pipeline.defaults = defaults
pipeline = Pipeline(__doc__,components)
pipeline.parse_args()
pipeline.execute()
|
#!/usr/bin/env python
import warnings
import numpy as np
from scipy.stats import chi
EPS = 1e-8
class MultiVariateNormalDistribution(object):
def __init__(self, shift, scale, cov, dim=None):
# main components
self.shift = shift
self.scale = scale
self.cov = cov
# params
self.dim = dim if dim is not None else shift.shape[0]
# states
self.eigvecs = None
self.eigvals = None
self.inv_cov = None
self.invsqrt_cov = None
self.rev = None
# decompose cov
self.decomposed = False
def decompose(self, force_positive=False, shrinkage=0, rescale=None, bound_size=float('inf')):
# force symmetric
self.cov = (self.cov + self.cov.T) / 2.0
# solve
self.eigvals, self.eigvecs = np.linalg.eigh(self.cov)
# force positive definite
if force_positive:
self.eigvals = np.clip(self.eigvals, EPS, None)
# shrinkage
if shrinkage > 0:
trace_cov = np.sum(self.eigvals)
self.eigvals = (1 - shrinkage) * self.eigvals + shrinkage * (trace_cov / self.dim) * np.ones(self.dim)
# rescale
if rescale is not None:
ratio = (self.scale / rescale) ** 2
self.cov *= ratio
self.eigvals *= ratio
self.scale = rescale
# restrict max length
base_length = chi.mean(self.dim) + 2.0 * chi.std(self.dim)
max_eigval = (bound_size / base_length) ** 2
self.eigvals = np.clip(self.eigvals, EPS, max_eigval)
# computing
with warnings.catch_warnings(record=True) as w:
self.cov = np.dot(self.eigvecs, np.diag(self.eigvals)).dot(self.eigvecs.T)
#inv cov
self.inv_cov = np.dot(self.eigvecs, np.diag(self.eigvals ** -1)).dot(self.eigvecs.T)
# inv sqrt cov
self.invsqrt_cov = np.dot(self.eigvecs, np.diag(self.eigvals ** -0.5)).dot(self.eigvecs.T)
# sqrt cov
self.sqrt_cov = np.dot(self.eigvecs, np.diag(self.eigvals ** 0.5)).dot(self.eigvecs.T)
# reverse projection matrix
self.rev = np.dot(np.diag(self.eigvals ** -0.5), self.eigvecs.T)
# handle warnings
        if len(w) > 0:
            print("Eigvals: ", self.eigvals)
            print("Sigma: ", self.scale)
            raise Exception("Negative eigval")
        # mark the decomposition as complete so sample() does not redo it
        self.decomposed = True
def sample(self, num, remap=None):
if not self.decomposed:
self.decompose()
bias = np.random.normal(size=[num, self.dim])
amp_bias = self.scale * (self.eigvals ** 0.5)[np.newaxis,:] * bias
rot_bias = np.dot(amp_bias, self.eigvecs.T)
samples = self.shift[np.newaxis,:] + rot_bias
if remap is not None:
samples = remap(samples)
return samples
def dispersion(self, X):
x = X.reshape(-1, self.dim)
y = x - self.shift[np.newaxis, :]
z = np.dot(y / self.scale, self.invsqrt_cov)
dens = np.sum(z ** 2, axis=1)
if len(X.shape) == 1:
dens = dens[0]
return dens
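# Minimal usage sketch (illustrative, not part of the original module):
# draw a few samples from a 2-D standard normal and compute their squared
# Mahalanobis-style dispersion from the mean.
if __name__ == "__main__":
    mvn = MultiVariateNormalDistribution(shift=np.zeros(2), scale=1.0, cov=np.eye(2))
    points = mvn.sample(5)
    print(points.shape)                  # (5, 2)
    print(mvn.dispersion(points).shape)  # (5,)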
|
import mongoengine
from .trace import WrappedConnect
# Original connect function
_connect = mongoengine.connect
def patch():
setattr(mongoengine, "connect", WrappedConnect(_connect))
def unpatch():
setattr(mongoengine, "connect", _connect)
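# Usage sketch (illustrative): patch() swaps mongoengine.connect for the
# traced wrapper, unpatch() restores the original.
#
#     patch()
#     client = mongoengine.connect("some_db")  # now goes through WrappedConnect
#     unpatch()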
|
from sklearn.cluster import KMeans
from collections import Counter
import numpy as np
import cv2 #for resizing image
def get_dominant_color(image, k=4, image_processing_size = None):
"""
takes an image as input
returns the dominant color of the image as a list
dominant color is found by running k means on the
pixels & returning the centroid of the largest cluster
processing time is sped up by working with a smaller image;
this resizing can be done with the image_processing_size param
which takes a tuple of image dims as input
>>> get_dominant_color(my_image, k=4, image_processing_size = (25, 25))
[56.2423442, 34.0834233, 70.1234123]
"""
#resize image if new dims provided
if image_processing_size is not None:
image = cv2.resize(image, image_processing_size,
interpolation = cv2.INTER_AREA)
#reshape the image to be a list of pixels
image = image.reshape((image.shape[0] * image.shape[1], 3))
#cluster and assign labels to the pixels
clt = KMeans(n_clusters = k)
labels = clt.fit_predict(image)
#count labels to find most popular
label_counts = Counter(labels)
#subset out most popular centroid
dominant_color = clt.cluster_centers_[label_counts.most_common(1)[0][0]]
return list(dominant_color)
# By Luke https://stackoverflow.com/a/17507369
def NN(A, start):
"""Nearest neighbor algorithm.
A is an NxN array indicating distance between N locations
start is the index of the starting location
Returns the path and cost of the found solution
"""
path = [start]
cost = 0
N = A.shape[0]
mask = np.ones(N, dtype=bool) # boolean values indicating which
# locations have not been visited
mask[start] = False
for i in range(N-1):
last = path[-1]
next_ind = np.argmin(A[last][mask]) # find minimum of remaining locations
next_loc = np.arange(N)[mask][next_ind] # convert to original location
path.append(next_loc)
mask[next_loc] = False
cost += A[last, next_loc]
return path, cost
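# Tiny worked example (illustrative, not from the original source): three
# points on a line with pairwise distances 1, 2 and 3; starting at index 0
# the greedy tour visits 1 then 2, for a total cost of 3.
_demo_distances = np.array([[0., 1., 3.],
                            [1., 0., 2.],
                            [3., 2., 0.]])
assert NN(_demo_distances, 0) == ([0, 1, 2], 3.0)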
def load_hsv_image(file, width=100, height=None):
    image = cv2.imread(file)
    if height is None:
        h, w, _ = image.shape
        height = int((width * h) / w)  # preserve the aspect ratio
    image = cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)
    return cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
if __name__=='__main__':
bgr_image = cv2.resize(cv2.imread('windy tree.jpg'), (100,125),
interpolation = cv2.INTER_AREA)
hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
dom_color = get_dominant_color(hsv_image)
#create a square showing dominant color of equal size to input image
dom_color_hsv = np.full(bgr_image.shape, dom_color, dtype='uint8')
#convert to bgr color space for display
dom_color_bgr = cv2.cvtColor(dom_color_hsv, cv2.COLOR_HSV2BGR)
#concat input image and dom color square side by side for display
output_image = np.hstack((bgr_image, dom_color_bgr))
    #show results to screen
    cv2.imshow('Image Dominant Color', output_image)
    cv2.waitKey(0)
|
#!/usr/bin/env python
# Copyright (c) 2015-2021 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
from binaryninja.update import UpdateChannel, are_auto_updates_enabled, set_auto_updates_enabled, is_update_installation_pending, install_pending_update
from binaryninja import core_version
import datetime
from six.moves import input
chandefault = UpdateChannel.list[0].name
channel = None
versions = []
def load_channel(newchannel):
global channel
global versions
if (channel is not None and newchannel == channel.name):
print("Same channel, not updating.")
else:
try:
print("Loading channel %s" % newchannel)
channel = UpdateChannel[newchannel]
print("Loading versions...")
versions = channel.versions
except Exception:
print("%s is not a valid channel name. Defaulting to " % chandefault)
channel = UpdateChannel[chandefault]
def select(version):
done = False
date = datetime.datetime.fromtimestamp(version.time).strftime('%c')
while not done:
print("Version:\t%s" % version.version)
print("Updated:\t%s" % date)
print("Notes:\n\n-----\n%s" % version.notes)
print("-----")
print("\t1)\tSwitch to version")
print("\t2)\tMain Menu")
selection = input('Choice: ')
if selection.isdigit():
selection = int(selection)
else:
selection = 0
if (selection == 2):
done = True
elif (selection == 1):
if (version.version == channel.latest_version.version):
print("Requesting update to latest version.")
else:
print("Requesting update to prior version.")
if are_auto_updates_enabled():
print("Disabling automatic updates.")
set_auto_updates_enabled(False)
if (version.version == core_version()):
print("Already running %s" % version.version)
else:
print("version.version %s" % version.version)
print("core_version %s" % core_version())
print("Downloading...")
print(version.update())
print("Installing...")
                if is_update_installation_pending():
#note that the GUI will be launched after update but should still do the upgrade headless
install_pending_update()
# forward updating won't work without reloading
sys.exit()
else:
print("Invalid selection")
def list_channels():
done = False
print("\tSelect channel:\n")
while not done:
channel_list = UpdateChannel.list
for index, item in enumerate(channel_list):
print("\t%d)\t%s" % (index + 1, item.name))
print("\t%d)\t%s" % (len(channel_list) + 1, "Main Menu"))
selection = input('Choice: ')
if selection.isdigit():
selection = int(selection)
else:
selection = 0
if (selection <= 0 or selection > len(channel_list) + 1):
print("%s is an invalid choice." % selection)
else:
done = True
if (selection != len(channel_list) + 1):
load_channel(channel_list[selection - 1].name)
def toggle_updates():
set_auto_updates_enabled(not are_auto_updates_enabled())
def main():
global channel
done = False
load_channel(chandefault)
while not done:
print("\n\tBinary Ninja Version Switcher")
print("\t\tCurrent Channel:\t%s" % channel.name)
print("\t\tCurrent Version:\t%s" % core_version())
print("\t\tAuto-Updates On:\t%s\n" % are_auto_updates_enabled())
for index, version in enumerate(versions):
date = datetime.datetime.fromtimestamp(version.time).strftime('%c')
print("\t%d)\t%s (%s)" % (index + 1, version.version, date))
print("\t%d)\t%s" % (len(versions) + 1, "Switch Channel"))
print("\t%d)\t%s" % (len(versions) + 2, "Toggle Auto Updates"))
print("\t%d)\t%s" % (len(versions) + 3, "Exit"))
selection = input('Choice: ')
if selection.isdigit():
selection = int(selection)
else:
selection = 0
if (selection <= 0 or selection > len(versions) + 3):
print("%d is an invalid choice.\n\n" % selection)
else:
if (selection == len(versions) + 3):
done = True
elif (selection == len(versions) + 2):
toggle_updates()
elif (selection == len(versions) + 1):
list_channels()
else:
select(versions[selection - 1])
if __name__ == "__main__":
main()
|
"""Cart-related forms and fields."""
from django import forms
from django.conf import settings
from django.core.exceptions import NON_FIELD_ERRORS, ObjectDoesNotExist
from django.utils.translation import npgettext_lazy, pgettext_lazy
from django_countries.fields import LazyTypedChoiceField, countries
from ..core.exceptions import InsufficientStock
from ..shipping.utils import get_shipment_options
class QuantityField(forms.IntegerField):
"""A specialized integer field with initial quantity and min/max values."""
def __init__(self, **kwargs):
super().__init__(
min_value=0, max_value=settings.MAX_CART_LINE_QUANTITY,
initial=1, **kwargs)
class AddToCartForm(forms.Form):
"""Add-to-cart form.
Allows selection of a product variant and quantity.
The save method adds it to the cart.
"""
quantity = QuantityField(
label=pgettext_lazy('Add to cart form field label', 'Quantity'))
error_messages = {
'not-available': pgettext_lazy(
'Add to cart form error',
'Sorry. This product is currently not available.'),
'empty-stock': pgettext_lazy(
'Add to cart form error',
'Sorry. This product is currently out of stock.'),
'variant-does-not-exists': pgettext_lazy(
'Add to cart form error',
'Oops. We could not find that product.'),
'insufficient-stock': npgettext_lazy(
'Add to cart form error',
'Only %d remaining in stock.',
'Only %d remaining in stock.')}
def __init__(self, *args, **kwargs):
self.cart = kwargs.pop('cart')
self.product = kwargs.pop('product')
self.discounts = kwargs.pop('discounts', ())
super().__init__(*args, **kwargs)
def clean(self):
"""Clean the form.
Makes sure the total quantity in cart (taking into account what was
already there) does not exceed available quantity.
"""
cleaned_data = super().clean()
quantity = cleaned_data.get('quantity')
if quantity is None:
return cleaned_data
try:
product_variant = self.get_variant(cleaned_data)
except ObjectDoesNotExist:
msg = self.error_messages['variant-does-not-exists']
self.add_error(NON_FIELD_ERRORS, msg)
else:
cart_line = self.cart.get_line(product_variant)
used_quantity = cart_line.quantity if cart_line else 0
new_quantity = quantity + used_quantity
try:
product_variant.check_quantity(new_quantity)
except InsufficientStock as e:
remaining = e.item.get_stock_quantity() - used_quantity
if remaining:
msg = self.error_messages['insufficient-stock']
self.add_error('quantity', msg % remaining)
else:
msg = self.error_messages['empty-stock']
self.add_error('quantity', msg)
return cleaned_data
def save(self):
"""Add the selected product variant and quantity to the cart."""
product_variant = self.get_variant(self.cleaned_data)
return self.cart.add(variant=product_variant,
quantity=self.cleaned_data['quantity'])
def get_variant(self, cleaned_data):
"""Return a product variant that matches submitted values.
This allows specialized implementations to select the variant based on
multiple fields (like size and color) instead of having a single
variant selection widget.
"""
raise NotImplementedError()
class ReplaceCartLineForm(AddToCartForm):
"""Replace quantity in cart form.
Similar to AddToCartForm but its save method replaces the quantity.
"""
def __init__(self, *args, **kwargs):
self.variant = kwargs.pop('variant')
kwargs['product'] = self.variant.product
super().__init__(*args, **kwargs)
self.cart_line = self.cart.get_line(self.variant)
self.fields['quantity'].widget.attrs = {
'min': 1, 'max': settings.MAX_CART_LINE_QUANTITY}
def clean_quantity(self):
"""Clean the quantity field.
Checks if target quantity does not exceed the currently available
quantity.
"""
quantity = self.cleaned_data['quantity']
try:
self.variant.check_quantity(quantity)
except InsufficientStock as e:
msg = self.error_messages['insufficient-stock']
raise forms.ValidationError(
msg % e.item.get_stock_quantity())
return quantity
def clean(self):
"""Clean the form skipping the add-to-form checks."""
# explicitly skip parent's implementation
# pylint: disable=E1003
return super(AddToCartForm, self).clean()
def get_variant(self, cleaned_data):
"""Return the matching variant.
In this case we explicitly know the variant as we're modifying an
existing line in cart.
"""
return self.variant
def save(self):
"""Replace the selected product's quantity in cart."""
product_variant = self.get_variant(self.cleaned_data)
return self.cart.add(product_variant, self.cleaned_data['quantity'],
replace=True)
class CountryForm(forms.Form):
"""Country selection form."""
country = LazyTypedChoiceField(
label=pgettext_lazy('Country form field label', 'Country'),
choices=countries)
def get_shipment_options(self):
"""Return a list of shipping methods for the selected country."""
code = self.cleaned_data['country']
return get_shipment_options(code)
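# Hypothetical concrete form (illustrative only, not part of this module):
# a shop would typically subclass AddToCartForm and resolve the variant from
# its own fields. The `variants` manager below is an assumption about the
# product model.
class SimpleAddToCartForm(AddToCartForm):
    """Add-to-cart form for products with a single variant choice field."""
    variant = forms.ModelChoiceField(queryset=None)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['variant'].queryset = self.product.variants.all()
    def get_variant(self, cleaned_data):
        return cleaned_data['variant']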
|
#!/usr/bin/env python
# Copyright 2008 by Jonathan Tang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Python library & command-line tool for documentation and dependency analysis
of JavaScript files.
Some of the features this offers:
List all dependencies of a file or files:
>>> CodeBaseDoc(['examples'])['subclass.js'].module.all_dependencies
['module.js', 'module_closure.js', 'class.js', 'subclass.js']
Programmatically access properties of the documentation:
>>> CodeBaseDoc(['examples'])['subclass.js']['public_method'].doc
'A public method.'
>>> CodeBaseDoc(['examples'])['subclass.js']['public_method'].is_private
False
Generate documentation for a set of files:
>>> CodeBaseDoc(['examples']).save_docs(None, 'js_apidocs')
Tag reference is similar to JSDoc: http://jsdoc.sourceforge.net/#tagref. See usage() for command line options.
"""
import os, re, sys, getopt, cgi
try:
import cjson
encode_json = lambda val: cjson.encode(val)
except ImportError:
try:
import simplejson
encode_json = lambda val: simplejson.dumps(val)
except ImportError:
def encode_json(val):
raise ImportError(
"Either cjson or simplejson is required for JSON encoding")
##### INPUT/OUTPUT #####
def warn(format, *args):
"""
Print out a warning on STDERR.
"""
sys.stderr.write(format % args + '\n')
def flatten(iter_of_iters):
"""
Flatten an iterator of iterators into a single, long iterator, exhausting
each subiterator in turn.
>>> flatten([[1, 2], [3, 4]])
[1, 2, 3, 4]
"""
retval = []
for val in iter_of_iters:
retval.extend(val)
return retval
def any(iter):
""" For < Python2.5 compatibility. """
for elem in iter:
if elem:
return True
return False
def is_js_file(filename):
"""
Return true if the filename ends in .js and is not a packed or
minified file (no '.pack' or '.min' in the filename)
>>> is_js_file('jquery.min.js')
False
>>> is_js_file('foo.json')
False
>>> is_js_file('ui.combobox.js')
True
"""
return filename.endswith('.js') \
and not '.pack' in filename \
and not '.min' in filename
def trim_js_ext(filename):
"""
If `filename` ends with .js, trims the extension off.
>>> trim_js_ext('foo.js')
'foo'
>>> trim_js_ext('something_else.html')
'something_else.html'
"""
if filename.endswith('.js'):
return filename[:-3]
else:
return filename
def list_js_files(dir):
"""
Generator for all JavaScript files in the directory, recursively
>>> 'examples/module.js' in list(list_js_files('examples'))
True
"""
for dirpath, dirnames, filenames in os.walk(dir):
for filename in filenames:
if is_js_file(filename):
yield os.path.join(dirpath, filename)
def get_file_list(paths):
"""
Return a list of all JS files, given the root paths.
"""
return flatten(list_js_files(path) for path in paths)
def read_file(path):
"""
Open a file, reads it into a string, closes the file, and returns
the file text.
"""
fd = open(path)
try:
return fd.read()
finally:
fd.close()
def save_file(path, text):
"""
Save a string to a file. If the containing directory(ies) doesn't exist,
this creates it.
"""
dir = os.path.dirname(path)
    if dir and not os.path.exists(dir):
os.makedirs(dir)
fd = open(path, 'wb')
try:
if type(text) == str:
text = text.encode('utf-8')
fd.write(text)
finally:
fd.close()
##### Parsing utilities #####
def split_delimited(delimiters, split_by, text):
"""
Generator that walks the ``text`` and splits it into an array on
``split_by``, being careful not to break inside a delimiter pair.
``delimiters`` should be an even-length string with each pair of matching
delimiters listed together, open first.
>>> list(split_delimited('{}[]', ',', ''))
['']
>>> list(split_delimited('', ',', 'foo,bar'))
['foo', 'bar']
>>> list(split_delimited('[]', ',', 'foo,[bar, baz]'))
['foo', '[bar, baz]']
>>> list(split_delimited('{}', ' ', '{Type Name} name Desc'))
['{Type Name}', 'name', 'Desc']
>>> list(split_delimited('[]{}', ',', '[{foo,[bar, baz]}]'))
['[{foo,[bar, baz]}]']
Two adjacent delimiters result in a zero-length string between them:
>>> list(split_delimited('{}', ' ', '{Type Name} Desc'))
['{Type Name}', '', 'Desc']
``split_by`` may be a predicate function instead of a string, in which
case it should return true on a character to split.
>>> list(split_delimited('', lambda c: c in '[]{}, ', '[{foo,[bar, baz]}]'))
['', '', 'foo', '', 'bar', '', 'baz', '', '', '']
"""
delims = [0] * int(len(delimiters) / 2)
actions = {}
for i in range(0, len(delimiters), 2):
actions[delimiters[i]] = (int(i / 2), 1)
actions[delimiters[i + 1]] = (int(i / 2), -1)
if isinstance(split_by, str):
def split_fn(c): return c == split_by
else:
split_fn = split_by
last = 0
for i in range(len(text)):
c = text[i]
if split_fn(c) and not any(delims):
yield text[last:i]
last = i + 1
try:
which, dir = actions[c]
delims[which] = delims[which] + dir
except KeyError:
pass # Normal character
yield text[last:]
def get_doc_comments(text):
r"""
Return a list of all documentation comments in the file text. Each
comment is a pair, with the first element being the comment text and
the second element being the line after it, which may be needed to
guess function & arguments.
>>> get_doc_comments(read_file('examples/module.js'))[0][0][:40]
'/**\n * This is the module documentation.'
>>> get_doc_comments(read_file('examples/module.js'))[1][0][7:50]
'This is documentation for the first method.'
>>> get_doc_comments(read_file('examples/module.js'))[1][1]
'function the_first_function(arg1, arg2) '
>>> get_doc_comments(read_file('examples/module.js'))[2][0]
'/** This is the documentation for the second function. */'
"""
def make_pair(match):
comment = match.group()
try:
end = text.find('\n', match.end(0)) + 1
if '@class' not in comment:
next_line = next(split_delimited('()', '\n', text[end:]))
else:
next_line = text[end:text.find('\n', end)]
except StopIteration:
next_line = ''
return (comment, next_line)
return [make_pair(match) for match in re.finditer('/\*\*(.*?)\*/',
text, re.DOTALL)]
def strip_stars(doc_comment):
r"""
Strip leading stars from a doc comment.
>>> strip_stars('/** This is a comment. */')
'This is a comment.'
>>> strip_stars('/**\n * This is a\n * multiline comment. */')
'This is a\n multiline comment.'
>>> strip_stars('/** \n\t * This is a\n\t * multiline comment. \n*/')
'This is a\n multiline comment.'
"""
return re.sub('\n\s*?\*\s*?', '\n', doc_comment[3:-2]).strip()
def split_tag(section):
"""
Split the JSDoc tag text (everything following the @) at the first
whitespace. Returns a tuple of (tagname, body).
"""
splitval = re.split('\s+', section, 1)
tag, body = len(splitval) > 1 and splitval or (splitval[0], '')
return tag.strip(), body.strip()
FUNCTION_REGEXPS = [
'function (\w+)',
'(\w+):\sfunction',
'\.(\w+)\s*=\s*function',
]
def guess_function_name(next_line, regexps=FUNCTION_REGEXPS):
"""
Attempt to determine the function name from the first code line
following the comment. The patterns recognized are described by
`regexps`, which defaults to FUNCTION_REGEXPS. If a match is successful,
returns the function name. Otherwise, returns None.
"""
for regexp in regexps:
match = re.search(regexp, next_line)
if match:
return match.group(1)
return None
def guess_parameters(next_line):
"""
Attempt to guess parameters based on the presence of a parenthesized
group of identifiers. If successful, returns a list of parameter names;
otherwise, returns None.
"""
match = re.search('\(([\w\s,]+)\)', next_line)
if match:
return [arg.strip() for arg in match.group(1).split(',')]
else:
return None
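# Illustrative checks (not from the original source) of the guessing
# heuristics above, using the default FUNCTION_REGEXPS:
assert guess_function_name('function foo(a, b) {') == 'foo'
assert guess_function_name('bar: function(x) {') == 'bar'
assert guess_function_name('Thing.prototype.baz = function(y) {') == 'baz'
assert guess_parameters('function foo(a, b) {') == ['a', 'b']
assert guess_parameters('function noargs() {') is None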
def parse_comment(doc_comment, next_line):
r"""
Split the raw comment text into a dictionary of tags. The main comment
body is included as 'doc'.
>>> comment = get_doc_comments(read_file('examples/module.js'))[4][0]
>>> parse_comment(strip_stars(comment), '')['doc']
'This is the documentation for the fourth function.\n\n Since the function being documented is itself generated from another\n function, its name needs to be specified explicitly. using the @function tag'
>>> parse_comment(strip_stars(comment), '')['function']
'not_auto_discovered'
If there are multiple tags with the same name, they're included as a list:
>>> parse_comment(strip_stars(comment), '')['param']
['{String} arg1 The first argument.', '{Int} arg2 The second argument.']
"""
sections = re.split('\n\s*@', doc_comment)
tags = {
'doc': sections[0].strip(),
'guessed_function': guess_function_name(next_line),
'guessed_params': guess_parameters(next_line)
}
for section in sections[1:]:
tag, body = split_tag(section)
if tag in tags:
existing = tags[tag]
try:
existing.append(body)
except AttributeError:
tags[tag] = [existing, body]
else:
tags[tag] = body
return tags
def parse_comments_for_file(filename):
"""
Return a list of all parsed comments in a file. Mostly for testing &
interactive use.
"""
return [parse_comment(strip_stars(comment), next_line)
for comment, next_line in get_doc_comments(read_file(filename))]
#### Classes #####
class CodeBaseDoc(dict):
"""
Represents the documentation for an entire codebase.
This takes a list of root paths. The resulting object acts like a
dictionary of FileDoc objects, keyed by the filename of the file (relative
to the source root).
>>> CodeBaseDoc(['examples'])['class.js'].name
'class.js'
It also handles dependency & subclass analysis, setting the appropriate
fields on the contained objects. Note that the keys (after prefix
chopping) should match the names declared in @dependency or @see tags;
otherwise, you may get MissingDependencyErrors.
"""
def __init__(self, root_paths, include_private=False):
"""
Create a new `CodeBaseDoc`. `root_paths` is a list of directories
where JavaScript files can be found. @see and @dependency tags
are relative to these paths.
By default, private methods are not included. Pass True to
`include_private` to include them.
"""
self.include_private = include_private
self._populate_files(root_paths, root_paths)
self._build_dependencies()
self._build_superclass_lists()
def _populate_files(self, root_paths, prefix):
files = get_file_list(root_paths)
def key_name(file_name):
if prefix is None:
return os.path.basename(file_name)
for pre in prefix:
if not pre.endswith('/'):
pre = pre + '/'
if file_name.startswith(pre):
return file_name[len(pre):]
return file_name
for file in files:
name = key_name(file)
self[name] = FileDoc(name, read_file(file))
def _build_dependencies(self):
"""
>>> CodeBaseDoc(['examples'])['subclass.js'].module.all_dependencies
['module.js', 'module_closure.js', 'class.js', 'subclass.js']
"""
for module in list(self.values()):
module.set_all_dependencies(find_dependencies([module.name], self))
def _build_superclass_lists(self):
"""
>>> CodeBaseDoc(['examples']).all_classes['MySubClass'].all_superclasses[0].name
'MyClass'
"""
cls_dict = self.all_classes
for cls in list(cls_dict.values()):
cls.all_superclasses = []
superclass = cls.superclass
try:
while superclass:
superclass_obj = cls_dict[superclass]
cls.all_superclasses.append(superclass_obj)
superclass = superclass_obj.superclass
except KeyError:
print("Missing superclass: " + superclass)
def _module_index(self, attr):
return dict((obj.name, obj) for module in list(self.values())
for obj in getattr(module, attr))
@property
def all_functions(self):
"""
Returns a dict of all functions in all modules of the codebase,
keyed by their name.
"""
return self._module_index('functions')
@property
def all_methods(self):
"""
Returns a dict of all methods in all modules.
"""
return self._module_index('methods')
@property
def all_classes(self):
"""
Returns a dict of all classes in all modules.
"""
return self._module_index('classes')
def translate_ref_to_url(self, ref, in_comment=None):
"""
Translates an @see or @link reference to a URL. If the ref is of the
form #methodName, it looks for a method of that name on the class
`in_comment` or parent class of method `in_comment`. In this case, it
returns a local hash URL, since the method is guaranteed to be on the
same page:
>>> doc = CodeBaseDoc(['examples'])
>>> doc.translate_ref_to_url('#public_method', doc.all_methods['private_method'])
'#public_method'
>>> doc.translate_ref_to_url('#public_method', doc.all_classes['MySubClass'])
'#public_method'
If it doesn't find it there, it looks for a global function:
>>> doc.translate_ref_to_url('#make_class')
'module_closure.html#make_class'
A reference of the form ClassName#method_name looks up a specific method:
>>> doc.translate_ref_to_url('MyClass#first_method')
'class.html#first_method'
Finally, a reference of the form ClassName looks up a specific class:
>>> doc.translate_ref_to_url('MyClass')
'class.html#MyClass'
"""
if ref.startswith('#'):
method_name = ref[1:]
if isinstance(in_comment, FunctionDoc) and in_comment.member:
search_in = self.all_classes[in_comment.member]
elif isinstance(in_comment, ClassDoc):
search_in = in_comment
else:
search_in = None
try:
return search_in.get_method(method_name).url
except AttributeError:
pass
def lookup_ref(file_doc):
for fn in file_doc.functions:
if fn.name == method_name:
return fn.url
return None
elif '#' in ref:
class_name, method_name = ref.split('#')
def lookup_ref(file_doc):
for cls in file_doc.classes:
if cls.name == class_name:
try:
return cls.get_method(method_name).url
except AttributeError:
pass
return None
else:
class_name = ref
def lookup_ref(file_doc):
for cls in file_doc.classes:
if cls.name == class_name:
return cls.url
return None
for file_doc in list(self.values()):
url = lookup_ref(file_doc)
if url:
return file_doc.url + url
return ''
def build_see_html(self, see_tags, header_tag, in_comment=None):
def list_tag(see_tag):
return '<li><a href = "%s">%s</a></li>' % (
self.translate_ref_to_url(see_tag, in_comment), see_tag)
if see_tags:
return '<%s>See Also:</%s>\n<ul>\n' % (header_tag, header_tag) + \
'\n'.join(list_tag(tag) for tag in see_tags) + '</ul>'
else:
return ''
def translate_links(self, text, in_comment=None):
"""
Turn all @link tags in `text` into HTML anchor tags.
`in_comment` is the `CommentDoc` that contains the text, for
relative method lookups.
"""
def replace_link(matchobj):
ref = matchobj.group(1)
return '<a href = "%s">%s</a>' % (
self.translate_ref_to_url(ref, in_comment), ref)
return re.sub('{@link ([\w#]+)}', replace_link, text)
def to_json(self, files=None):
"""
Converts the full CodeBaseDoc into JSON text. The optional `files`
list lets you restrict the JSON dict to include only specific files.
"""
return encode_json(self.to_dict(files))
def to_dict(self, files=None):
"""
Converts the CodeBaseDoc into a dictionary containing the to_dict()
representations of each contained file. The optional `files` list
lets you restrict the dict to include only specific files.
>>> CodeBaseDoc(['examples']).to_dict(['class.js']).get('module.js')
>>> CodeBaseDoc(['examples']).to_dict(['class.js'])['class.js'][0]['name']
'MyClass'
"""
keys = files or list(self.keys())
return dict((key, self[key].to_dict()) for key in keys)
def to_html(self):
"""
Builds basic HTML for the full module index.
"""
return '<h1>Module index</h1>\n' + \
make_index('all_modules', list(self.values()))
def save_docs(self, files=None, output_dir=None):
"""
Save documentation files for codebase into `output_dir`. If output
dir is None, it'll refrain from building the index page and build
the file(s) in the current directory.
If `files` is None, it'll build all files in the codebase.
"""
if output_dir:
try:
os.mkdir(output_dir)
except OSError:
pass
try:
import pkg_resources
save_file(os.path.join(output_dir, 'jsdoc.css'),
pkg_resources.resource_string(__name__, 'static/jsdoc.css'))
except (ImportError, IOError):
try:
import shutil
base_dir = os.path.dirname(os.path.realpath(__file__))
css_file = os.path.join(base_dir, 'jsdoc.css')
shutil.copy(css_file, output_dir)
except IOError:
print('jsdoc.css not found. HTML will not be styled.')
save_file('%s/index.html' % output_dir,
build_html_page('Module index', self.to_html()))
else:
output_dir = '.'
if files is None:
files = list(self.keys())
for filename in files:
try:
doc = self[filename]
save_file('%s/%s.html' % (output_dir, trim_js_ext(doc.name)),
build_html_page(doc.name, doc.to_html(self)))
except KeyError:
warn('File %s does not exist', filename)
class FileDoc(object):
"""
    Represents documentation for an entire file. The constructor takes the
    source text for the file, parses it, then provides a class wrapper around
    the parsed text.
"""
def __init__(self, file_name, file_text):
"""
Construct a FileDoc. `file_name` is the name of the JavaScript file,
`file_text` is its text.
"""
self.name = file_name
self.order = []
self.comments = { 'file_overview': ModuleDoc({}) }
is_first = True
for comment, next_line in get_doc_comments(file_text):
raw = parse_comment(strip_stars(comment), next_line)
if 'fileoverview' in raw:
obj = ModuleDoc(raw)
elif raw.get('function') or raw.get('guessed_function'):
obj = FunctionDoc(raw)
elif raw.get('class'):
obj = ClassDoc(raw)
elif is_first:
obj = ModuleDoc(raw)
else:
continue
self.order.append(obj.name)
self.comments[obj.name] = obj
is_first = False
for method in self.methods:
try:
self.comments[method.member].add_method(method)
except AttributeError:
warn('member %s of %s is not a class',
method.member, method.name)
except KeyError:
pass
def __str__(self):
return "Docs for file " + self.name
def keys(self):
"""
Returns all legal names for doc comments.
>>> file = FileDoc('module.js', read_file('examples/module.js'))
>>> file.keys()[1]
'the_first_function'
>>> file.keys()[4]
'not_auto_discovered'
"""
return self.order
def values(self):
"""
Same as list(file_doc).
>>> file = FileDoc('module.js', read_file('examples/module.js'))
>>> file.values()[0].doc[:30]
'This is the module documentati'
"""
return list(self)
def __iter__(self):
"""
Returns all comments from the file, in the order they appear.
"""
return (self.comments[name] for name in self.order)
def __contains__(self, name):
"""
Returns True if the specified function or class name is in this file.
"""
return name in self.comments
def __getitem__(self, index):
"""
If `index` is a string, returns the named method/function/class
from the file.
>>> file = FileDoc('module.js', read_file('examples/module.js'))
>>> file['the_second_function'].doc
'This is the documentation for the second function.'
If `index` is an integer, returns the ordered comment from the file.
>>> file[0].name
'file_overview'
>>> file[0].doc[:30]
'This is the module documentati'
"""
if isinstance(index, int):
return self.comments[self.order[index]]
else:
return self.comments[index]
def set_all_dependencies(self, dependencies):
"""
Sets the `all_dependencies` property on the module documentation.
"""
self.comments['file_overview'].all_dependencies = dependencies
@property
def module(self):
"""
Return the `ModuleDoc` comment for this file.
"""
return self.comments['file_overview']
@property
def doc(self):
"""
Shortcut for ``self.module.doc``.
"""
return self.module.doc
@property
def url(self):
return trim_js_ext(self.name) + '.html'
def _filtered_iter(self, pred):
return (self.comments[name] for name in self.order
if pred(self.comments[name]))
@property
def functions(self):
"""
Returns a generator of all standalone functions in the file, in textual
order.
>>> file = FileDoc('module.js', read_file('examples/module.js'))
>>> list(file.functions)[0].name
'the_first_function'
>>> list(file.functions)[3].name
'not_auto_discovered'
"""
def is_function(comment):
return isinstance(comment, FunctionDoc) and not comment.member
return self._filtered_iter(is_function)
@property
def methods(self):
"""
Returns a generator of all member functions in the file, in textual
order.
>>> file = FileDoc('class.js', read_file('examples/class.js'))
>>> file.methods.next().name
'first_method'
"""
def is_method(comment):
return isinstance(comment, FunctionDoc) and comment.member
return self._filtered_iter(is_method)
@property
def classes(self):
"""
Returns a generator of all classes in the file, in textual order.
>>> file = FileDoc('class.js', read_file('examples/class.js'))
>>> cls = file.classes.next()
>>> cls.name
'MyClass'
>>> cls.methods[0].name
'first_method'
"""
return self._filtered_iter(lambda c: isinstance(c, ClassDoc))
def to_dict(self):
return [comment.to_dict() for comment in self]
def to_html(self, codebase):
if codebase.include_private:
def visible(fns): return fns
else:
def visible(fns):
return [fn for fn in fns if not fn.is_private]
vars = [
('module', self.module.to_html(codebase)),
('function_index', make_index('functions', visible(self.functions))),
('class_index', make_index('classes', self.classes)),
('functions', '\n'.join(fn.to_html(codebase)
for fn in visible(self.functions))),
('classes', '\n'.join(cls.to_html(codebase) for cls in self.classes))
]
html = '<h1>Module documentation for %s</h1>\n%s' % (self.name,
htmlize_paragraphs(codebase.translate_links(self.module.doc)))
for key, html_text in vars:
if html_text:
html += '<h2>%s</h2>\n%s' % (printable(key), html_text)
return html
class CommentDoc(object):
"""
Base class for all classes that represent a parsed comment of some sort.
"""
def __init__(self, parsed_comment):
self.parsed = parsed_comment
def __str__(self):
return "Docs for " + self.name
def __repr__(self):
return str(self)
def __contains__(self, tag_name):
return tag_name in self.parsed
def __getitem__(self, tag_name):
return self.get(tag_name)
def get(self, tag_name, default=''):
"""
Return the value of a particular tag, or None if that tag doesn't
exist. Use 'doc' for the comment body itself.
"""
return self.parsed.get(tag_name, default)
def get_as_list(self, tag_name):
"""
Return the value of a tag, making sure that it's a list. Absent
        tags are returned as an empty list; single tags are returned as a
one-element list.
The returned list is a copy, and modifications do not affect the
original object.
"""
val = self.get(tag_name, [])
if isinstance(val, list):
return val[:]
else:
return [val]
@property
def doc(self):
"""
Return the comment body.
"""
return self.get('doc')
@property
def url(self):
"""
Return a URL for the comment, within the page.
"""
return '#' + self.name
@property
def see(self):
"""
Return a list of all @see tags on the comment.
"""
return self.get_as_list('see')
def to_json(self):
"""
Return a JSON representation of the CommentDoc. Keys are as per
to_dict.
"""
return encode_json(self.to_dict())
def to_dict(self):
"""
Return a dictionary representation of the CommentDoc. The keys of
this correspond to the tags in the comment, with the comment body in
`doc`.
"""
return self.parsed.copy()
class ModuleDoc(CommentDoc):
"""
Represents the top-level fileoverview documentation.
"""
@property
def name(self):
"""
Always return 'file_overview'.
"""
return 'file_overview'
@property
def author(self):
"""
Return the author of this module, as a string.
"""
return self.get('author')
@property
def organization(self):
"""
        Return the organization that developed this, as a string.
"""
return self.get('organization')
@property
def license(self):
"""
        Return the license of this module, as a string.
"""
return self.get('license')
@property
def version(self):
"""
        Return the version of this module, as a string.
"""
return self.get('version')
@property
def dependencies(self):
"""
Returns the immediate dependencies of a module (only those that are
explicitly declared). Use the `all_dependencies` field for transitive
dependencies - the FileDoc must have been created by a CodeBaseDoc for
this field to exist.
>>> FileDoc('', read_file('examples/module_closure.js')).module.dependencies
['module.js']
>>> FileDoc('subclass.js', read_file('examples/subclass.js')).module.dependencies
['module_closure.js', 'class.js']
"""
return self.get_as_list('dependency')
def to_dict(self):
"""
Return this ModuleDoc as a dict. In addition to `CommentDoc` defaults,
this has:
- **name**: The module name.
- **dependencies**: A list of immediate dependencies.
- **all_dependencies**: A list of all dependencies.
"""
vars = super(ModuleDoc, self).to_dict()
vars['dependencies'] = self.dependencies
vars['name'] = self.name
try:
vars['all_dependencies'] = self.all_dependencies[:]
except AttributeError:
vars['all_dependencies'] = []
return vars
def to_html(self, codebase):
"""
Convert this to HTML.
"""
html = ''
def build_line(key, include_pred, format_fn):
val = getattr(self, key)
if include_pred(val):
return '<dt>%s</dt><dd>%s</dd>\n' % (printable(key), format_fn(val))
else:
return ''
def build_dependency(val):
return ', '.join('<a href = "%s.html">%s</a>' % (trim_js_ext(name), name)
for name in val)
for key in ('author', 'organization', 'version', 'license'):
html += build_line(key, lambda val: val, lambda val: val)
html += build_line('dependencies', lambda val: val, build_dependency)
html += build_line('all_dependencies', lambda val: len(val) > 1,
build_dependency)
html += codebase.build_see_html(self.see, 'h3')
if html:
return '<dl class = "module">\n%s\n</dl>\n' % html
else:
return ''
class FunctionDoc(CommentDoc):
r"""
Documentation for a single function or method. Takes a parsed
comment and provides accessors for accessing the various fields.
>>> comments = parse_comments_for_file('examples/module_closure.js')
>>> fn1 = FunctionDoc(comments[1])
>>> fn1.name
'the_first_function'
>>> fn1.doc
'The auto-naming can pick up functions defined as fields of an object,\n as is common with classes and the module pattern.'
"""
def __init__(self, parsed_comment):
super(FunctionDoc, self).__init__(parsed_comment)
@property
def name(self):
return self.get('guessed_function') or self.get('function')
@property
def params(self):
"""
Returns a ParamDoc for each parameter of the function, picking up
the order from the actual parameter list.
>>> comments = parse_comments_for_file('examples/module_closure.js')
>>> fn2 = FunctionDoc(comments[2])
>>> fn2.params[0].name
'elem'
>>> fn2.params[1].type
'Function(DOM)'
>>> fn2.params[2].doc
'The Options array.'
"""
tag_texts = self.get_as_list('param') + self.get_as_list('argument')
if self.get('guessed_params') is None:
return [ParamDoc(text) for text in tag_texts]
else:
param_dict = {}
for text in tag_texts:
param = ParamDoc(text)
param_dict[param.name] = param
return [param_dict.get(name) or ParamDoc('{} ' + name)
for name in self.get('guessed_params')]
@property
def options(self):
"""
Return the options for this function, as a list of ParamDocs. This is
a common pattern for emulating keyword arguments.
>>> comments = parse_comments_for_file('examples/module_closure.js')
>>> fn2 = FunctionDoc(comments[2])
>>> fn2.options[0].name
'foo'
>>> fn2.options[1].type
'Int'
>>> fn2.options[1].doc
'Some other option'
"""
return [ParamDoc(text) for text in self.get_as_list('option')]
@property
def return_val(self):
"""
Returns the return value of the function, as a ParamDoc with an
empty name:
>>> comments = parse_comments_for_file('examples/module_closure.js')
>>> fn1 = FunctionDoc(comments[1])
>>> fn1.return_val.name
''
>>> fn1.return_val.doc
'Some value'
>>> fn1.return_val.type
'String'
>>> fn2 = FunctionDoc(comments[2])
>>> fn2.return_val.doc
'Some property of the elements.'
>>> fn2.return_val.type
'Array<String>'
"""
ret = self.get('return') or self.get('returns')
type = self.get('type')
        if '{' in ret and '}' in ret:
            if '} ' not in ret:
                # Insert a space after the closing brace so the name parses as empty
                ret = ret.replace('}', '} ', 1)
            return ParamDoc(ret)
if ret and type:
return ParamDoc('{%s} %s' % (type, ret))
return ParamDoc(ret)
@property
def exceptions(self):
"""
Returns a list of ParamDoc objects (with empty names) of the
exception tags for the function.
>>> comments = parse_comments_for_file('examples/module_closure.js')
>>> fn1 = FunctionDoc(comments[1])
>>> fn1.exceptions[0].doc
'Another exception'
>>> fn1.exceptions[1].doc
'A fake exception'
>>> fn1.exceptions[1].type
'String'
"""
def make_param(text):
if '{' in text and '}' in text:
# Make sure param name is blank:
word_split = list(split_delimited('{}', ' ', text))
if word_split[1] != '':
text = ' '.join([word_split[0], ''] + word_split[1:])
else:
# Handle old JSDoc format
word_split = text.split()
text = '{%s} %s' % (word_split[0], ' '.join(word_split[1:]))
return ParamDoc(text)
return [make_param(text) for text in
self.get_as_list('throws') + self.get_as_list('exception')]
@property
def is_private(self):
"""
Return True if this is a private function or method.
"""
return 'private' in self.parsed
@property
def member(self):
"""
Return the raw text of the @member tag, a reference to a method's
containing class, or None if this is a standalone function.
"""
return self.get('member')
@property
def is_constructor(self):
"""
Return True if this function is a constructor.
"""
return 'constructor' in self.parsed
def to_dict(self):
"""
Convert this FunctionDoc to a dictionary. In addition to `CommentDoc`
keys, this adds:
- **name**: The function name
- **params**: A list of parameter dictionaries
- **options**: A list of option dictionaries
- **exceptions**: A list of exception dictionaries
- **return_val**: A dictionary describing the return type, as per `ParamDoc`
- **is_private**: True if private
- **is_constructor**: True if a constructor
- **member**: The raw text of the member property.
"""
vars = super(FunctionDoc, self).to_dict()
vars.update({
'name': self.name,
'params': [param.to_dict() for param in self.params],
'options': [option.to_dict() for option in self.options],
'exceptions': [exc.to_dict() for exc in self.exceptions],
'return_val': self.return_val.to_dict(),
'is_private': self.is_private,
'is_constructor': self.is_constructor,
'member': self.member
})
return vars
def to_html(self, codebase):
"""
Convert this `FunctionDoc` to HTML.
"""
body = ''
for section in ('params', 'options', 'exceptions'):
val = getattr(self, section)
if val:
body += '<h5>%s</h5>\n<dl class = "%s">%s</dl>' % (
printable(section), section,
'\n'.join(param.to_html() for param in val))
body += codebase.build_see_html(self.see, 'h5', self)
return ('<a name = "%s" />\n<div class = "function">\n' +
'<h4>%s</h4>\n%s\n%s\n</div>\n') % (self.name, self.name,
htmlize_paragraphs(codebase.translate_links(self.doc, self)), body)
class ClassDoc(CommentDoc):
"""
Documentation for a single class.
"""
def __init__(self, parsed_comment):
"""
Initialize this object from a parsed comment dictionary. `add_method`
must be called later to populate the `methods` property with
`FunctionDoc`.
"""
super(ClassDoc, self).__init__(parsed_comment)
self.methods = []
# Methods are added externally with add_method, after construction
@property
def name(self):
return self.get('class') or self.get('constructor')
@property
def superclass(self):
"""
Return the immediate superclass name of the class, as a string. For
the full inheritance chain, use the `all_superclasses` property, which
returns a list of objects and only works if this ClassDoc was created
from a `CodeBaseDoc`.
"""
return self.get('extends') or self.get('base')
@property
def constructors(self):
"""
Return all methods labeled with the @constructor tag.
"""
return [fn for fn in self.methods if fn.is_constructor]
def add_method(self, method):
"""
Add a `FunctionDoc` method to this class. Called automatically if this
ClassDoc was constructed from a CodeBaseDoc.
"""
self.methods.append(method)
def has_method(self, method_name):
"""
Returns True if this class contains the specified method.
"""
return self.get_method(method_name) is not None
def get_method(self, method_name, default=None):
"""
Returns the contained method of the specified name, or `default` if
not found.
"""
for method in self.methods:
if method.name == method_name:
return method
return default
def to_dict(self):
"""
Convert this ClassDoc to a dict, such as if you want to use it in a
template or string interpolation. Aside from the basic `CommentDoc`
fields, this also contains:
- **name**: The class name
- **method**: A list of methods, in their dictionary form.
"""
vars = super(ClassDoc, self).to_dict()
vars.update({
'name': self.name,
'method': [method.to_dict() for method in self.methods]
})
return vars
def to_html(self, codebase):
"""
Convert this ClassDoc to HTML. This returns the default long-form
HTML description that's used when the full docs are built.
"""
return ('<a name = "%s" />\n<div class = "jsclass">\n' +
'<h3>%s</h3>\n%s\n<h4>Methods</h4>\n%s</div>') % (
self.name, self.name,
htmlize_paragraphs(codebase.translate_links(self.doc, self)) +
codebase.build_see_html(self.see, 'h4', self),
'\n'.join(method.to_html(codebase) for method in self.methods
if codebase.include_private or not method.is_private))
class ParamDoc(object):
"""
Represents a parameter, option, or parameter-like object, basically
anything that has a name, a type, and a description, separated by spaces.
This is also used for return types and exceptions, which use an empty
string for the name.
>>> param = ParamDoc('{Array<DOM>} elems The elements to act upon')
>>> param.name
'elems'
>>> param.doc
'The elements to act upon'
>>> param.type
'Array<DOM>'
You can also omit the type: if the first element is not surrounded by
curly braces, it's assumed to be the name instead:
>>> param2 = ParamDoc('param1 The first param')
>>> param2.type
''
>>> param2.name
'param1'
>>> param2.doc
'The first param'
"""
def __init__(self, text):
parsed = list(split_delimited('{}', ' ', text))
if parsed[0].startswith('{') and parsed[0].endswith('}'):
self.type = parsed[0][1:-1]
self.name = parsed[1]
self.doc = ' '.join(parsed[2:])
else:
self.type = ''
self.name = parsed[0]
self.doc = ' '.join(parsed[1:])
def to_dict(self):
"""
Convert this to a dict. Keys (all strings) are:
- **name**: Parameter name
- **type**: Parameter type
- **doc**: Parameter description
"""
return {
'name': self.name,
'type': self.type,
'doc': self.doc
}
def to_html(self, css_class=''):
"""
Returns the parameter as a dt/dd pair.
"""
if self.name and self.type:
header_text = '%s (%s)' % (self.name, self.type)
elif self.type:
header_text = self.type
else:
header_text = self.name
return '<dt>%s</dt><dd>%s</dd>' % (header_text, self.doc)
##### DEPENDENCIES #####
class CyclicDependency(Exception):
"""
Exception raised if there is a cyclic dependency.
"""
def __init__(self, remaining_dependencies):
self.values = remaining_dependencies
def __str__(self):
return ('The following dependencies result in a cycle: '
+ ', '.join(self.values))
class MissingDependency(Exception):
"""
Exception raised if a file references a dependency that doesn't exist.
"""
def __init__(self, file, dependency):
self.file = file
self.dependency = dependency
def __str__(self):
return "Couldn't find dependency %s when processing %s" % \
(self.dependency, self.file)
def build_dependency_graph(start_nodes, js_doc):
"""
Build a graph where nodes are filenames and edges are reverse dependencies
(so an edge from jquery.js to jquery.dimensions.js indicates that jquery.js
must be included before jquery.dimensions.js). The graph is represented
as a dictionary from filename to (in-degree, edges) pair, for ease of
topological sorting. Also returns a list of nodes of degree zero.
"""
queue = []
dependencies = {}
start_sort = []
def add_vertex(file):
in_degree = len(js_doc[file].module.dependencies)
dependencies[file] = [in_degree, []]
queue.append(file)
if in_degree == 0:
start_sort.append(file)
def add_edge(from_file, to_file):
dependencies[from_file][1].append(to_file)
def is_in_graph(file):
return file in dependencies
for file in start_nodes:
add_vertex(file)
for file in queue:
for dependency in js_doc[file].module.dependencies:
if dependency not in js_doc:
raise MissingDependency(file, dependency)
if not is_in_graph(dependency):
add_vertex(dependency)
add_edge(dependency, file)
return dependencies, start_sort
def topological_sort(dependencies, start_nodes):
"""
Perform a topological sort on the dependency graph `dependencies`, starting
from list `start_nodes`.
"""
retval = []
def edges(node): return dependencies[node][1]
def in_degree(node): return dependencies[node][0]
def remove_incoming(node): dependencies[node][0] = in_degree(node) - 1
while start_nodes:
node = start_nodes.pop()
retval.append(node)
for child in edges(node):
remove_incoming(child)
if not in_degree(child):
start_nodes.append(child)
leftover_nodes = [node for node in list(dependencies.keys())
if in_degree(node) > 0]
if leftover_nodes:
raise CyclicDependency(leftover_nodes)
else:
return retval
def find_dependencies(start_nodes, js_doc):
"""
Sort the dependency graph, taking in a list of starting module names and a
CodeBaseDoc (or equivalent dictionary). Returns an ordered list of
transitive dependencies such that no module appears before its
dependencies.
"""
return topological_sort(*build_dependency_graph(start_nodes, js_doc))
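# Hedged usage sketch (not part of PyJSDoc itself): topological_sort consumes
# the {node: [in_degree, edges]} mapping that build_dependency_graph returns.
# The graph literal below is made-up illustration data.
def _demo_topological_sort():
    graph = {
        'module.js': [0, ['class.js']],    # in-degree 0: seeds the sort
        'class.js': [1, ['subclass.js']],
        'subclass.js': [1, []],
    }
    return topological_sort(graph, ['module.js'])
    # -> ['module.js', 'class.js', 'subclass.js']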
##### HTML utilities #####
def build_html_page(title, body):
"""
Build the simple tag skeleton for a title and body.
"""
return """<html>
<head>
<title>%s</title>
<link rel = "stylesheet" type = "text/css" href = "jsdoc.css" />
</head>
<body>
%s
</body>
</html>""" % (title, body)
def make_index(css_class, entities):
"""
Generate the HTML index (a short description and a link to the full
documentation) for a list of FunctionDocs or ClassDocs.
"""
def make_entry(entity):
return ('<dt><a href = "%(url)s">%(name)s</a></dt>\n' +
'<dd>%(doc)s</dd>') % {
'name': entity.name,
'url': entity.url,
'doc': first_sentence(entity.doc)
}
entry_text = '\n'.join(make_entry(val) for val in entities)
if entry_text:
return '<dl class = "%s">\n%s\n</dl>' % (css_class, entry_text)
else:
return ''
def first_sentence(str):
"""
    Return the first sentence of a string - everything up to and including
    the first period, or the empty string if there is no period.
>>> first_sentence('')
''
>>> first_sentence('Incomplete')
''
>>> first_sentence('The first sentence. This is ignored.')
'The first sentence.'
"""
return str[0:str.find('.') + 1]
def htmlize_paragraphs(text):
"""
Convert paragraphs delimited by blank lines into HTML text enclosed
in <p> tags.
"""
    paragraphs = re.split(r'(?:\r?\n)\s*(?:\r?\n)', text)
return '\n'.join('<p>%s</p>' % paragraph for paragraph in paragraphs)
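# Example: paragraphs separated by a blank line become separate <p> blocks, e.g.
# htmlize_paragraphs('one\n\ntwo') -> '<p>one</p>\n<p>two</p>'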
def printable(id):
"""
Turn a Python identifier into something fit for human consumption.
>>> printable('author')
'Author'
>>> printable('all_dependencies')
'All Dependencies'
"""
return ' '.join(word.capitalize() for word in id.split('_'))
##### Command-line functions #####
def usage():
command_name = sys.argv[0]
print("""
Usage: %(name)s [options] file1.js file2.js ...
By default, this tool recursively searches the current directory for .js files
to build up its dependency database. This can be changed with the --jspath
option (see below). It then outputs the JSDoc for the files listed on the
command line (if no files are listed, it generates the docs for the whole
codebase). If only a single file is listed and no output directory is
specified, the HTML page is placed in the current directory; otherwise, all
pages and a module index are placed in the output directory.
Available options:
-p, --jspath Directory to search for JS libraries (multiple allowed)
-o, --output Output directory for building full documentation (default: apidocs)
--private Include private functions & methods in output
--help Print usage information and exit
--test Run PyJSDoc unit tests
-j, --json Output doc parse tree in JSON instead of building HTML
-d, --dependencies Output dependencies for file(s) only
Cookbook of common tasks:
Find dependencies of the Dimensions plugin in the jQuery CVS repository,
filtering out packed files from the search path:
$ %(name)s -d -p trunk/plugins jquery.dimensions.js
Concatenate dependent plugins into a single file for web page:
$ %(name)s -d rootfile1.js rootfile2.js | xargs cat > scripts.js
Read documentation information for form plugin (including full dependencies),
and include on a PHP web page using the PHP Services_JSON module:
<?php
$json = new Services_JSON();
$jsdoc = $json->decode(`%(name)s jquery.form.js -j -p trunk/plugins`);
?>
Build documentation for all modules on your system:
$ %(name)s -p ~/svn/js -o /var/www/htdocs/jqdocs
""" % {'name': os.path.basename(command_name) })
def get_path_list(opts):
"""
Return a list of all root paths where JS files can be found, given the
command line options (in dict form) for this script.
"""
paths = []
for opt, arg in list(opts.items()):
if opt in ('-p', '--jspath'):
paths.append(arg)
return paths or [os.getcwd()]
def run_and_exit_if(opts, action, *names):
"""
Run the no-arg function `action` if any of `names` appears in the
option dict `opts`.
"""
for name in names:
if name in opts:
action()
sys.exit(0)
def run_doctests():
import doctest
doctest.testmod()
def main(args=sys.argv):
"""
Main command-line invocation.
"""
try:
opts, args = getopt.gnu_getopt(args[1:], 'p:o:jdt', [
'jspath=', 'output=', 'private', 'json', 'dependencies',
'test', 'help'])
opts = dict(opts)
except getopt.GetoptError:
usage()
sys.exit(2)
    run_and_exit_if(opts, run_doctests, '--test', '-t')
run_and_exit_if(opts, usage, '--help')
js_paths = get_path_list(opts)
docs = CodeBaseDoc(js_paths, '--private' in opts)
if args:
selected_files = set(docs.keys()) & set(args)
else:
selected_files = list(docs.keys())
def print_json():
print(docs.to_json(selected_files))
run_and_exit_if(opts, print_json, '--json', '-j')
def print_dependencies():
for dependency in find_dependencies(selected_files, docs):
print(dependency)
run_and_exit_if(opts, print_dependencies, '--dependencies', '-d')
output = opts.get('--output') or opts.get('-o')
if output is None and len(args) != 1:
output = 'apidocs'
docs.save_docs(selected_files, output)
if __name__ == '__main__':
main()
|
import subprocess
import sys
import pkg_resources
def get_bin_path(package_name: str, name: str) -> str:
bin_name = name
if sys.platform == "win32":
bin_name = bin_name + ".exe"
return pkg_resources.resource_filename(package_name, f"bin/{bin_name}")
def multiply_ext(lhs: int, rhs: int) -> int:
res = subprocess.run(
[
get_bin_path("py_ext_bin_test", "c_test"),
str(lhs),
str(rhs),
],
capture_output=True,
text=True,
)
if res.returncode != 0:
raise ValueError("Problem with args")
return int(res.stdout)
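# Hedged usage note: assuming the 'py_ext_bin_test' package ships a bin/c_test
# executable that prints lhs*rhs, multiply_ext(6, 7) would return 42.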
|
"""
"""
import copy
def insert_sort(data):
result = copy.deepcopy(data)
for i in range(1, len(result)):
key = result[i]
left = i - 1
while left >= 0 and result[left] > key:
result[left + 1] = result[left]
left -= 1
result[left + 1] = key
return result
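# Example: insert_sort([3, 1, 2]) == [1, 2, 3]; the input list is left unmodified.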
|
import requests, os
url = "https://ftp.ncbi.nlm.nih.gov/refseq/H_sapiens/annotation/GRCh38_latest/refseq_identifiers/GRCh38_latest_genomic.fna.gz"
r = requests.get(url, allow_redirects=True)
with open("GRCh38_latest_genomic.fna.gz", "wb") as f:
    f.write(r.content)
os.system("gzip -d GRCh38_latest_genomic.fna.gz")
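# Hedged alternative (sketch): for a multi-gigabyte reference genome it is safer
# to stream the response to disk than to buffer it all in memory. The chunk size
# below is an arbitrary assumption.
def download_streamed(url, dest):
    with requests.get(url, stream=True) as resp:
        resp.raise_for_status()
        with open(dest, "wb") as fh:
            for chunk in resp.iter_content(chunk_size=1 << 20):
                fh.write(chunk)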
|
from typing import IO, Tuple
import cv2 as cv
import numpy as np
from ..ModelBase import Justify, rgb
from ..AbstractPlot import AbstractPlot
from ..Typing import *
def _pos(pos: POS_T) -> Tuple[int, int]:
return (int(pos[0]*10), int(pos[1]*10))
def _pts(pts: PTS_T):
res = []
for pos in pts:
res.append((int(pos[0]*10), int(pos[1]*10)))
return np.array([res], np.int32)
def _color(color: rgb) -> List[int]:
cols = [int(c * 255) for c in color.get()[:-1]]
return cols
def _align(align: List[Justify]):
if Justify.LEFT in align:
return 'start'
if Justify.CENTER in align:
return 'middle'
return 'end'
def _baseline(align: List[Justify]):
if Justify.TOP in align:
return 'hanging'
if Justify.BOTTOM in align:
return 'baseline'
return 'middle'
class PlotOpenCV(AbstractPlot):
def __init__(self, file: IO, width: float, height: float, dpi: int, scale: float = 3.543307):
self.file = file
self.width = width
self.height = height
self.dpi = dpi
self.scale = scale
size = int(width*10), int(height*10), 3
self._image = np.zeros(size, dtype=np.uint8)
def polyline(self, pts: PTS_T, width: float, color: rgb, fill: rgb|None = None)-> None:
line_type = 8
cv.fillPoly(self._image, _pts(pts), (255, 255, 255), line_type)
def rectangle(self, start: POS_T, end: POS_T, width: float, color: rgb, fill: rgb|None=None) -> None:
_start = start
_end = end
        # Normalise so _start is the top-left and _end the bottom-right corner
        if start[0] > end[0]:
            _start, _end = (_end[0], _start[1]), (_start[0], _end[1])
        if start[1] > end[1]:
            _start, _end = (_start[0], _end[1]), (_end[0], _start[1])
line_type = 8
cv.rectangle(self._image, _pos(_start), _pos(_end),
(0, 255, 255),
-1,
8)
def line(self, pts: PTS_T, width: float, color: rgb) -> None:
line_type = 8
cv.line(self._image, _pos(pts[0]), _pos(pts[1]), (0, 0, 0),
int(width*10),
line_type)
def circle(self, pos: POS_T, radius: float, width: float, color: rgb, fill: rgb|None=None) -> None:
line_type = 8
cv.circle(self._image,
_pos(pos),
                  int(radius * 10),  # match the 10x pixel scaling used by _pos
(0, 0, 255),
int(width*10),
line_type)
def arc(self, pos: POS_T, radius: float, start: float, end: float, width: float, color: rgb, fill: rgb|None=None) -> None:
line_type = 8
cv.circle(self._image,
_pos(pos),
                  int(radius * 10),  # NOTE: start/end angles are ignored; a full circle is drawn
(0, 0, 255),
int(width*10),
line_type)
def text(self, pos: POS_T, text: str, font_height: float, font_with: float,
face: str, rotate: float, style: str, thickness: float,
color: rgb, align: List[Justify]) -> None:
font = cv.FONT_HERSHEY_SIMPLEX
        cv.putText(self._image, text, _pos(pos), font, 0.5, (0, 0, 0), 2, cv.LINE_AA)
def end(self) -> None:
cv.imwrite(self.file, self._image)
|
import logging
from rich.logging import RichHandler
FORMAT = "%(message)s"
logging.basicConfig(level="INFO", format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])
log = logging.getLogger("rich")
|
import pandas as pd
import h5py
from collections import defaultdict
from tqdm import tqdm
import sys
from pathlib import Path
import warnings
warnings.filterwarnings('ignore')
def die(message, status=1):
"""Print an error message and call sys.exit with the given status, terminating the process"""
print(message, file=sys.stderr)
sys.exit(status)
def get_dtypes(hdf5_path, enum_field):
if h5py.check_dtype(enum=hdf5_path.dtype[enum_field]):
# data_dtype may lose some dataset dtypes if there are duplicates of 'v'
return {v: k for k, v in h5py.check_dtype(enum=hdf5_path.dtype[enum_field]).items()}
def get_data(bulkfile_path, seq_sum_path, read_list=None, time=2, name=None, read_pos='end_time'):
"""
Get classification data from start or end of reads in a bulkfile
Parameters
----------
    bulkfile_path : str
        Path to bulk FAST5 file
    seq_sum_path : str
        Path to sequencing_summary file, loaded into a pandas DataFrame
read_list : list
List of reads to get classifications for
time : numeric (int or float)
Time surrounding the end of a read to get classifications
name : str
(Optional) label to add onto returned data, useful for grouping
read_pos : str
Either 'start_time' or 'end_time' for classifications at start or end of reads
Returns
-------
list
list of lists; with columns ['read_id', 'time', 'label', 'name']
"""
ss = pd.read_csv(seq_sum_path, sep='\t', usecols=['run_id', 'read_id', 'channel', 'start_time', 'duration'])
bf = h5py.File(bulkfile_path, 'r')
sf = int(bf["UniqueGlobalKey"]["context_tags"].attrs["sample_frequency"].decode('utf8'))
run_id = bf["UniqueGlobalKey"]["tracking_id"].attrs["run_id"].decode('utf8')
ss = ss[ss['run_id'] == run_id]
ss['end_time'] = ss['start_time'] + ss['duration']
conv = {'acquisition_raw_index': 'read_start',
'summary_state': 'modal_classification'}
if read_list is not None:
ss = ss[ss['read_id'].isin(read_list)]
channels = ss['channel'].unique()
# get label dtypes
labels_dt = get_dtypes(
bf['IntermediateData']['Channel_' + str(channels[0])]['Reads'], 'modal_classification'
)
labels_dt.update(
get_dtypes(bf['StateData']['Channel_' + str(channels[0])]['States'], 'summary_state')
)
int_data = defaultdict()
state_data = defaultdict()
b = []
for ch in tqdm(channels):
path = bf['IntermediateData']['Channel_' + str(ch)]['Reads']
for f in ['read_id', 'modal_classification', 'read_start']:
int_data[f] = path[f]
path = bf['StateData']['Channel_' + str(ch)]['States']
for f in ['acquisition_raw_index', 'summary_state']:
state_data[conv[f]] = path[f]
df = pd.concat([pd.DataFrame(int_data), pd.DataFrame(state_data)], axis=0)
df['read_id'] = df['read_id'].str.decode('utf8')
df.sort_values(by='read_start', ascending=True, inplace=True)
df['read_id'] = df['read_id'].fillna(method='ffill')
df['modal_classification'] = df['modal_classification'].map(labels_dt)
df['read_start'] = df['read_start'] / sf
temp_ss = ss[ss['channel'] == ch]
for idx, row in temp_ss.iterrows():
before = df[df['read_start'].between(row[read_pos] - time, row[read_pos], inclusive=False)]
after = df[df['read_start'].between(row[read_pos], row[read_pos] + time, inclusive=True)]
for i, r in before.iterrows():
b.append([row['read_id'],
r['read_start'] - row[read_pos],
r['modal_classification'],
name
])
for i, r in after.iterrows():
b.append([row['read_id'],
r['read_start'] - row[read_pos],
r['modal_classification'],
name
])
bf.close()
return b
def main():
if len(sys.argv) < 3:
die('Usage: python {s} sequencing_summary.txt <path/to/bulkfile/directory>'.format(s=sys.argv[0]))
files = [[p] for p in Path(sys.argv[2]).iterdir() if p.suffix == '.fast5']
if not files:
die('No bulk FAST5 files found in {}'.format(sys.argv[2]))
for t in files:
f = h5py.File(t[0], 'r')
t.append(f["UniqueGlobalKey"]["tracking_id"].attrs["run_id"].decode('utf8'))
f.close()
for file_name, run_id in files:
print('{},\t{}'.format(file_name, run_id))
print('Collecting ends:')
filename = 'end_events.csv'
paginate = False
for t in files:
end_data = get_data(t[0], sys.argv[1], time=2, name=t[1], read_pos='end_time')
df = pd.DataFrame(end_data, columns=['read_id', 'time', 'label', 'comment'])
if not df.empty and not paginate:
df.to_csv(filename, sep=',', header=True, index=False)
paginate = True
elif not df.empty and paginate:
with open(filename, 'a') as file:
df.to_csv(file, sep=',', header=False, index=False)
else:
print('df is empty')
print('Collecting starts:')
    filename = 'start_events.csv'
    paginate = False
    for t in files:
        start_data = get_data(t[0], sys.argv[1], time=2, name=t[1], read_pos='start_time')
        df = pd.DataFrame(start_data, columns=['read_id', 'time', 'label', 'comment'])
if not df.empty and not paginate:
df.to_csv(filename, sep=',', header=True, index=False)
paginate = True
elif not df.empty and paginate:
with open(filename, 'a') as file:
df.to_csv(file, sep=',', header=False, index=False)
else:
print('df is empty')
if __name__ == '__main__':
main()
|
from .process import Command as ProcessCommand
from .sample_video import Command as SampleCommand
class Command:
name = "video_process"
help = "sample video into images and process the images"
def add_basic_arguments(self, parser):
SampleCommand().add_basic_arguments(parser)
ProcessCommand().add_basic_arguments(parser)
def run(self, args: dict):
SampleCommand().run(args)
ProcessCommand().run(args)
|
import json
import logging
import pytest
AED_CLOUD_ACTIVITY_EVENT_DICT = json.loads(
"""{
"url": "https://www.example.com",
"syncDestination": "TEST_SYNC_DESTINATION",
"sharedWith": [{"cloudUsername": "[email protected]"}, {"cloudUsername": "[email protected]"}],
"cloudDriveId": "TEST_CLOUD_DRIVE_ID",
"actor": "[email protected]",
"tabUrl": "TEST_TAB_URL",
"windowTitle": "TEST_WINDOW_TITLE"
}"""
)
AED_REMOVABLE_MEDIA_EVENT_DICT = json.loads(
"""{
"removableMediaVendor": "TEST_VENDOR_NAME",
"removableMediaName": "TEST_NAME",
"removableMediaSerialNumber": "TEST_SERIAL_NUMBER",
"removableMediaCapacity": 5000000,
"removableMediaBusType": "TEST_BUS_TYPE"
}"""
)
AED_EMAIL_EVENT_DICT = json.loads(
"""{
"emailSender": "TEST_EMAIL_SENDER",
"emailRecipients": ["[email protected]", "[email protected]"]
}"""
)
AED_EVENT_DICT = json.loads(
"""{
"eventId": "0_1d71796f-af5b-4231-9d8e-df6434da4663_912339407325443353_918253081700247636_16",
"eventType": "READ_BY_APP",
"eventTimestamp": "2019-09-09T02:42:23.851Z",
"insertionTimestamp": "2019-09-09T22:47:42.724Z",
"filePath": "/Users/testtesterson/Downloads/About Downloads.lpdf/Contents/Resources/English.lproj/",
"fileName": "InfoPlist.strings",
"fileType": "FILE",
"fileCategory": "UNCATEGORIZED",
"fileSize": 86,
"fileOwner": "testtesterson",
"md5Checksum": "19b92e63beb08c27ab4489fcfefbbe44",
"sha256Checksum": "2e0677355c37fa18fd20d372c7420b8b34de150c5801910c3bbb1e8e04c727ef",
"createTimestamp": "2012-07-22T02:19:29Z",
"modifyTimestamp": "2012-12-19T03:00:08Z",
"deviceUserName": "[email protected]",
"osHostName": "Test's MacBook Air",
"domainName": "192.168.0.3",
"publicIpAddress": "71.34.4.22",
"privateIpAddresses": [
"fe80:0:0:0:f053:a9bd:973:6c8c%utun1",
"fe80:0:0:0:a254:cb31:8840:9d6b%utun0",
"0:0:0:0:0:0:0:1%lo0",
"192.168.0.3",
"fe80:0:0:0:0:0:0:1%lo0",
"fe80:0:0:0:8c28:1ac9:5745:a6e7%utun3",
"fe80:0:0:0:2e4a:351c:bb9b:2f28%utun2",
"fe80:0:0:0:6df:855c:9436:37f8%utun4",
"fe80:0:0:0:ce:5072:e5f:7155%en0",
"fe80:0:0:0:b867:afff:fefc:1a82%awdl0",
"127.0.0.1"
],
"deviceUid": "912339407325443353",
"userUid": "912338501981077099",
"actor": null,
"directoryId": [],
"source": "Endpoint",
"url": null,
"shared": null,
"sharedWith": [],
"sharingTypeAdded": [],
"cloudDriveId": null,
"detectionSourceAlias": null,
"fileId": null,
"exposure": [
"ApplicationRead"
],
"processOwner": "testtesterson",
"processName": "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
"removableMediaVendor": null,
"removableMediaName": null,
"removableMediaSerialNumber": null,
"removableMediaCapacity": null,
"removableMediaBusType": null,
"syncDestination": null
}"""
)
@pytest.fixture()
def mock_log_record(mocker):
return mocker.MagicMock(spec=logging.LogRecord)
@pytest.fixture
def mock_file_event_log_record(mock_log_record):
mock_log_record.msg = AED_EVENT_DICT
return mock_log_record
@pytest.fixture
def mock_file_event_removable_media_event_log_record(mock_log_record):
mock_log_record.msg = AED_REMOVABLE_MEDIA_EVENT_DICT
return mock_log_record
@pytest.fixture
def mock_file_event_cloud_activity_event_log_record(mock_log_record):
mock_log_record.msg = AED_CLOUD_ACTIVITY_EVENT_DICT
return mock_log_record
@pytest.fixture
def mock_file_event_email_event_log_record(mock_log_record):
mock_log_record.msg = AED_EMAIL_EVENT_DICT
return mock_log_record
|
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from playexo.models import Answer, HighestGrade
class EvalListFilter(admin.SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = _('Eval')
# Parameter for the filter that will be used in the URL query.
parameter_name = 'grade'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (
('100', _('success')),
('0', _('nul')),
('-1', _('Error')),
('-', _('Build')),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
        # Compare the requested value (one of '100', '0', '-1' or '-')
        # to decide how to filter the queryset.
if self.value() == '100':
return queryset.filter(grade=100)
if self.value() == '0':
return queryset.filter(grade=0)
if self.value() == '-':
return queryset.filter(grade=None)
if self.value() == '-1':
return queryset.filter(grade=-1)
@admin.register(Answer)
class AnswerAdmin(admin.ModelAdmin):
list_display = ('user', 'pl', 'grade', 'seed', 'date', 'activity')
list_filter = (EvalListFilter, 'date')
@admin.register(HighestGrade)
class HighestGradeAdmin(admin.ModelAdmin):
list_display = ('user', 'pl', 'grade', 'activity')
|
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2021/5/20 11:47 PM
# @Author : Joselynzhao
# @Email : [email protected]
# @File : DataFrame.py
# @Software: PyCharm
# @Desc :
|
# libraries
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import os,sys
import matplotlib.dates as mdates
import matplotlib as mpl
from matplotlib.colors import ListedColormap
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.offsetbox import AnchoredText
from mpl_toolkits.axisartist.axislines import Axes
from mpl_toolkits import axisartist
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
)
from src.utils.settings import config
from src.models.icestupaClass import Icestupa
from src.models.methods.metadata import get_parameter_metadata
if __name__ == "__main__":
locations = ['guttannen21', 'gangles21','guttannen20', 'schwarzsee19']
index = pd.date_range(start ='1-1-2022',
end ='1-1-2024', freq ='D', name= "When")
df_out = pd.DataFrame(columns=locations,index=index)
fig, ax = plt.subplots(4, 1, sharex='col', figsize=(12, 14))
fig.subplots_adjust(hspace=0.4, wspace=0.4)
i=0
blue = "#0a4a97"
red = "#e23028"
purple = "#9673b9"
green = "#28a745"
orange = "#ffc107"
pink = "#ce507a"
skyblue = "#9bc4f0"
grey = '#ced4da'
CB91_Blue = "#2CBDFE"
CB91_Green = "#47DBCD"
CB91_Pink = "#F3A0F2"
CB91_Purple = "#9D2EC5"
CB91_Violet = "#661D98"
CB91_Amber = "#F5B14C"
for location in locations:
# Get settings for given location and trigger
SITE, FOLDER = config(location)
icestupa = Icestupa(location)
icestupa.read_output()
icestupa.self_attributes()
df = icestupa.df[["When","iceV"]]
df_c = pd.read_hdf(FOLDER["input"] + "model_input_" + icestupa.trigger + ".h5", "df_c")
if icestupa.name in ["guttannen21", "guttannen20"]:
df_c = df_c[1:]
df_c = df_c.set_index("When").resample("D").mean().reset_index()
dfv = df_c[["When", "DroneV", "DroneVError"]]
if location == 'schwarzsee19':
df['When'] = df['When'].mask(icestupa.df['When'].dt.year == 2019,
icestupa.df['When'] + pd.offsets.DateOffset(year=2023))
dfv['When'] = dfv['When'].mask(df_c['When'].dt.year == 2019,
df_c['When'] + pd.offsets.DateOffset(year=2023))
if location == 'guttannen20':
df['When'] = df['When'].mask(icestupa.df['When'].dt.year == 2019,
icestupa.df['When'] + pd.offsets.DateOffset(year=2022))
df['When'] = df['When'].mask(icestupa.df['When'].dt.year == 2020,
icestupa.df['When'] + pd.offsets.DateOffset(year=2023))
dfv['When'] = dfv['When'].mask(df_c['When'].dt.year == 2019,
df_c['When'] + pd.offsets.DateOffset(year=2022))
dfv['When'] = dfv['When'].mask(df_c['When'].dt.year == 2020,
df_c['When'] + pd.offsets.DateOffset(year=2023))
if location == 'guttannen21':
df['When'] = df['When'].mask(icestupa.df['When'].dt.year == 2020,
icestupa.df['When'] + pd.offsets.DateOffset(year=2022))
dfv['When'] = dfv['When'].mask(df_c['When'].dt.year == 2020,
df_c['When'] + pd.offsets.DateOffset(year=2022))
df['When'] = df['When'].mask(icestupa.df['When'].dt.year == 2021,
icestupa.df['When'] + pd.offsets.DateOffset(year=2023))
dfv['When'] = dfv['When'].mask(df_c['When'].dt.year == 2021,
df_c['When'] + pd.offsets.DateOffset(year=2023))
dfd = df.set_index("When").resample("D").mean().reset_index()
dfd = dfd.set_index("When")
df_out[location] = dfd["iceV"]
df_out = df_out.reset_index()
x = df_out.When
y1 = df_out[location]
x2 = dfv.When
y2 = dfv.DroneV
ax[i].plot(
x,
y1,
"b-",
label="Modelled Volume",
linewidth=1,
color=CB91_Blue,
zorder=1,
)
ax[i].scatter(x2, y2, color=CB91_Green, s=5, label="Measured Volume", zorder=2)
# ax[i].fill_between(x, y1=icestupa.V_dome, y2=0, color=grey, label = "Dome Volume")
ax[i].set_ylim(0, round(df_out[location].max(),0))
if location == "gangles21":
ax[i].set_ylim(0, round(y2.max()+1,0))
v = get_parameter_metadata(location)
ax[i].title.set_text(v["fullname"])
# ax[i].title(location)
# Hide the right and top spines
ax[i].spines['right'].set_visible(False)
ax[i].spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax[i].yaxis.set_ticks_position('left')
ax[i].xaxis.set_ticks_position('bottom')
ax[i].yaxis.set_major_locator(plt.LinearLocator(numticks=2))
ax[i].xaxis.set_major_locator(mdates.MonthLocator())
ax[i].xaxis.set_major_formatter(mdates.DateFormatter("%b %d"))
fig.autofmt_xdate()
df_out = df_out.set_index("When")
i+=1
fig.text(0.04, 0.5, 'Ice Volume[$m^3$]', va='center', rotation='vertical')
handles, labels = ax[1].get_legend_handles_labels()
fig.legend(handles, labels, loc='upper right')
fig.suptitle('Artificial Ice Reservoirs', fontsize=16)
# plt.legend()
plt.savefig("data/paper/try2.jpg", bbox_inches="tight", dpi=300)
    df_out = df_out.dropna(thresh=1)
    df_out.to_csv("data/paper/results.csv")
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
import re
from struct import unpack
# SMP = Supplementary Multilingual Plane: https://en.wikipedia.org/wiki/Plane_(Unicode)#Overview
SMP_RE = re.compile(r"[\U00010000-\U0010FFFF]")
def add_surrogates(text):
# Replace each SMP code point with a surrogate pair
return SMP_RE.sub(
lambda match: # Split SMP in two surrogates
"".join(chr(i) for i in unpack("<HH", match.group().encode("utf-16le"))),
text
)
def remove_surrogates(text):
# Replace each surrogate pair with a SMP code point
return text.encode("utf-16", "surrogatepass").decode("utf-16")
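# Example round-trip on an SMP code point (U+1F600): add_surrogates splits it
# into a UTF-16 surrogate pair, and remove_surrogates restores the original.
assert remove_surrogates(add_surrogates("\U0001F600")) == "\U0001F600"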
|
from usure.preprocessing.cleaning.twittercorpuscleaner import TwitterCorpusCleaner
def can_remove_all_id_test():
cleaner = TwitterCorpusCleaner()
text = "\"406449856862232577\",\"\"Las despedidas no deberian existir\"\""
cleaned_text = "\"\"Las despedidas no deberian existir\"\""
    processed_text = cleaner.clean(text)
    assert processed_text == cleaned_text
|
# Python Exercise 067
# Show the multiplication table of several numbers, one at a time, for each value entered by the user
# The program stops when N is negative
while True:
    print('=-=' * 14)
    n = int(input('Which number do you want the multiplication table for? '))
    if n < 0:
        break
    contador = 0
    while contador < 10:
        contador = contador + 1
        produto = n * contador
        print(f'{n} x {contador:2} = {produto}')
print('DONE!') |
# Generated by Django 2.1.1 on 2018-09-19 19:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('custom', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='riskfield',
name='risk_type',
field=models.ForeignKey(
default=1,
on_delete=django.db.models.deletion.CASCADE,
related_name='risk_fields',
to='custom.RiskType'
),
preserve_default=False,
),
]
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"say_hello": "00_core.ipynb",
"say_bye": "00_core.ipynb",
"optimize_bayes_param": "01_bayes_opt.ipynb",
"ReadTabBatchIdentity": "02_tab_ae.ipynb",
"TabularPandasIdentity": "02_tab_ae.ipynb",
"TabDataLoaderIdentity": "02_tab_ae.ipynb",
"RecreatedLoss": "02_tab_ae.ipynb",
"BatchSwapNoise": "02_tab_ae.ipynb",
"TabularAE": "02_tab_ae.ipynb",
"HyperparamsGenerator": "03_param_finetune.ipynb",
"XgboostParamGenerator": "03_param_finetune.ipynb",
"LgbmParamGenerator": "03_param_finetune.ipynb",
"CatParamGenerator": "03_param_finetune.ipynb",
"RFParamGenerator": "03_param_finetune.ipynb",
"ModelIterator": "04_model_zoo.ipynb"}
modules = ["core.py",
"bayes_opt.py",
"tab_ae.py",
"params.py",
"model_iterator.py"]
doc_url = "https://DavidykZhao.github.io/Yikai_helper_funcs/"
git_url = "https://github.com/DavidykZhao/Yikai_helper_funcs/tree/master/"
def custom_doc_links(name): return None
|
import tensorflow as tf
import numpy as np
print(tf.__version__)
from tensorflow.contrib.learn.python.learn.datasets import base
IRIS_TRAINING="iris_training.csv"
IRIS_TEST="iris_test.csv"
training_set=base.load_csv_with_header(filename=IRIS_TRAINING,features_dtype=np.float32,target_dtype=np.int)
test_set=base.load_csv_with_header(filename=IRIS_TEST,features_dtype=np.float32,target_dtype=np.int)
feature_name="flower_features"
feature_columns=[tf.feature_column.numeric_column(feature_name,shape=[4])]
classifier=tf.estimator.LinearClassifier(feature_columns=feature_columns,n_classes=3,
model_dir="\\tmp\\iris_model")
def input_fn(dataset):
def _fn():
features={feature_name:tf.constant(dataset.data)}
label=tf.constant(dataset.target)
return features,label
return _fn
classifier.train(input_fn=input_fn(training_set),steps=1000)
print("fit done")
accuracy_score =classifier.evaluate(input_fn=input_fn(test_set),steps=100)["accuracy"]
print("\nAccuracy: {0:f}".format(accuracy_score))
feature_spec={'flower_features':tf.FixedLenFeature(shape=[4],dtype=np.float32)}
serving_fn=tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
classifier.export_savedmodel(export_dir_base="\\tmp\\iris_model"+"\\export",serving_input_receiver_fn=serving_fn) |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
def cf_timeseriesinsights_cl(cli_ctx, *_):
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from .vendored_sdks.timeseriesinsights import TimeSeriesInsightsClient
return get_mgmt_service_client(cli_ctx,
TimeSeriesInsightsClient)
def cf_environment(cli_ctx, *_):
return cf_timeseriesinsights_cl(cli_ctx).environments
def cf_event_source(cli_ctx, *_):
return cf_timeseriesinsights_cl(cli_ctx).event_sources
def cf_reference_data_set(cli_ctx, *_):
return cf_timeseriesinsights_cl(cli_ctx).reference_data_sets
def cf_access_policy(cli_ctx, *_):
return cf_timeseriesinsights_cl(cli_ctx).access_policies
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
from collections import namedtuple
from flask import Flask
from flask_cors import CORS
from threading import Thread
from builtins import str
from gevent.pywsgi import WSGIServer
from typing import Text, Optional, Union, List
import rasa_core
from rasa_core import constants, agent
from rasa_core import utils, server
from rasa_core.agent import Agent
from rasa_core.channels import (
console, RestInput, InputChannel,
BUILTIN_CHANNELS)
from rasa_core.constants import DOCS_BASE_URL
from rasa_core.interpreter import (
NaturalLanguageInterpreter)
from rasa_core.utils import read_yaml_file
from rasa_core.bot_server_channel import BotServerInputChannel
logger = logging.getLogger() # get the root logger
AvailableEndpoints = namedtuple('AvailableEndpoints', 'nlg '
'nlu '
'action '
'model')
def create_argument_parser():
"""Parse all the command line arguments for the run script."""
parser = argparse.ArgumentParser(
description='starts the bot')
parser.add_argument(
'-d', '--core',
required=True,
type=str,
help="core model to run")
parser.add_argument(
'-u', '--nlu',
type=str,
help="nlu model to run")
parser.add_argument(
'-p', '--port',
default=constants.DEFAULT_SERVER_PORT,
type=int,
help="port to run the server at")
parser.add_argument(
'--auth_token',
type=str,
help="Enable token based authentication. Requests need to provide "
"the token to be accepted.")
parser.add_argument(
'--cors',
nargs='*',
type=str,
help="enable CORS for the passed origin. "
"Use * to whitelist all origins")
parser.add_argument(
'-o', '--log_file',
type=str,
default="rasa_core.log",
help="store log file in specified file")
parser.add_argument(
'--credentials',
default=None,
help="authentication credentials for the connector as a yml file")
parser.add_argument(
'--endpoints',
default=None,
help="Configuration file for the connectors as a yml file")
parser.add_argument(
'-c', '--connector',
default="cmdline",
choices=["facebook", "slack", "telegram", "mattermost", "cmdline",
"twilio", "botframework", "rocketchat","bot"],
help="service to connect to")
parser.add_argument(
'--enable_api',
action="store_true",
help="Start the web server api in addition to the input channel")
utils.add_logging_option_arguments(parser)
return parser
def read_endpoints(endpoint_file):
nlg = utils.read_endpoint_config(endpoint_file,
endpoint_type="nlg")
nlu = utils.read_endpoint_config(endpoint_file,
endpoint_type="nlu")
action = utils.read_endpoint_config(endpoint_file,
endpoint_type="action_endpoint")
model = utils.read_endpoint_config(endpoint_file,
endpoint_type="models")
return AvailableEndpoints(nlg, nlu, action, model)
def _raise_missing_credentials_exception(channel):
raise Exception("To use the {} input channel, you need to "
"pass a credentials file using '--credentials'. "
"The argument should be a file path pointing to"
"a yml file containing the {} authentication"
"information. Details in the docs: "
"{}/connectors/#{}-setup".
format(channel, channel, DOCS_BASE_URL, channel))
def _create_external_channels(channel, credentials_file):
# type: (Optional[Text], Optional[Text]) -> List[InputChannel]
# the commandline input channel is the only one that doesn't need any
# credentials
print(channel, credentials_file, "external_chanlles son3")
if channel == "cmdline":
from rasa_core.channels import RestInput
return [RestInput()]
if channel is None and credentials_file is None:
# if there is no configuration at all, we'll run without a channel
return []
elif credentials_file is None:
# if there is a channel, but no configuration, this can't be right
_raise_missing_credentials_exception(channel)
all_credentials = read_yaml_file(credentials_file)
    if channel:
        return [_create_single_channel(channel, all_credentials)]
    else:
        return [_create_single_channel(c, k)
for c, k in all_credentials.items()]
def _create_single_channel(channel, credentials):
if channel == "facebook":
from rasa_core.channels.facebook import FacebookInput
return FacebookInput(
credentials.get("verify"),
credentials.get("secret"),
credentials.get("page-access-token"))
elif channel == "slack":
from rasa_core.channels.slack import SlackInput
return SlackInput(
credentials.get("slack_token"),
credentials.get("slack_channel"))
elif channel == "telegram":
from rasa_core.channels.telegram import TelegramInput
return TelegramInput(
credentials.get("access_token"),
credentials.get("verify"),
credentials.get("webhook_url"))
elif channel == "mattermost":
from rasa_core.channels.mattermost import MattermostInput
return MattermostInput(
credentials.get("url"),
credentials.get("team"),
credentials.get("user"),
credentials.get("pw"))
elif channel == "twilio":
from rasa_core.channels.twilio import TwilioInput
return TwilioInput(
credentials.get("account_sid"),
credentials.get("auth_token"),
credentials.get("twilio_number"))
elif channel == "botframework":
from rasa_core.channels.botframework import BotFrameworkInput
return BotFrameworkInput(
credentials.get("app_id"),
credentials.get("app_password"))
elif channel == "rocketchat":
from rasa_core.channels.rocketchat import RocketChatInput
return RocketChatInput(
credentials.get("user"),
credentials.get("password"),
credentials.get("server_url"))
elif channel == "rasa":
from rasa_core.channels.rasa_chat import RasaChatInput
return RasaChatInput(
credentials.get("url"),
credentials.get("admin_token"))
elif channel == "bot":
from rasa_core.bot_server_channel import BotServerInputChannel
return BotServerInputChannel()
else:
raise Exception("This script currently only supports the "
"{} connectors."
"".format(", ".join(BUILTIN_CHANNELS)))
def create_http_input_channels(channel, # type: Union[None, Text, RestInput]
credentials_file # type: Optional[Text]
):
# type: (...) -> List[InputChannel]
"""Instantiate the chosen input channel."""
print(channel, credentials_file, "create_http son2")
if channel is None or channel in rasa_core.channels.BUILTIN_CHANNELS:
return _create_external_channels(channel, credentials_file)
else:
try:
c = utils.class_from_module_path(channel)
return [c()]
except Exception:
raise Exception("Unknown input channel for running main.")
def start_cmdline_io(server_url, on_finish, **kwargs):
kwargs["server_url"] = server_url
kwargs["on_finish"] = on_finish
p = Thread(target=console.record_messages,
kwargs=kwargs)
p.start()
def start_server(input_channels,
cors,
auth_token,
port,
initial_agent,
enable_api=True):
"""Run the agent."""
if enable_api:
app = server.create_app(initial_agent,
cors_origins=cors,
auth_token=auth_token)
else:
app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": cors or ""}})
if input_channels:
rasa_core.channels.channel.register(input_channels,
app,
initial_agent.handle_message,
route="/webhooks/")
if logger.isEnabledFor(logging.DEBUG):
utils.list_routes(app)
http_server = WSGIServer(('0.0.0.0', port), app)
logger.info("Rasa Core server is up and running on "
"{}".format(constants.DEFAULT_SERVER_URL))
http_server.start()
return http_server
def serve_application(initial_agent,
channel=None,
port=constants.DEFAULT_SERVER_PORT,
credentials_file=None,
cors=None,
auth_token=None,
enable_api=True
):
print(channel, credentials_file, "server_app son")
input_channels = create_http_input_channels(channel, credentials_file)
http_server = start_server(input_channels, cors, auth_token,
port, initial_agent, enable_api)
if channel == "cmdline":
start_cmdline_io(constants.DEFAULT_SERVER_URL, http_server.stop)
try:
http_server.serve_forever()
except Exception as exc:
logger.exception(exc)
def load_agent(core_model, interpreter, endpoints,
tracker_store=None,
wait_time_between_pulls=100):
if endpoints.model:
return agent.load_from_server(
interpreter=interpreter,
generator=endpoints.nlg,
action_endpoint=endpoints.action,
model_server=endpoints.model,
tracker_store=tracker_store,
wait_time_between_pulls=wait_time_between_pulls
)
else:
return Agent.load(core_model,
interpreter=interpreter,
generator=endpoints.nlg,
tracker_store=tracker_store,
action_endpoint=endpoints.action)
if __name__ == '__main__':
# Running as standalone python application
arg_parser = create_argument_parser()
cmdline_args = arg_parser.parse_args()
logging.getLogger('werkzeug').setLevel(logging.WARN)
logging.getLogger('matplotlib').setLevel(logging.WARN)
utils.configure_colored_logging(cmdline_args.loglevel)
utils.configure_file_logging(cmdline_args.loglevel,
cmdline_args.log_file)
logger.info("Rasa process starting")
_endpoints = read_endpoints(cmdline_args.endpoints)
_interpreter = NaturalLanguageInterpreter.create(cmdline_args.nlu,
_endpoints.nlu)
_agent = load_agent(cmdline_args.core,
interpreter=_interpreter,
endpoints=_endpoints)
serve_application(_agent,
cmdline_args.connector,
cmdline_args.port,
cmdline_args.credentials,
cmdline_args.cors,
cmdline_args.auth_token,
cmdline_args.enable_api)
|
import os
import tornado.web
import tornado.ioloop
from tornado.options import define, options, parse_command_line
from tornado_face.face.views import RegisterHandler, InitDbHandler, LoginbHandler
define('port', default=8090, type=int)
def make_app():
return tornado.web.Application(handlers=[
(r'/register/', RegisterHandler),
(r'/init_db/', InitDbHandler),
(r'/login/', LoginbHandler),
],
template_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'),
static_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static'),
)
if __name__ == '__main__':
parse_command_line()
app = make_app()
app.listen(options.port)
tornado.ioloop.IOLoop.current().start()
|
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import glob
from IPython.display import clear_output
from numpy import expand_dims
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import ImageDataGenerator
from matplotlib import pyplot
def show_img(img, bigger=False):
if bigger:
plt.figure(figsize=(10,10))
image_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(image_rgb)
plt.show()
def sharpen(img, sigma=200):
    # typical sigma values: 5, 15, 25
blur_img = cv2.GaussianBlur(img, (0, 0), sigma)
usm = cv2.addWeighted(img, 1.5, blur_img, -0.5, 0)
return usm
# sharpen
def img_processing(img):
# do something here
img = sharpen(img)
return img
# like sharpen
def enhance_details(img):
hdr = cv2.detailEnhance(img, sigma_s=12, sigma_r=0.15)
return hdr
# restoring models (OpenCV dnn_superres); a shared helper avoids four near-identical bodies
def _upsample_with(origin_img, model_name, model_path):
    sr = cv2.dnn_superres.DnnSuperResImpl_create()
    sr.readModel(model_path)
    sr.setModel(model_name, 4)
    return sr.upsample(origin_img)
def edsr(origin_img):
    return _upsample_with(origin_img, "edsr", "EDSR_x4.pb")
def espcn(origin_img):
    return _upsample_with(origin_img, "espcn", "ESPCN_x4.pb")
def fsrcnn(origin_img):
    return _upsample_with(origin_img, "fsrcnn", "FSRCNN_x4.pb")
def lapsrn(origin_img):
    return _upsample_with(origin_img, "lapsrn", "LapSRN_x4.pb")
def uint_to_float(img, method='NTSC'):
    img = img.astype(np.float32) / 255
    b,g,r = cv2.split(img)
    if method == 'average':
        gray = (r + g + b) / 3
    elif method == 'NTSC':
        gray = 0.2989*r + 0.5870*g + 0.1140*b
    else:
        raise ValueError("method must be 'average' or 'NTSC'")
    #gray = (gray*255).astype('uint8')
    return gray |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from key_door import constants, wrapper
try:
import cv2
import matplotlib
from matplotlib import cm
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ImportError:
raise AssertionError(
"To use visualisation wrapper, further package requirements need to be satisfied. Please consult README."
)
class VisualisationEnv(wrapper.Wrapper):
COLORMAP = cm.get_cmap("plasma")
def __init__(self, env):
super().__init__(env=env)
def render(
self,
save_path: Optional[str] = None,
dpi: Optional[int] = 60,
format: str = "state",
) -> None:
        if format == constants.STATE:
            assert self._env.active, (
                "To render map with state, environment must be active. "
                "Call reset_environment() to reset environment and make it active. "
                "Else render the stationary environment skeleton using format='stationary'."
            )
if save_path:
fig = plt.figure()
plt.imshow(
self._env._env_skeleton(
rewards=format, keys=format, doors=format, agent=format
),
origin="lower",
)
fig.savefig(save_path, dpi=dpi)
else:
plt.imshow(
                self._env._env_skeleton(
rewards=format, keys=format, doors=format, agent=format
),
origin="lower",
)
def visualise_episode_history(
self, save_path: str, history: Union[str, List[np.ndarray]] = "train"
) -> None:
"""Produce video of episode history.
Args:
save_path: name of file to be saved.
history: "train", "test" to plot train or test history, else provide an independent history.
"""
if isinstance(history, str):
if history == constants.TRAIN:
history = self._env.train_episode_history
elif history == constants.TEST:
history = self._env.test_episode_history
elif history == constants.TRAIN_PARTIAL:
history = self._env.train_episode_partial_history
elif history == constants.TEST_PARTIAL:
history = self._env.test_episode_partial_history
SCALING = 20
FPS = 30
map_shape = history[0].shape
frameSize = (SCALING * map_shape[1], SCALING * map_shape[0])
out = cv2.VideoWriter(
filename=save_path,
fourcc=cv2.VideoWriter_fourcc("m", "p", "4", "v"),
fps=FPS,
frameSize=frameSize,
)
for frame in history:
bgr_frame = frame[..., ::-1].copy()
flipped_frame = np.flip(bgr_frame, 0)
scaled_up_frame = np.kron(flipped_frame, np.ones((SCALING, SCALING, 1)))
out.write((scaled_up_frame * 255).astype(np.uint8))
out.release()
def plot_heatmap_over_env(
self,
heatmap: Dict[Tuple[int, int], float],
fig: Optional[matplotlib.figure.Figure] = None,
ax: Optional[matplotlib.axes.Axes] = None,
save_name: Optional[str] = None,
) -> None:
        assert (ax is not None and fig is not None) or save_name is not None, (
            "Either must provide an axis and figure to plot the heatmap over, "
            "or a file name to save a separate figure."
        )
environment_map = self._env._env_skeleton(
rewards=None, keys=None, doors=None, agent=None
)
all_values = list(heatmap.values())
current_max_value = np.max(all_values)
current_min_value = np.min(all_values)
for position, value in heatmap.items():
# remove alpha from rgba in colormap return
# normalise value for color mapping
environment_map[position[::-1]] = self.COLORMAP(
(value - current_min_value) / (current_max_value - current_min_value)
)[:-1]
        if save_name is not None:
            # create a fresh figure only when saving separately; otherwise the
            # caller-provided fig/ax pair used below must not be overwritten.
            fig = plt.figure()
            plt.imshow(environment_map, origin="lower", cmap=self.COLORMAP)
            plt.colorbar()
            fig.savefig(save_name, dpi=60)
else:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
im = ax.imshow(environment_map, origin="lower", cmap=self.COLORMAP)
fig.colorbar(im, ax=ax, cax=cax, orientation="vertical")
plt.close()
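# Minimal usage sketch (illustrative only): `base_env` stands in for some
# key_door environment constructed elsewhere; the wrapper takes no other
# arguments.
#
#     env = VisualisationEnv(base_env)
#     env.render(save_path="skeleton.pdf", format="stationary")
#     env.visualise_episode_history("episode.mp4", history="train")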
|
# @file: Individual that contains the 'DNA' for each individual in the population
# @author: Daniel Yuan
from random import randint, uniform
class Individual(object):
def __init__(self, vector, search_range, dimension=None):
        # Ensure that an Individual has a search range and either an explicit
        # vector or a dimension to generate one from.
        assert search_range and (vector or dimension)
self.search_range = search_range
if vector:
self.vector = vector
self.dimension = len(vector)
else:
self.dimension = dimension
self.vector = self._generate_random_vector()
def get_vector(self):
return self.vector
def crossover(self, other_vector, mutation_rate):
        if self.dimension != other_vector.dimension:
raise Exception('Trying to cross over vectors of different dimensions')
cross_index = randint(0, self.dimension - 1)
if uniform(0, 1) > 0.5:
first_vector = self.vector
second_vector = other_vector.vector
else:
first_vector = other_vector.vector
second_vector = self.vector
new_vector = []
for i in range(self.dimension):
should_mutate = uniform(0, 1) <= mutation_rate
value = 0
if should_mutate:
value = uniform(self.search_range.get_min(), self.search_range.get_max())
elif i < cross_index:
value = first_vector[i]
else:
value = second_vector[i]
new_vector.append(value)
return Individual(new_vector, self.search_range)
def get_fitness(self, problem):
self.fitness = problem.eval(self.vector)
return self.fitness
def _generate_random_vector(self):
vector = []
for _ in range(self.dimension):
min_value = self.search_range.get_min()
max_value = self.search_range.get_max()
vector.append(uniform(min_value, max_value))
return vector
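# Minimal usage sketch (illustrative only): `SearchRange` below is a
# hypothetical stand-in for whatever range object the project supplies; it
# only needs get_min()/get_max().
if __name__ == '__main__':
    class SearchRange(object):
        def get_min(self):
            return -5.0
        def get_max(self):
            return 5.0
    parent_a = Individual(None, SearchRange(), dimension=4)
    parent_b = Individual(None, SearchRange(), dimension=4)
    child = parent_a.crossover(parent_b, mutation_rate=0.1)
    print(child.get_vector())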
|
#!/usr/bin/python
#coding=utf8
import pandas as pd
def readBigData(filePath, delim, header=None):
    reader = pd.read_csv(filePath, header=header, delimiter=delim, iterator=True)
loop = True
chunkSize = 100000
chunks = []
while loop:
try:
chunk = reader.get_chunk(chunkSize)
chunks.append(chunk)
except StopIteration:
loop = False
print "Iteration is stopped."
df = pd.concat(chunks, ignore_index=True)
return df
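# Illustrative usage (hypothetical file name):
#   df = readBigData('big_file.csv', ',')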
|
#!/usr/bin/env python
def gcd_mod(a, b):
'''
    Division-based version.
    From: The Art of Computer Programming, Volume 2, pages 319-320
    '''
    while b != 0:
# t holds the value of b while the next remainder b is being calculated.
t = b
b = a % b
a = t
return a
def gcd_sub(a, b):
'''
Subtraction based version.
From: The Art of Computer Programming, Volume 2, pages 318-319
'''
while a != b:
if a > b:
a = a - b
else:
b = b - a
return a
def gcd_recursive(a, b):
'''
Recursive based version.
From: Numbers and Geometry, Stillwell, page 14
'''
if b == 0:
return a
else:
return gcd_recursive(b, a % b)
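# Quick sanity check (illustrative only): all three versions should agree on
# a few hand-picked pairs; gcd_sub assumes positive inputs.
if __name__ == '__main__':
    for a, b in [(48, 18), (3, 2), (270, 192)]:
        assert gcd_mod(a, b) == gcd_sub(a, b) == gcd_recursive(a, b)
    print('gcd implementations agree')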
|
#!/usr/bin/env python3
'''
This script ...
'''
import argparse
# https://docs.python.org/3/library/argparse.html
import itertools
import os
import screed
import networkx as nx
from tqdm import tqdm
from panprimer.utils import \
load_graph, \
mask_nodes, \
extract_color, \
neighbors, \
orient_nodes_on_genome
from panprimer.wrappers import design_primers
parser = argparse.ArgumentParser(
description='Find primers in genome graphs')
parser.add_argument('--maxlen', default=4000, type=int,
help='Maximum len of PCR product')
parser.add_argument('--minlen', default=400, type=int,
help='Minimum len of PCR product')
parser.add_argument('-n', default=10, type=int,
help='Number of candidate primer pairs in result (best first)')
parser.add_argument('--outfile', default='primers.csv',
help='Where to store primers')
parser.add_argument('--graph', required=True,
help='Genome graph with color annotation [.gfa]')
parser.add_argument('--index', required=True,
help='Index of the color annotation')
parser.add_argument('--pattern', required=True,
help='Which genomes to include and which to exclude (0, 1) [.csv]')
parser.add_argument('--debug', action='store_true',
help='Store some debugging logs')
parser.add_argument('--genome', required=True,
help='Genome to validate primer positions')
parser.add_argument('--maxpairs', required=True,
help='Maximum primer site pairs before we stop the recursion madness')
args = parser.parse_args()
maxlen = args.maxlen
minlen = args.minlen
limits = [minlen, maxlen]
npairs = args.n
outfile = args.outfile
fp_pattern = args.pattern
fp_gfa = args.graph
fp_ix = args.index
genome = args.genome
max_pairs = int(args.maxpairs)
'''
Each node in the genome graph has a set of colors, which means that the
represented sequence of DNA is present in this subset of organisms. We want
to be able to assign genomes to an include and an exclude set (1 and 0,
respectively). We do this through a pattern (mask) on the color index of the
genome graph. E.g.
[1, 0, 1, 0] in a genome graph w/ 4 genomes means that we only want to find
primers for the 1st and 3rd genome, that are NOT present on the 2nd and 4th.
'''
is_included_genome = {}
with open(fp_pattern, 'r') as patternfile:
for line in patternfile:
binary, path = line.strip().split(',')
is_included_genome[os.path.basename(path)] = int(binary)
with open(fp_ix, 'r') as ixfile:
files = [os.path.basename(i) for i in next(ixfile).split(' ')][:-1]
# -1 .. last entry is a space so last item is '' (empty)
pattern = [is_included_genome[i] for i in files]
if args.debug:
with open('log', 'w+') as log:
log.write(' '.join([str(i) for i in pattern]))
# Parse .gfa
print('Loading graph ...')
G, strands = load_graph(fp_gfa)
# Return candidate nodes, defined as nodes through which all genomes in the
# "include" set pass, but nodes in the "exclude" set don't.
candidates = mask_nodes(G, pattern)
# print(f'{len(candidates)} candidate nodes match pattern')
d = {}
print('Checking candidates against colors ...')
'''
This step looks through the candidates and checks whether they are suitable for
a PCR. For example: Are two nodes close enough so that a polymerase can span
the distance between them for all genomes in the include set?
We also make sure that we can traverse the graph from primer 1 to primer 2
using each color, ie that this path exists for all genomes in the include set.
'''
for color in tqdm(range(len(pattern))):
if pattern[color] != 1:
# Discard color bc/ it is not in the "include" set
# TODO: We could generalize this to n primer sets, e.g. for 3 species
continue
# Extract all nodes that are traversed by a single genome
# G_ = extract_color(G, i)
pairs, singletons = set(), set()
# TODO: Stop if a sufficient number of candidates have been found
for node in candidates:
# If there are few genomes to exclude, most nodes are potential
# primer sites. So we cap them here to not search forever.
if len(pairs) >= max_pairs:
continue
seq = G.nodes[node]['sequence']
# If node is of sufficient length save. We can probably find 2
# primers on this segment. If not ...
if len(seq) > minlen:
# TODO: only include the singleton if found in all colors
singletons.add(node)
continue
        # ... traverse its neighborhood to see if any other candidate is
        # reachable. The polymerase can only span so many nucleotides.
for n in neighbors(G, node, color, limits):
if (n != node) and (n in candidates):
pairs.add(tuple(sorted([node, n])))
# sorted .. (1, 3) is the same as (3, 1) to us
# tuple .. hashable, so we can call set() on it
d[color] = pairs
# Make sure that 0'ed genomes do not appear in results
d = {k: v for k, v in d.items() if pattern[k] == 1}
# https://stackoverflow.com/questions/30773911/union-of-multiple-sets-in-python
valid_pairs = list(set.intersection(*map(set, d.values())))
print(f'Found {len(valid_pairs) + len(singletons)} potential primer region(s)')
sequences = [(G.nodes[n]['sequence'], 'singleton', [n]) for n in singletons]
# Now we make sure the sequence between valid pairs is not too short and on
# the same strand (we'll glue them together later in a synthetic contig to
# feed into primer3, see below).
for i, j in valid_pairs:
'''
See how the path leaving from one primer unitig can be - and +,
depending on the path:
grep "2428690" pangenome.gfa
S 2428690 GGATGTTAA... DA:Z:2
L 1686469 + 2428690 + 30M
L 1686484 - 2428690 - 30M
L 2428690 - 1686469 - 30M
L 2428690 + 1686484 + 30M
'''
si = G.nodes[i]['sequence']
sj = G.nodes[j]['sequence']
# For primer3 design we need sequence fragments on the same strand,
# see below.
with screed.open(genome) as file:
for read in file:
try:
seq = orient_nodes_on_genome(read.sequence, si, sj, minlen)
except KeyError:
continue
# if not oss:
# sj = screed.rc(sj)
# if invert:
# si, sj = sj, si
# Create an artificial contig by connecting the two fragments w/ some Ns
# TODO: Potential error here! si and sj need to be in the same 5'-3' order
# as in the original sequence otherwise we create an artificial inversion
# (and the PCR won't work bc/ the primers' 3' ends "point away from one
# another").
# seq = si + 'N' * minlen + sj
sequences.append((seq, 'pair', [i, j])) # i, j are nodes in the graph
# TODO: Check that the primers are unique in the genome. Or make this an option.
# TODO: separate script that shows insert size distribution and annotation and
# coordinates spanned by the primers.
print('Designing primers ...')
designs = []
for seq, type_, nodes in tqdm(sequences):
singleton = True if type_ == 'singleton' else False
try:
u = design_primers(seq, singleton, minlen=minlen, maxlen=maxlen)
v = '::'.join([str(n) for n in nodes])
_ = [i.append(v) for i in u]
designs.append(*u)
except (KeyError, OSError) as e: # no valid primers found
continue
# [['ATCACTGATGGATTTGACGT', 'TACCCCAAAATGGCTAGAAC', 54.76, 55.01, 'NA', 0.251, '4324814::4324817']]
# [['AGGTTGTGTGGTTCGAATC', 'AAGCGGAGATCATACCCTTA', 55.45, 55.41, 'NA', 1.8569, '4324789::4324793']]
# Sort inplace by penalty value, smallest to largest
designs.sort(key=lambda x: x[5])
# print(designs[:10])
'''
A case occurs where a singleton can also be reached from a smaller fragment,
i.e. it ends up in a "valid pair". Then primer3 finds the best primer in the
singleton, but because it is part of a pair, "NA" is written as the PCR
product length.
['GCCTGTT...', 'CCCGAGC...', 54.99, 54.99, 465, 0.0146, '152624'],
['GCCTGTT...', 'CCCGAGC...', 54.99, 54.99, 'NA', 0.0146, '152624,152646'],
This can happen multiple times, if a singleton is part of more than one valid pair:
['GCCTGTT...', 'CCCGAGC...', 54.99, 54.99, 'NA', 0.0146, '152624,152646'],
['GCCTGTT...', 'CCCGAGC...', 54.99, 54.99, 'NA', 0.0146, '152624,154425'],
'''
# Deduplicate
result = {}
# Note that the results are sorted by penalty bc/ adding to dict since Python
# v3.6 preserves order.
for i, j, *rest, nodes in designs:
p = tuple(sorted([i, j])) # p .. pair
if len(nodes.split('::')) == 1: # singleton
result[p] = [i, j, *rest, nodes]
else:
# If there is no entry for the primer pair from a singleton, then add
try:
_ = result[p]
except KeyError:
result[p] = [i, j, *rest, nodes]
print(f'Found {len(result)} primer pair(s), will save the best {npairs if npairs < len(result) else len(result)}.')
with open(outfile, 'w+') as out:
out.write('fwd,rev,Tm fwd,Tm rev,product,penalty,nodes\n') # header
for v in itertools.islice(result.values(), npairs):
out.write(','.join(str(i) for i in v) + '\n')
|
class Solution(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        # 0,1,...,n-1,n: the sum of the array (if no number were missing)
        # would be n*(n+1)/2; subtracting every existing num leaves the
        # number that is missing from the array.
        n = len(nums)
        total = n * (n + 1) // 2
        for num in nums:
            total -= num
        return total
    # Note: the summation approach can overflow fixed-width integers if n is very large!
    # XOR is a better choice there:
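# Hedged alternative sketch (not part of the original solution): XOR-ing all
# indices 0..n with all values cancels every present number, leaving the
# missing one, with no overflow risk.
class SolutionXor(object):
    def missingNumber(self, nums):
        result = len(nums)
        for i, num in enumerate(nums):
            result ^= i ^ num
        return result |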
from .tikzeng import *
#define new block
def block_2ConvPool( name, botton, top, s_filer=256, n_filer=64, offset="(1,0,0)", size=(32,32,3.5), opacity=0.5 ):
return [
to_ConvConvRelu(
name="ccr_{}".format( name ),
s_filer=str(s_filer),
n_filer=(n_filer,n_filer),
offset=offset,
to="({}-east)".format( botton ),
width=(size[2],size[2]),
height=size[0],
depth=size[1],
),
to_Pool(
name="{}".format( top ),
offset="(0,0,0)",
to="(ccr_{}-east)".format( name ),
width=1,
height=size[0] - int(size[0]/4),
depth=size[1] - int(size[0]/4),
opacity=opacity, ),
to_connection(
"{}".format( botton ),
"ccr_{}".format( name )
)
]
def block_Unconv( name, botton, top, s_filer=256, n_filer=64, offset="(1,0,0)", size=(32,32,3.5), opacity=0.5 ):
return [
to_UnPool( name='unpool_{}'.format(name), offset=offset, to="({}-east)".format(botton), width=1, height=size[0], depth=size[1], opacity=opacity ),
to_ConvRes( name='ccr_res_{}'.format(name), offset="(0,0,0)", to="(unpool_{}-east)".format(name), s_filer=str(s_filer), n_filer=str(n_filer), width=size[2], height=size[0], depth=size[1], opacity=opacity ),
to_Conv( name='ccr_{}'.format(name), offset="(0,0,0)", to="(ccr_res_{}-east)".format(name), s_filer=str(s_filer), n_filer=str(n_filer), width=size[2], height=size[0], depth=size[1] ),
to_ConvRes( name='ccr_res_c_{}'.format(name), offset="(0,0,0)", to="(ccr_{}-east)".format(name), s_filer=str(s_filer), n_filer=str(n_filer), width=size[2], height=size[0], depth=size[1], opacity=opacity ),
to_Conv( name='{}'.format(top), offset="(0,0,0)", to="(ccr_res_c_{}-east)".format(name), s_filer=str(s_filer), n_filer=str(n_filer), width=size[2], height=size[0], depth=size[1] ),
to_connection(
"{}".format( botton ),
"unpool_{}".format( name )
)
]
def block_Res( num, name, botton, top, s_filer=256, n_filer=64, offset="(0,0,0)", size=(32,32,3.5), opacity=0.5 ):
lys = []
layers = [ *[ '{}_{}'.format(name,i) for i in range(num-1) ], top]
for name in layers:
ly = [ to_Conv(
name='{}'.format(name),
offset=offset,
to="({}-east)".format( botton ),
s_filer=str(s_filer),
n_filer=str(n_filer),
width=size[2],
height=size[0],
depth=size[1]
),
to_connection(
"{}".format( botton ),
"{}".format( name )
)
]
botton = name
lys+=ly
lys += [
to_skip( of=layers[1], to=layers[-2], pos=1.25),
]
return lys
|
import sys
class MyStack:
"""push-down スタッククラス"""
def __init__(self, stacksize):
"""コンストラクタ(初期化)"""
# フィールド
self.mystack = [ ' ' for i in range(stacksize) ]
self.top = -1;
    def pushdown(self, data):
        """Push an item onto the stack."""
        self.top += 1
        if self.top >= len(self.mystack):
print('stack overflow', file=sys.stderr)
sys.exit(0)
self.mystack[self.top] = data
    def popup(self):
        """Pop an item off the stack."""
if (self.top < 0):
print('popup from empty stack', file=sys.stderr)
sys.exit(0)
self.top -= 1
return self.mystack[self.top+1]
    def isempty(self):
        """Return True if the stack is empty."""
        return self.top < 0
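# Illustrative usage:
if __name__ == '__main__':
    s = MyStack(3)
    s.pushdown('a')
    s.pushdown('b')
    print(s.popup())    # -> b
    print(s.isempty())  # -> False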
|
"""Merkle Tree backed by LevelDB.
Operates (and owns) a LevelDB database of leaves which can be updated.
"""
import plyvel
import math
import struct
import merkle
def _down_to_power_of_two(n):
"""Returns the power-of-2 closest to n."""
if n < 2:
raise ValueError("N should be >= 2: %d" % n)
log_n = math.log(n, 2)
p = int(log_n)
# If n is exactly power of 2 then 2**p would be n, decrease p by 1.
if p == log_n:
p -= 1
return 2**p
def encode_int(n):
"""Encode an integer into a big-endian bytestring."""
return struct.pack(">I", n)
def decode_int(n):
"""Decode a big-endian bytestring into an integer."""
    return struct.unpack(">I", n)[0]
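# Round-trip sanity check for the helpers above (illustrative only):
#   encode_int(5) == b'\x00\x00\x00\x05'  and  decode_int(encode_int(5)) == 5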
class LeveldbMerkleTree(object):
"""LevelDB Merkle Tree representation."""
def __init__(self, leaves=None, db="./merkle_db", leaves_db_prefix='leaves-', index_db_prefix='index-', stats_db_prefix='stats-'):
"""Start with the LevelDB database of leaves provided."""
self.__hasher = IncrementalTreeHasher()
self.__db = plyvel.DB(db, create_if_missing=True)
self.__leaves_db_prefix = leaves_db_prefix
self.__index_db_prefix = index_db_prefix
self.__stats_db_prefix = stats_db_prefix
self.__leaves_db = self.__db.prefixed_db(leaves_db_prefix)
self.__index_db = self.__db.prefixed_db(index_db_prefix)
self.__stats_db = self.__db.prefixed_db(stats_db_prefix)
if leaves is not None:
self.extend(leaves)
def close(self):
self.__db.close()
@property
def tree_size(self):
return int(self.__stats_db.get('tree_size', default='0'))
@property
def sha256_root_hash(self):
return self.get_root_hash()
@property
def leaves_db_prefix(self):
return self.__leaves_db_prefix
@property
def index_db_prefix(self):
return self.__index_db_prefix
@property
def stats_db_prefix(self):
return self.__stats_db_prefix
def get_leaf(self, leaf_index):
"""Get the leaf at leaf_index."""
return self.__leaves_db.get(encode_int(leaf_index))
def get_leaves(self, start=0, stop=None):
"""Get leaves from the range [start, stop)."""
if stop is None:
stop = self.tree_size
return [l for l in self.__leaves_db.iterator(start=encode_int(start), stop=encode_int(stop), include_key=False)]
def add_leaf(self, leaf):
"""Adds |leaf| to the tree, returning the index of the entry."""
cur_tree_size = self.tree_size
leaf_hash = self.__hasher.hash_leaf(leaf)
with self.__db.write_batch() as wb:
wb.put(self.__leaves_db_prefix + encode_int(cur_tree_size), leaf_hash)
wb.put(self.__index_db_prefix + leaf_hash, encode_int(cur_tree_size))
wb.put(self.__stats_db_prefix + 'tree_size', str(cur_tree_size + 1))
return cur_tree_size
def extend(self, new_leaves):
"""Extend this tree with new_leaves on the end."""
cur_tree_size = self.tree_size
leaf_hashes = [self.__hasher.hash_leaf(l) for l in new_leaves]
with self.__db.write_batch() as wb:
for lf in leaf_hashes:
wb.put(self.__leaves_db_prefix + encode_int(cur_tree_size), lf)
wb.put(self.__index_db_prefix + lf, encode_int(cur_tree_size))
cur_tree_size += 1
wb.put(self.__stats_db_prefix + 'tree_size', str(cur_tree_size))
def get_leaf_index(self, leaf_hash):
"""Returns the index of the leaf hash, or -1 if not present."""
raw_index = self.__index_db.get(leaf_hash)
if raw_index:
return decode_int(raw_index)
else:
return -1
def get_root_hash(self, tree_size=None):
"""Returns the root hash of the tree denoted by |tree_size|."""
if tree_size is None:
tree_size = self.tree_size
if tree_size > self.tree_size:
raise ValueError("Specified size beyond known tree: %d" % tree_size)
return self.__hasher.hash_full_tree(self.get_leaves(stop=tree_size))
def _calculate_subproof(self, m, leaves, complete_subtree):
"""SUBPROOF, see RFC6962 section 2.1.2."""
n = len(leaves)
if m == n or n == 1:
if complete_subtree:
return []
else:
return [self.__hasher.hash_full_tree(leaves)]
k = _down_to_power_of_two(n)
if m <= k:
node = self.__hasher.hash_full_tree(leaves[k:n])
res = self._calculate_subproof(m, leaves[0:k], complete_subtree)
else:
# m > k
node = self.__hasher.hash_full_tree(leaves[0:k])
res = self._calculate_subproof(m - k, leaves[k:n], False)
res.append(node)
return res
def get_consistency_proof(self, tree_size_1, tree_size_2=None):
"""Returns a consistency proof between two snapshots of the tree."""
if tree_size_2 is None:
tree_size_2 = self.tree_size
if tree_size_1 > self.tree_size or tree_size_2 > self.tree_size:
raise ValueError("Requested proof for sizes beyond current tree:"
" current tree: %d tree_size_1 %d tree_size_2 %d" % (
self.tree_size, tree_size_1, tree_size_2))
if tree_size_1 > tree_size_2:
raise ValueError("tree_size_1 must be less than tree_size_2")
if tree_size_1 == tree_size_2 or tree_size_1 == 0:
return []
return self._calculate_subproof(
tree_size_1, self.get_leaves(stop=tree_size_2), True)
def _calculate_inclusion_proof(self, leaves, leaf_index):
"""Merkle audit path, RFC6962 Section 2.1.1."""
n = len(leaves)
if n == 0 or n == 1:
return []
k = _down_to_power_of_two(n)
m = leaf_index
if m < k:
mth_k_to_n = self.__hasher.hash_full_tree(leaves[k:n])
path = self._calculate_inclusion_proof(leaves[0:k], m)
path.append(mth_k_to_n)
else:
mth_0_to_k = self.__hasher.hash_full_tree(leaves[0:k])
path = self._calculate_inclusion_proof(leaves[k:n], m - k)
path.append(mth_0_to_k)
return path
def get_inclusion_proof(self, leaf_index, tree_size=None):
"""Returns an inclusion proof for leaf at |leaf_index|."""
if tree_size is None:
tree_size = self.tree_size
if tree_size > self.tree_size:
raise ValueError("Specified tree size is beyond known tree: %d" %
tree_size)
if leaf_index >= self.tree_size:
raise ValueError("Requested proof for leaf beyond tree size: %d" %
leaf_index)
return self._calculate_inclusion_proof(
self.get_leaves(stop=tree_size), leaf_index)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.__hasher)
class IncrementalTreeHasher(merkle.TreeHasher):
def _hash_full(self, leaves, l_idx, r_idx):
"""Hash the leaves between (l_idx, r_idx) as a valid entire tree.
Note that this is only valid for certain combinations of indexes,
depending on where the leaves are meant to be located in a parent tree.
Returns:
(root_hash, hashes): where root_hash is that of the entire tree,
and hashes are that of the full (i.e. size 2^k) subtrees that form
the entire tree, sorted in descending order of size.
"""
width = r_idx - l_idx
if width < 0 or l_idx < 0 or r_idx > len(leaves):
raise IndexError("%s,%s not a valid range over [0,%s]" % (
l_idx, r_idx, len(leaves)))
elif width == 0:
return self.hash_empty(), ()
elif width == 1:
leaf_hash = leaves[l_idx]
return leaf_hash, (leaf_hash,)
else:
# next smallest power of 2
split_width = 2**((width - 1).bit_length() - 1)
assert split_width < width <= 2*split_width
l_root, l_hashes = self._hash_full(leaves, l_idx, l_idx+split_width)
assert len(l_hashes) == 1 # left tree always full
r_root, r_hashes = self._hash_full(leaves, l_idx+split_width, r_idx)
root_hash = self.hash_children(l_root, r_root)
return (root_hash, (root_hash,) if split_width*2 == width else
l_hashes + r_hashes)
|
#!/usr/bin/python
# coding=utf-8
print('{0:.3f}'.format(1.0/3))
print('{0:_^11}'.format('kai'))
print(r'\'{name}\' wrote\n "{book}"'.format(name='kai', book='good book'))
# print('kai', end=' ')  # python3
# print('is')
# print('good man')
s = '''This is Multi Line String
Second Line
Third Line'''
print(s)
number = 23
guess = int(input('input a number: '))
if guess == number:
print ('you guess it')
elif guess < number:
print ('oh, small')
else:
print ("ok great than {}".format(number))
print ('Done.')
running = True
while running:
guess = int(input('input a number: '))
if guess == number:
print ('you guess it')
        running = False
else:
print ("error, again.")
else:
print ('while end')
for i in range(1, 9, 2):
if i >= 7:
print ('i >= 7')
        break  # with break, the for-else clause does not run
elif i == 5:
print("i==5")
continue
else:
print (i)
else:
print('for end')
|
import constants as c
import logging
import rosbag
import os
import multiprocessing
topics_to_check = set([
'/TactileSensor4/Accelerometer',
'/TactileSensor4/Dynamic',
'/TactileSensor4/EulerAngle',
'/TactileSensor4/Gyroscope',
'/TactileSensor4/Magnetometer',
'/TactileSensor4/StaticData',
'/anomaly_detection_signal',
'/robot/limb/right/endpoint_state',
'/robotiq_force_torque_wrench'])
def check_bag(folder_path):
bag_path = os.path.join(folder_path, 'record.bag')
if not os.path.isfile(bag_path):
return (False, "not os.path.isfile(bag_path)")
with open(bag_path, 'rb') as bag_f:
bag = rosbag.Bag(bag_f)
type_info, topic_info = bag.get_type_and_topic_info()
topics_contained = set(topic_info.keys())
if not topics_to_check.issubset(topics_contained):
return (False, "not topics_to_check.issubset(topics_contained)")
signals = []
for topic_name, msg, gen_time in bag.read_messages(topics=["/anomaly_detection_signal"]):
if len(signals) == 0:
signals.append(msg)
else:
time_diff = (msg.stamp-prev_msg.stamp).to_sec()
if time_diff > 1:
signals.append(msg)
elif time_diff < 0:
raise Exception("Weird error: messages read from rosbag are not in time-increasing order")
prev_msg = msg
label_path = os.path.join(folder_path, 'anomaly_labels.txt')
if len(signals) == 0:
if os.path.isfile(label_path):
return (False, "len(signals) == 0 but os.path.isfile(label_path)")
else:
if not os.path.isfile(label_path):
return (False, "not len(signals) == 0 but not os.path.isfile(label_path)")
with open(label_path, 'r') as label_f:
label_amount = len([i for i in label_f.readlines() if i.strip() != ""])
if label_amount != len(signals):
return (False, "label_amount %s != len(signals) %s"%(label_amount, len(signals)))
return (True, None)
def run():
logger = logging.getLogger("filter_bad_bags")
logger.info("Filtering bad bags")
pool = multiprocessing.Pool()
async_results = []
with open(c.rosbag_folder_names_txt, 'r') as txt:
for line in txt:
async_result = pool.apply_async(check_bag, args=(line.strip(),))
async_results.append((line.strip(), async_result))
with open(c.good_rosbag_folder_names_txt, 'w') as good_txt,\
open(c.bad_rosbag_folder_names_txt, 'w') as bad_txt:
for line, async_result in async_results:
is_good, debug_info = async_result.get()
            if is_good:
good_txt.write(line)
good_txt.write('\n')
else:
bad_txt.write(line)
bad_txt.write(" ")
bad_txt.write(debug_info)
bad_txt.write('\n')
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
run()
|
import setuptools
__version__ = '1.0'
setuptools.setup(
name="msfbe",
version=__version__,
url="https://github-fn.jpl.nasa.gov/methanesourcefinder/msf-be.git",
author="Caltech/Jet Propulsion Laboratory",
description="MSF-BE API.",
long_description=open('README.md').read(),
package_dir={'':'src'},
packages=['msfbe', 'msfbe.handlers'],
package_data={'msfbe': ['config.ini']},
data_files=[
],
platforms='any',
install_requires=[
'tornado',
'numpy',
'singledispatch',
'pytz',
'requests',
'utm',
'shapely==1.7.1',
'mock',
'backports.functools-lru-cache==1.3',
'boto3==1.15.17',
'pillow==5.0.0',
'psycopg2==2.8.6',
'six',
'psutil'
],
classifiers=[
'Development Status :: 1 - Pre-Alpha',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
]
)
|
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
# -*- Mode: python -*-
from lib.common import *
################################################################
# Code borrowed and adapted from Impacket's rpcdump.py example #
################################################################
class RpcDump(object):
def __init__(self):
pass
def rpcdump(self):
logger.info('Retrieving RPC endpoint list')
self.__rpc_connect()
entries = self.__fetchList()
endpoints = {}
        # Let's group the UUIDs
for entry in entries:
binding = epm.PrintStringBinding(entry['tower']['Floors'], self.trans.get_dip())
tmpUUID = str(entry['tower']['Floors'][0])
            if not endpoints.has_key(tmpUUID):
endpoints[tmpUUID] = {}
endpoints[tmpUUID]['Bindings'] = list()
if ndrutils.KNOWN_UUIDS.has_key(uuid.uuidtup_to_bin(uuid.string_to_uuidtup(tmpUUID))[:18]):
endpoints[tmpUUID]['EXE'] = ndrutils.KNOWN_UUIDS[uuid.uuidtup_to_bin(uuid.string_to_uuidtup(tmpUUID))[:18]]
else:
endpoints[tmpUUID]['EXE'] = 'N/A'
endpoints[tmpUUID]['annotation'] = entry['annotation'][:-1]
endpoints[tmpUUID]['Bindings'].append(binding)
if epm.KNOWN_PROTOCOLS.has_key(tmpUUID[:36]):
endpoints[tmpUUID]['Protocol'] = epm.KNOWN_PROTOCOLS[tmpUUID[:36]]
else:
endpoints[tmpUUID]['Protocol'] = 'N/A'
#print 'Transfer Syntax: %s' % entry['Tower']['Floors'][1]
for endpoint in endpoints.keys():
print 'Protocol: %s ' % endpoints[endpoint]['Protocol']
print 'Provider: %s ' % endpoints[endpoint]['EXE']
print 'UUID : %s %s' % (endpoint, endpoints[endpoint]['annotation'])
print 'Bindings: '
for binding in endpoints[endpoint]['Bindings']:
print ' %s' % binding
print
if entries:
num = len(entries)
if 1 == num:
logger.info('Received one RPC endpoint')
else:
logger.info('Received %d endpoints' % num)
else:
logger.info('No endpoints found')
def __rpc_connect(self):
'''
Connect to epmapper named pipe
'''
logger.debug('Connecting to the epmapper named pipe')
self.smb_transport('epmapper')
self.__dce = self.trans.get_dce_rpc()
self.__dce.connect()
#self.__dce.set_auth_level(ntlm.NTLM_AUTH_PKT_PRIVACY)
#self.__dce.bind(epm.MSRPC_UUID_PORTMAP)
def __rpc_disconnect(self):
'''
Disconnect from epmapper named pipe
'''
logger.debug('Disconnecting from the epmapper named pipe')
self.__dce.disconnect()
    def __fetchList(self):
        resp = epm.hept_lookup(self.trans.get_dip())
        self.__rpc_disconnect()
        return resp
|
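# The original snippet relies on a `Counter` decorator defined elsewhere; a
# minimal sketch of one is assumed here so the calls below run.
class Counter:
    """Decorator that counts how many times the wrapped function is called."""
    def __init__(self, func):
        self.func = func
        self.count = 0
    def __call__(self, *args, **kwargs):
        self.count += 1
        return self.func(*args, **kwargs)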
@Counter
def say_hello():
print("hello")
say_hello()
say_hello()
say_hello()
say_hello()
assert say_hello.count == 4
|
"""Unit test package for xlavir."""
|
import numpy as np
from pathlib import Path
import pandas as pd
from tensorflow.keras.models import load_model
import sys
import inspect
cpath = Path(inspect.getfile(sys.modules[__name__])).resolve().parent
def transform_features(x, f="cos"):
if f == "cos":
return np.cos(x)
elif f == "sin":
return np.sin(x)
elif f == "tanh":
return np.tanh(x)
class QPowerModel:
"""
Use to evaluate quadrant power splits from control drum configurations.
Set up as init, then separately use method call to minimize reading times.
"""
def __init__(self):
        # Find and load the model file
model_file = cpath / Path("tools/microreactor_power_model.h5")
self.raw_model = load_model(model_file)
def eval(self, pert):
pert2 = pert.copy()
pertn = np.array([pert2, ])
unorm = self.raw_model.predict(pertn).flatten()
return unorm/unorm.sum()
def qPowerModel(pert):
"""Wrapper for QPowerModel that initializes and runs"""
a = QPowerModel()
return a.eval(pert)
if __name__ == "__main__":
thetas = np.zeros(8)
thetas[[6,7]] -= np.pi
print(qPowerModel(thetas))
|
from abc import ABC  # Abstract Base Classes
from collections.abc import MutableSequence
class Playlist(MutableSequence):
    # MutableSequence is abstract: __getitem__, __setitem__, __delitem__,
    # __len__ and insert are required before instantiation (the original also
    # instantiated Playlist inside its own class body, which raises NameError).
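    # Hedged sketch (assumption): a plain-list backing store satisfying the
    # MutableSequence abstract methods; the original snippet left these out.
    def __init__(self):
        self._items = []
    def __getitem__(self, index):
        return self._items[index]
    def __setitem__(self, index, value):
        self._items[index] = value
    def __delitem__(self, index):
        del self._items[index]
    def __len__(self):
        return len(self._items)
    def insert(self, index, value):
        self._items.insert(index, value)
filmes = Playlist()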
|
import yaml
import json
import os
from autoremovetorrents import logger
from autoremovetorrents.task import Task
from autoremovetorrents.compatibility.open_ import open_
def test_task(qbittorrent_mocker):
    # Logger
lg = logger.Logger.register(__name__)
# Set root directory
root_dir = os.path.join(os.path.realpath(os.path.dirname(__file__)))
lg.info('Root directory: %s', root_dir)
qbittorrent_mocker()
# Load files in directory
for file in os.listdir(os.path.join(root_dir, 'cases')):
file_path = os.path.join(root_dir, 'cases', file)
if os.path.isfile(file_path):
lg.info('Loading file: %s', file)
with open_(file_path, 'r', encoding='utf-8') as f:
conf = yaml.safe_load(f)
# Run task
instance = Task(file, conf['task'], False)
instance.execute()
assert len(instance.get_removed_torrents()) == conf['result']['num-of-removed'] |
a, b, c = list(map(int, input().split()))
tot = 0
if abs(a - b) < abs(c - b):
tot = abs(c - b)
else:
tot = abs(a - b)
print(tot - 1)
|
from jiaowu import JiaoWu
if __name__ == '__main__':
    username = input('Student ID: ')
    password = input('Unified authentication password: ')
jiaowu = JiaoWu(username, password)
    # Log in
if jiaowu.login():
        print('Login successful!')
else:
        print('Login failed! Please check your username and password')
exit()
    # Query the timetable; the value '1|秋、冬' is a server-side token and is kept as-is
    year = input('Academic year (e.g. 2021-2022): ')
    semester = input('Semester (e.g. 1|秋、冬): ')
if year == '':
year = '2021-2022'
if semester == '':
semester = '1|秋、冬'
    print('\nYour timetable:')
course_list = jiaowu.get_course(year, semester)
for i in range(len(course_list)):
print(course_list[i])
    # Query course grades
    print('\nYour course grades:')
course_info_list = jiaowu.get_score()
for i in range(len(course_info_list)):
print(course_info_list[i])
    # Query the overall GPA
    print('\nYour overall GPA:', jiaowu.get_gpa())
    # Query major course grades
    print('\nYour major course grades:')
major_course_info_list = jiaowu.get_major_score()
for i in range(len(major_course_info_list)):
print(major_course_info_list[i])
    # Query the major GPA
    print('\nYour major GPA:', jiaowu.get_mgpa())
    # Query grade-correction announcements
score_announce_list = jiaowu.get_score_announce()
if score_announce_list == 0:
        print('\nYou have no grade-correction announcements!')
else:
        print('\nYour grade-correction announcements:')
for i in range(len(score_announce_list)):
print(score_announce_list[i])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-16 00:36
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion
import school.options.tools
class Migration(migrations.Migration):
dependencies = [
('website', '0011_remove_news_relation'),
]
operations = [
migrations.CreateModel(
name='Collective',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=255, null=True, verbose_name='Ad Soyad')),
('picture', models.ImageField(blank=True, null=True, upload_to=school.options.tools.get_user_profile_photo_file_name)),
('content', ckeditor.fields.RichTextField(blank=True, null=True)),
('status', models.BooleanField(default=True, verbose_name='Sayta görünüşü')),
('date', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'Multimediya',
'verbose_name_plural': 'Multimediya',
'ordering': ('-id',),
},
),
migrations.CreateModel(
name='Contact_us',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=255, null=True, verbose_name='Başlığı')),
('about', models.CharField(blank=True, max_length=255, null=True, verbose_name='Haqqında')),
('phone_1', models.CharField(blank=True, max_length=100, null=True, verbose_name='Telefon nömrəsi 1')),
('phone_2', models.CharField(blank=True, max_length=100, null=True, verbose_name='Telefon nömrəsi 2')),
('phone_3', models.CharField(blank=True, max_length=100, null=True, verbose_name='Telefon nömrəsi 3')),
('email_1', models.EmailField(blank=True, max_length=254, null=True, verbose_name='Email 1')),
('email_2', models.EmailField(blank=True, max_length=254, null=True, verbose_name='Email 2')),
('email_3', models.EmailField(blank=True, max_length=254, null=True, verbose_name='Email 3')),
],
options={
'verbose_name': 'Bizimlə Əlaqə',
'verbose_name_plural': 'Bizimlə Əlaqə',
'ordering': ('-id',),
},
),
migrations.CreateModel(
name='Events',
fields=[
('news_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='website.News')),
],
options={
'verbose_name': 'Tədbir',
'verbose_name_plural': 'Tədbirlər',
'ordering': ('-id',),
},
bases=('website.news',),
),
migrations.CreateModel(
name='Multimedia',
fields=[
('news_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='website.News')),
('video', models.URLField(blank=True, null=True, verbose_name='Videonun linki')),
],
options={
'verbose_name': 'Multimediya',
'verbose_name_plural': 'Multimediya',
'ordering': ('-id',),
},
bases=('website.news',),
),
migrations.AlterField(
model_name='news',
name='cover_picture',
field=models.ImageField(blank=True, null=True, upload_to=school.options.tools.get_news_photo_file_name, verbose_name='Örtük şəkli'),
),
]
|
#!/usr/bin/python
import sys
import pickle
sys.path.append("../tools/")
import numpy as np
import matplotlib.pyplot as plt
from functions import *
from feature_format import featureFormat, targetFeatureSplit
from tester import dump_classifier_and_data
### Task 1: Select what features you'll use.
### features_list is a list of strings, each of which is a feature name.
### The first feature must be "poi".
#features_list = ['poi','salary', 'total_payments', 'total_stock_value', 'exercised_stock_options', 'poi_messages', 'from_poi_fraction', 'to_poi_fraction' ] # You will need to use more features
#features_list = ['poi','salary', 'total_payments', 'total_stock_value', 'exercised_stock_options', 'from_poi_to_this_person', 'from_this_person_to_poi' ] # You will need to use more features
features_list = (['poi', 'salary', 'deferral_payments',
'total_payments', 'loan_advances', 'bonus',
'restricted_stock_deferred', 'deferred_income',
'total_stock_value', 'expenses',
'exercised_stock_options', 'other',
'long_term_incentive', 'restricted_stock',
'director_fees','to_messages',
'from_poi_to_this_person', 'from_messages',
'from_this_person_to_poi', 'shared_receipt_with_poi',
'poi_messages', 'from_poi_fraction',
'to_poi_fraction'])
# NOTE: this second assignment overrides the list above, dropping the three
# engineered features; keep whichever variant should be active.
features_list = (['poi', 'salary', 'deferral_payments',
'total_payments', 'loan_advances', 'bonus',
'restricted_stock_deferred', 'deferred_income',
'total_stock_value', 'expenses',
'exercised_stock_options', 'other',
'long_term_incentive', 'restricted_stock',
'director_fees','to_messages',
'from_poi_to_this_person', 'from_messages',
'from_this_person_to_poi', 'shared_receipt_with_poi'])
### Load the dictionary containing the dataset
with open("final_project_dataset.pkl", "r") as data_file:
data_dict = pickle.load(data_file)
counter = 0
for value in data_dict.values():
if value['poi']:
counter += 1
### Task 2: Remove outliers
del data_dict['TOTAL']
del data_dict['THE TRAVEL AGENCY IN THE PARK']
del data_dict['LOCKHART EUGENE E']
# Turn 'NaN' values to 0
salary = []
total_payments = []
# turn NaNs to 0
salary, total_payments = nanToNumber(
salary, total_payments, 'salary', 'total_payments', data_dict)
# reshape the lists
salary, total_payments = reshape(salary, total_payments)
# create a regression prediction line
predictions = plotWithPrediction(salary, total_payments)
# clean the outliers from the lists
salary, total_payments, errors = cleanAndPlot(
predictions, salary, total_payments)
# remove the outliers from the main dictionary
removeOutliersFromDict(
salary, total_payments, 'salary', 'total_payments', data_dict)
# from and to poi email outlier removal
from_poi = []
to_poi = []
from_poi, to_poi = nanToNumber(
from_poi, to_poi,
'from_poi_to_this_person', 'from_this_person_to_poi',
data_dict)
from_poi, to_poi = reshape(from_poi, to_poi)
predictions = plotWithPrediction(from_poi, to_poi)
from_poi, to_poi, errors = cleanAndPlot(
predictions, from_poi, to_poi)
removeOutliersFromDict(from_poi, to_poi,
'from_poi_to_this_person', 'from_this_person_to_poi',
data_dict)
# total stock value and exercised stock options outlier removal
total_stock_value = []
exercised_stock_options = []
total_stock_value, exercised_stock_options = nanToNumber(
total_stock_value, exercised_stock_options,
'total_stock_value', 'exercised_stock_options',
data_dict)
total_stock_value, exercised_stock_options = reshape(
total_stock_value, exercised_stock_options)
predictions = plotWithPrediction(total_stock_value, exercised_stock_options)
total_stock_value, exercised_stock_options, errors = cleanAndPlot(
predictions, total_stock_value, exercised_stock_options)
removeOutliersFromDict(
total_stock_value, exercised_stock_options,
'total_stock_value', 'exercised_stock_options',
data_dict)
### Task 3: Create new feature(s)
# create poi_messages, from_poi_fraction, and to_poi_fraction
for value in data_dict.values():
if value['from_poi_to_this_person'] == 'NaN':
value['from_poi_to_this_person'] = 0
if value['from_this_person_to_poi'] == 'NaN':
value['from_this_person_to_poi'] = 0
if value['to_messages'] == 'NaN':
value['to_messages'] = 0
if value['from_messages'] == 'NaN':
value['from_messages'] = 0
if value['shared_receipt_with_poi'] == 'NaN':
value['shared_receipt_with_poi'] = 0
try:
value['poi_messages'] = (
float(value['from_poi_to_this_person'] +
value['from_this_person_to_poi'])) / (
value['to_messages'] + value['from_messages'])
except ZeroDivisionError:
value['poi_messages'] = 0
for value in data_dict.values():
try:
value['from_poi_fraction'] = float(
value['from_poi_to_this_person']) / value['from_messages']
except ZeroDivisionError:
value['from_poi_fraction'] = 0
try:
value['to_poi_fraction'] = float(
value['from_this_person_to_poi']) / value['to_messages']
except ZeroDivisionError:
value['to_poi_fraction'] = 0
### Store to my_dataset for easy export below.
my_dataset = data_dict
### Extract features and labels from dataset for local testing
data = featureFormat(my_dataset, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
from sklearn.feature_selection import SelectKBest
### Task 4: Try a variety of classifiers
### Please name your classifier clf for easy export below.
### Note that if you want to do PCA or other multi-stage operations,
### you'll need to use Pipelines. For more info:
### http://scikit-learn.org/stable/modules/pipeline.html
# Provided to give you a starting point. Try a variety of classifiers.
# import and setup
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_score, recall_score
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
features_train, features_test, labels_train, labels_test = train_test_split(
features, labels, test_size=0.3, random_state=42)
scaler = MinMaxScaler()
select = SelectKBest(k = 5)
# NOTE: fitting SelectKBest on the held-out test split leaks information into
# feature selection; fitting on the training split would be sounder.
kfit = select.fit(features_test, labels_test)
score = kfit.scores_
NB = GaussianNB()
# create pipeline steps
steps = [('scaling', scaler), ('selection', select), ('NB' , NB)]
# run the pipeline
pipeline = Pipeline(steps)
# create a list of tuples of the most important features and their scores
most_important_features = []
klist = kfit.get_support()
for i in range(len(klist)):
if klist[i]:
t = (features_list[i], kfit.scores_[i])
most_important_features.append(t)
most_important_features.sort(key = lambda x: x[1], reverse = True)
print most_important_features
# fit the pipeline and test the model
clf = pipeline.fit(features_train, labels_train)
pred = clf.predict(features_test)
print 'NB pipe accuracy:', clf.score(features_test, labels_test)
print 'NB pipe precision:', precision_score(labels_test, pred)
print 'NB pipe recall:', recall_score(labels_test, pred)
### Task 5: Tune your classifier to achieve better than .3 precision and recall
### using our testing script. Check the tester.py script in the final project
### folder for details on the evaluation method, especially the test_classifier
### function. Because of the small size of the dataset, the script uses
### stratified shuffle split cross validation. For more info:
### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html
# Example starting point. Try investigating other evaluation techniques!
### Task 6: Dump your classifier, dataset, and features_list so anyone can
### check your results. You do not need to change anything below, but make sure
### that the version of poi_id.py that you submit can be run on its own and
### generates the necessary .pkl files for validating your results.
dump_classifier_and_data(clf, my_dataset, features_list) |
from typing import List

class Solution:
    def maxNumOfSubstrings(self, s: str) -> List[str]:
start, end = {}, {}
for i, c in enumerate(s):
if c not in start:
start[c] = i
end[c] = i
        # Expand from index i to the furthest last-occurrence of any character
        # inside the window; return -1 if some character's first occurrence
        # lies before i (the substring cannot be self-contained).
        def checkSubstring(i):
            curr = i
            right = end[s[curr]]
            while curr <= right:
                if start[s[curr]] < i:
                    return -1
                right = max(right, end[s[curr]])
                curr += 1
            return right
        # Greedy scan left to right: a new valid window either starts after
        # the previous one (append) or is nested inside it, in which case the
        # smaller window replaces the last result.
        result = []
        prevRight = -1
for i, c in enumerate(s):
if i == start[c]:
right = checkSubstring(i)
if right != -1:
if i > prevRight:
result.append(s[i:right + 1])
else:
result[-1] = s[i:right + 1]
prevRight = right
return result
|
import asyncio
import datetime
import gettext
import math
import os
import random
import pyrogram
import speech_recognition
import time
from datetime import timedelta
from pyrogram.raw import types as raw_types
from pyrogram.raw.functions import phone
from pyrogram import types
from pyrogram.raw import base
from plugins.Bots.PyrogramBot.bot import PyrogramBot
from pyrogram import errors, ContinuePropagation
from plugins.Helpers import youtube_dl, media_convertor
from pyrogram.client import Client
from plugins.DataBase.mongo import MongoDataBase
from plugins.Google.google import Google
class PyrogramBotHandler:
"""
Pyrogram Handler
"""
    def __init__(self, webServer, pyrogramBot: PyrogramBot, mongoDataBase: MongoDataBase):
        self.webServer = webServer
self.pyrogramBot = pyrogramBot
self.groupCall = pyrogramBot.groupCall
self.mongoDataBase = mongoDataBase
#
# Horoscope
#
async def horoscope_command(self, client: Client, message: types.Message):
# TODO horoscope command
return
"""
Get horoscope data through site (https://ignio.com/r/export/utf/xml/daily/com.xml) [XML document]
Return text of horoscope of horo_sign for current day in format: horo_sign: horoscope
Return False if request failed
async with aiohttp.ClientSession() as session:
async with session.get('https://ignio.com/r/export/utf/xml/daily/com.xml') as response:
if response.status == 200:
string_xml = await response.text()
horoscope = xml.etree.ElementTree.fromstring(string_xml)
# for sign in horoscope.findall('aries'):
text = ''
for sign in horoscope:
if sign.tag == 'date':
continue
if sign.tag == horo_sign:
# string = ''
for day in sign:
if day.tag == 'today':
# string += day.tag + ':' + day.text
return sign.tag + ': ' + day.text
else:
return False
"""
async def language_command(self, client: Client, message: types.Message):
chat_member = await self.pyrogramBot.bot.get_chat_member(chat_id=message.chat.id, user_id=message.from_user.id)
        if chat_member.status != 'creator':
# TODO print message
return
try:
language_code = message.text.split(" ", maxsplit=1)[1]
except IndexError:
# language_code = message.from_user.language_code
language_code = 'ru'
language = self.webServer.languages.get(language_code)
if language:
language.install()
query = {'language_code': language_code}
return self.mongoDataBase.update_field(database_name='tbot', collection_name='init', action='$set',
query=query)
# ------------------------------------------------------------------------------------------------------------------
# Message ----------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
async def type_command(self, client: Client, msg: types.Message):
try:
orig_text = msg.text.split(" ", maxsplit=1)[1]
except IndexError:
return
text = orig_text
tbp = "" # to be printed
typing_symbol = "▒"
msg = await self.pyrogramBot.user.send_message(msg.chat.id, text)
        while tbp != orig_text:
            try:
                await msg.edit(tbp + typing_symbol)
                await asyncio.sleep(0.05)  # 50 ms; non-blocking so the event loop stays responsive
                tbp = tbp + text[0]
                text = text[1:]
                await msg.edit(tbp)
                await asyncio.sleep(0.05)
            except errors.FloodWait as e:
                await asyncio.sleep(e.x)
# ------------------------------------------------------------------------------------------------------------------
# Player -----------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
    async def volume_command(self, client: Client, message: types.Message):
        try:
            volume = int(message.text.split(" ", maxsplit=1)[1])
        except (IndexError, ValueError):
            return
        await self.groupCall.client.set_my_volume(volume=volume)
        print(f'Volume set to {volume}')
async def pause_command(self, client: Client, message: types.Message):
if not self.groupCall.client.is_connected:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(
text=_("I am not in voice channel (/join)"))
)
else:
if self.groupCall.client.is_audio_running:
if self.groupCall.client.is_audio_paused:
await self.groupCall.client.set_audio_pause(False, False)
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✔️{text}".format(text=_("Audio resumed"))
)
else:
await self.groupCall.client.set_audio_pause(True, False)
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✔️{text}".format(text=_("Audio paused"))
)
else:
if self.groupCall.client.is_video_running:
if self.groupCall.client.is_video_paused:
await self.groupCall.client.set_video_pause(False, False)
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✔️{text}".format(text=_("Video resumed"))
)
else:
await self.groupCall.client.set_video_pause(True, False)
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✔️{text}".format(text=_("Video paused"))
)
else:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(
text=_("Media is not playing"))
)
async def skip_command(self, client: Client, message: types.Message):
if not self.groupCall.client.is_connected:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(
text=_("I am not in voice channel (/join)"))
)
if not self.groupCall.client.is_audio_running and not self.groupCall.client.is_video_running:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(text=_("Media is not playing"))
)
await self.groupCall.client.stop_media()
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✔️{text}".format(text=_("Media skip"))
)
async def leave_command(self, client: Client, message: types.Message):
if not self.groupCall.client.is_connected:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(
text=_("I am not in voice channel (/join)"))
)
await self.groupCall.client.leave()
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✔️{text}".format(text=_("Thank you for kicking me out"))
)
async def queue_command(self, client: Client, message: types.Message):
try:
text = message.text.split(" ", maxsplit=1)[1]
try:
page = int(text)
except Exception:
# await message.reply_text('Wrong format of command. Use command /queue or /queue [page]')
# print('not int in queue [number]')
page = 1
except IndexError:
# blank command
page = 1
query = {'media.queue': 1}
document = self.mongoDataBase.get_document(database_name='tbot',
collection_name='chats',
filter={'chat_id': message.chat.id},
query=query)
try:
document = document['media']['queue']
except (IndexError, KeyError):
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(text=_("Media queue is empty"))
)
queue_count = len(document)
document_slice = document[(page - 1) * 10:page * 10 + 1]
if not document_slice:
page = 1
document_slice = document[:11]
texts = []
user_ids = []
for queue in document_slice:
texts.append(queue.get('text'))
user_ids.append(queue.get('user'))
users_objects = await self.pyrogramBot.bot.get_users(user_ids)
queries = []
for i, (text, user_id) in enumerate(zip(texts, user_ids)):
if i == 10:
break
for user_object in users_objects:
if user_object.id == user_id:
user_mention = f"[@{user_object.username}](tg://user?id={user_object.id})"
# query = f"({i + 1 + (page - 1) * 10}) `{text}` {_('added by')} {user_mention}"
query = "({element}) `{text}` {added_by} {user_mention}".format(element=i + 1 + (page - 1) * 10,
text=text,
added_by=_("added by"),
user_mention=user_mention)
queries.append(query)
break
if queries:
queries = '\n'.join(queries)
# text_reply = f"{message.chat.title} {_('media queue')} ({queue_count}):\n\n{queries}"
text_reply = "{chat_title} {media_queue} ({queue_count}):\n\n{queries}".format(
chat_title=message.chat.title,
media_queue=_("media queue"),
queue_count=queue_count,
queries=queries)
else:
# empy page
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(text=_("Media queue is empty"))
)
count_of_buttons = min(10, math.ceil(queue_count / 10))
if count_of_buttons > 1:
inlineKeyboardButtons = [types.InlineKeyboardButton(text=f"⏮", callback_data=f'/queue 1'),
types.InlineKeyboardButton(text=f"◀", callback_data=f'/queue {page - 1}'),
types.InlineKeyboardButton(text=f"▶", callback_data=f'/queue {page + 1}'),
types.InlineKeyboardButton(text=f"⏭", callback_data=f'/queue {count_of_buttons}')]
# for i in range(count_of_buttons):
# inlineKeyboardButtons.append(InlineKeyboardButton(text=f"{i+1}", callback_data=f'/queue {i+1}'))
reply_markup = types.InlineKeyboardMarkup([inlineKeyboardButtons])
else:
reply_markup = None
await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text=text_reply,
disable_notification=True,
disable_web_page_preview=True,
reply_markup=reply_markup)
async def queue_callback_query(self, client: Client, callback_query: types.CallbackQuery):
try:
text = callback_query.data.split(" ", maxsplit=1)[1]
try:
page = int(text)
except Exception:
# print('not int in queue [number]')
page = 1
except IndexError:
# print('blank command')
page = 1
message = callback_query.message
query = {'media.queue': 1}
document = self.mongoDataBase.get_document(database_name='tbot',
collection_name='chats',
filter={'chat_id': message.chat.id},
query=query)
try:
document = document['media']['queue']
except (IndexError, KeyError):
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(text=_("Media queue is empty"))
)
queue_count = len(document)
document_slice = document[(page - 1) * 10:page * 10 + 1]
if not document_slice:
page = 1
document_slice = document[:11]
texts = []
user_ids = []
for queue in document_slice:
texts.append(queue.get('text'))
user_ids.append(queue.get('user'))
users_objects = await self.pyrogramBot.bot.get_users(user_ids)
queries = []
for i, (text, user_id) in enumerate(zip(texts, user_ids)):
if i == 10:
break
for user_object in users_objects:
if user_object.id == user_id:
user_mention = f"[@{user_object.username}](tg://user?id={user_object.id})"
# query = f"({i + 1 + (page - 1) * 10}) `{text}` {_('added by')} {user_mention}"
query = "({element}) `{text}` {added_by} {user_mention}".format(element=i + 1 + (page - 1) * 10,
text=text,
added_by=_("added by"),
user_mention=user_mention)
queries.append(query)
break
if queries:
queries = '\n'.join(queries)
# text_reply = f"{message.chat.title} {_('media queue')} ({queue_count}):\n\n{queries}"
text_reply = "{chat_title} {media_queue} ({queue_count}):\n\n{queries}".format(
chat_title=message.chat.title,
media_queue=_("media queue"),
queue_count=queue_count,
queries=queries)
else:
# text_reply = f"{message.chat.title} {_('media queue')} ({queue_count}):\n\n{_('Empty page')}"
text_reply = "{chat_title} {media_queue} ({queue_count}):\n\n{queries}".format(
chat_title=message.chat.title,
media_queue=_("media queue"),
queue_count=queue_count,
queries=_("Empty page"))
count_of_buttons = min(10, math.ceil(queue_count / 10))
if count_of_buttons > 1:
inlineKeyboardButtons = [types.InlineKeyboardButton(text=f"⏮", callback_data=f'/queue 1'),
types.InlineKeyboardButton(text=f"◀", callback_data=f'/queue {page - 1}'),
types.InlineKeyboardButton(text=f"▶", callback_data=f'/queue {page + 1}'),
types.InlineKeyboardButton(text=f"⏭", callback_data=f'/queue {count_of_buttons}')]
# for i in range(count_of_buttons):
# inlineKeyboardButtons.append(InlineKeyboardButton(text=f"{i+1}", callback_data=f'/queue {i+1}'))
reply_markup = types.InlineKeyboardMarkup([inlineKeyboardButtons])
else:
reply_markup = None
try:
await self.pyrogramBot.bot.edit_message_text(chat_id=message.chat.id,
message_id=message.message_id,
text=text_reply,
disable_web_page_preview=True,
reply_markup=reply_markup)
except errors.MessageNotModified:
# print('Same page')
await callback_query.answer()
# try:
# callback_query.answer('button', show_alert=True)
# except (IndexError, KeyError):
# callback_query.answer(f"callback_query.answer Error", show_alert=True)
async def now_command(self, client: Client, message: types.Message):
if not self.groupCall.client.is_audio_running and not self.groupCall.client.is_video_running:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(text=_("Media is not playing"))
)
query = {'media.now': 1}
document = self.mongoDataBase.get_document(database_name='tbot',
collection_name='chats',
filter={'chat_id': message.chat.id},
query=query)
try:
document = document['media']['now']
except (IndexError, KeyError):
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(text=_("Media is not playing"))
)
now = {
'title': document.get('title'),
'url': document.get('url'),
'webpage_url': document.get('webpage_url'),
'channel_url': document.get('channel_url'),
'thumbnail': document.get('thumbnail'),
'uploader': document.get('uploader'),
'uploader_url': document.get('uploader_url'),
# 'thumbnail': document.get('thumbnail'),
'channel': document.get('channel'),
'duration': document.get('duration'),
'protocol': document.get('protocol'),
'user': document.get('user')
}
user = await self.pyrogramBot.bot.get_users(now['user'])
duration = f"({timedelta(seconds=int(now['duration']))})"
title = f"[{now['title']}]({now['webpage_url']})"
channel = f"[{now['uploader']}]({now['uploader_url']})"
user_mention = f"[@{user.username}](tg://user?id={now['user']})"
# text_reply = f"{_('Now playing')}\n" \
# f"{_('Title')}: {title}\n" \
# f"{_('Uploader')}: {channel}\n" \
# f"{_('Duration')}: {duration}\n" \
# f"{_('Added by')}{user_mention}\n"
text_reply = "{now_playing_text}\n" \
"{title_text}: {title}\n" \
"{uploader_text}: {uploader}\n" \
"{duration_text}: {duration}\n" \
"{added_by_text}: {user_mention}\n".format(now_playing_text=_("Now playing"),
title_text=_('Title'),
title=title,
uploader_text=_('Uploader'),
uploader=channel,
duration_text=_('Duration'),
duration=duration,
added_by_text=_('Added by'),
user_mention=user_mention)
try:
await self.pyrogramBot.bot.send_photo(chat_id=message.chat.id,
photo=now['thumbnail'],
caption=text_reply,
disable_notification=True)
except errors.WebpageMediaEmpty:
await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text=text_reply,
disable_notification=True,
disable_web_page_preview=True)
# print('not supported image for send_photo')
# print(now['thumbnail'])
async def lyrics_command(self, client: Client, message: types.Message):
try:
text = message.text.split(" ", maxsplit=1)[1]
except IndexError:
# print('blank command')
text = ''
if text:
# FIXME check if it returns error
lyrics = self.webServer.google.lyrics(song_name=text)
else:
if not self.groupCall.client.is_audio_running and not self.groupCall.client.is_video_running:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}\n{tip}".format(
text=_("Media is not playing"),
tip=_("Use /lyrics [song title] instead"))
)
query = {'media.now': 1}
document = self.mongoDataBase.get_document(database_name='tbot',
collection_name='chats',
filter={'chat_id': message.chat.id},
query=query)
try:
document = document['media']['now']
except (IndexError, KeyError):
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(text=_("Media is not playing"))
)
now = {
'title': document.get('title'),
'url': document.get('url'),
'webpage_url': document.get('webpage_url'),
'channel_url': document.get('channel_url'),
'thumbnail': document.get('thumbnail'),
'uploader': document.get('uploader'),
'uploader_url': document.get('uploader_url'),
# 'thumbnail': document.get('thumbnail'),
'channel': document.get('channel'),
'duration': document.get('duration'),
'protocol': document.get('protocol'),
'user': document.get('user')
}
lyrics = self.webServer.google.lyrics(song_name=now['title'])
if not lyrics:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(text=_("Lyrics not found"))
)
text_reply = f"[{lyrics['title']}]({lyrics['link']}):\n{lyrics['lyrics']}"
await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text=text_reply,
disable_notification=True,
disable_web_page_preview=True)
async def clear_command(self, client: Client, message: types.Message):
try:
text = message.text.split(" ", maxsplit=1)[1]
try:
count = int(text)
except Exception:
return
except IndexError:
# print('blank command')
count = 0
if count == 0:
query = {'media.queue': -1}
self.mongoDataBase.update_field(database_name='tbot',
collection_name='chats',
action='$unset',
filter={'chat_id': message.chat.id},
query=query)
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✔️{text}".format(text=_("Media queue cleared"))
)
else:
# $pop: a value of -1 removes the first element of the array, 1 removes the last
query = {'media.queue': int(-1 * math.copysign(1, count))}
for i in range(abs(count)):
self.mongoDataBase.update_field(database_name='tbot',
collection_name='chats',
action='$pop',
filter={'chat_id': message.chat.id},
query=query)
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✔️{count} {tracks_cleared}".format(count=abs(count),
tracks_cleared=_(
"track(s) cleared"))
)
async def join_command(self, client: Client, message: types.Message):
if not self.groupCall.client.is_connected:
try:
await self.groupCall.client.join(group=int(message.chat.id)) # , join_as=-1001571685575)
except Exception:
peer = await self.pyrogramBot.user.resolve_peer(message.chat.id)
startGroupCall = phone.CreateGroupCall(peer=raw_types.InputPeerChannel(channel_id=peer.channel_id,
access_hash=peer.access_hash),
random_id=int(self.pyrogramBot.bot.rnd_id()) // 9000000000)
try:
await self.pyrogramBot.user.send(startGroupCall)
await self.groupCall.client.join(group=message.chat.id)
except errors.ChatAdminRequired:
await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(
text=_("I need manage voice permission"))
)
return False
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✔️{text}".format(
text=_("Successfully connected to voice channel"))
)
else:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(
text=_("I am already in voice channel"))
)
async def play_command(self, client: Client, message: types.Message):
# TODO autoconnect
# print(message.chat.id)
# print(self.groupCall.client.is_audio_running, self.groupCall.client.is_video_running)
# print(self.groupCall.client.is_audio_paused, self.groupCall.client.is_video_paused)
try:
text = message.text.split(" ", maxsplit=1)[1]
query = {'media.queue': {'text': text, 'user': message.from_user.id}}
self.mongoDataBase.update_field(database_name='tbot', collection_name='chats', action='$push',
filter={'chat_id': message.chat.id}, query=query)
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✔️{text}".format(text=_("Successfully added to queue"))
)
except IndexError:
# print('blank command')
pass
if self.groupCall.client.is_audio_running or self.groupCall.client.is_video_running:
# return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
# text="✖️Media currently playing")
return
if not self.groupCall.client.is_connected:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(
text=_("I need to be in voice channel (/join)"))
)
# await self.join_command(client=client, message=message)
while True:
while self.groupCall.client.is_audio_running or self.groupCall.client.is_video_running:
await asyncio.sleep(1)
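# note: this is a while/else - the else block below runs each time the wait
# loop above exits normally (it contains no break), i.e. once nothing is playing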
else:
if not self.groupCall.client.is_connected:
return
query = {'media.queue': -1}
document = self.mongoDataBase.update_field(database_name='tbot', collection_name='chats',
action='$pop', filter={'chat_id': message.chat.id},
query=query)
try:
# print(document)
text = document['media']['queue'][0]['text']
user = document['media']['queue'][0]['user']
except (IndexError, KeyError):
query = {'media.now': 1}
self.mongoDataBase.update_field(database_name='tbot', collection_name='chats', action='$unset',
filter={'chat_id': message.chat.id}, query=query)
return
# queue = document['media']['queue']
search_engine = None
lip_sync = False
ydl_opts = {
'format': 'bestaudio/best[height<=?720][width<=?1280]',
'quiet': True,
'ignoreerrors': True,
'noplaylist': True,
}
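# Inline flags embedded in the queued text (semantics inferred from the
# branches below): @video@ requests a video stream, @sync@ enables the
# experimental lip sync, @yt@ / @sc@ force a YouTube or SoundCloud search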
if '@video@' in text:
text = text.replace('@video@', '', 1)
if '@sync@' in text:
text = text.replace('@sync@', '', 1)
lip_sync = True
ydl_opts['format'] = 'best'
if '@yt@' in text:
text = text.replace('@yt@', '', 1)
search_engine = 'ytsearch'
else:
if '@sc@' in text:
text = text.replace('@sc@', '', 1)
search_engine = 'scsearch'
text = text.strip()
info = youtube_dl.get_best_info_media(title=text, ydl_opts=ydl_opts, search_engine=search_engine)
if not info:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(text=_("Media load failed"))
)
# print(info)
# 'userID': data['userID'],
if isinstance(info, list):
# print(len(info))
# print(info)
info = info[0]
# print(info)
filter = {'chat_id': message.chat.id}
now = {
'title': info.get('title'),
'url': info.get('url'),
'webpage_url': info.get('webpage_url'),
'channel_url': info.get('channel_url'),
'uploader': info.get('uploader'),
'uploader_url': info.get('uploader_url'),
'thumbnail': info.get('thumbnail'),
# 'thumbnail': document['thumbnails'][len(document.get('thumbnails'))],
'channel': info.get('channel'),
'duration': info.get('duration'),
'protocol': info.get('protocol'),
'user': user
}
query = {'media.now': {'title': now['title'],
'url': now['url'],
'webpage_url': now['webpage_url'],
'channel_url': now['channel_url'],
'uploader': info.get('uploader'),
'uploader_url': info.get('uploader_url'),
'thumbnail': now['thumbnail'],
'channel': now['channel'],
'duration': now['duration'],
'user': now['user']}}
self.mongoDataBase.update_field(database_name='tbot', collection_name='chats', action='$set',
filter=filter, query=query)
# print('start playing')
if info.get('ext') in self.groupCall.audio_formats:
try:
await self.groupCall.client.start_audio(source=info['url'], repeat=False)
except Exception:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(
text=_("Audio playout failed"))
)
else:
if info.get('ext') in self.groupCall.video_formats:
try:
await self.groupCall.client.start_video(source=info['url'], repeat=False,
enable_experimental_lip_sync=lip_sync)
except Exception:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(
text=_("Video playout failed"))
)
else:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(
text=_("Media playout failed"))
)
user = await self.pyrogramBot.bot.get_users(now['user'])
duration = f"({timedelta(seconds=int(now['duration']))})"
title = f"[{now['title']}]({now['webpage_url']})"
channel = f"[{now['uploader']}]({now['uploader_url']})"
user_mention = f"[@{user.username}](tg://user?id={now['user']})"
# text_reply = f"{_('Playing from queue')}\n" \
# f"{_('Title')}: {title}\n" \
# f"{_('Uploader')}: {channel}\n" \
# f"{_('Duration')}: {duration}\n" \
# f"{_('Added by')} {user_mention}\n"
text_reply = "{playing_text}\n" \
"{title_text}: {title}\n" \
"{uploader_text}: {uploader}\n" \
"{duration_text}: {duration}\n" \
"{added_by_text}: {user_mention}\n".format(playing_text=_("Playing from queue"),
title_text=_('Title'),
title=title,
uploader_text=_('Uploader'),
uploader=channel,
duration_text=_('Duration'),
duration=duration,
added_by_text=_('Added by'),
user_mention=user_mention)
try:
await self.pyrogramBot.bot.send_photo(chat_id=message.chat.id, photo=now['thumbnail'],
caption=text_reply, disable_notification=True)
except errors.WebpageMediaEmpty:
# print('not supported image for send_photo')
# print(now['thumbnail'])
await self.pyrogramBot.bot.send_message(chat_id=message.chat.id, text=text_reply,
disable_notification=True, disable_web_page_preview=True)
# reply_markup=reply_markup)
async def answer_inline_result(self, client: Client, chosen_inline_result: types.ChosenInlineResult):
# chosen_inline_result.result_id
# inlineQueryResultArticle = InlineQueryResultArticle()
# print(chosen_inline_result)
print('chosen_inline_result')
# result = self.pyrogramBot.user.get_inline_bot_results(bot='@F0S_bot', query=chosen_inline_result.query)
# print(result)
async def play_inline_query(self, client: Client, inline_query: types.InlineQuery):
# query_message = inline_query.query
# from_user = inline_query.from_user
# print(query_message)
try:
text = inline_query.query.split(" ", maxsplit=1)[1]
# text = inline_query.query
except IndexError:
return
try:
ydl_opts = {
'skip_download': True,
'quiet': True,
'ignoreerrors': True,
'noplaylist': True,
}
search_engine = None
video = ''
sync = ''
if '@video@' in text:
text = text.replace('@video@', '', 1)
video = '@video@'
if '@sync@' in text:
text = text.replace('@sync@', '', 1)
sync = '@sync@'
if '@yt@' in text:
text = text.replace('@yt@', '', 1)
search_engine = 'ytsearch'
else:
if '@sc@' in text:
text = text.replace('@sc@', '', 1)
search_engine = 'scsearch'
text = text.strip()
info = youtube_dl.get_info_media(title=text,
ydl_opts=ydl_opts,
search_engine=search_engine,
result_count=6)
results = []
if not isinstance(info, list):
info = [info]
for result in info:
if not result:
# print('AttributeError, no results')
return
res = {
'title': result.get('title'),
'webpage_url': result.get('webpage_url'),
'thumbnail': result.get('thumbnail'),
'channel': result.get('channel'),
'uploader': result.get('uploader'),
'duration': result.get('duration'),
'protocol': result.get('protocol')
}
message_text = f"/play {res['webpage_url']} {video} {sync}"
# print(f"{input_message_content}")
duration = f"({timedelta(seconds=int(res['duration']))})"
# print(res)
results.append(types.InlineQueryResultArticle(
title=f"{duration} {res['title']}",
input_message_content=types.InputTextMessageContent(message_text=message_text,
parse_mode='markdown',
disable_web_page_preview=True),
url=res['webpage_url'],
description=res['uploader'],
thumb_url=res['thumbnail']
# reply_markup=InlineKeyboardMarkup([
# [InlineKeyboardButton(text="Add to queue", callback_data=f'/callback')]
# ])
))
# cache_time in seconds: 604800 = 7 days, 21600 = 6 hours
return await inline_query.answer(results=results, cache_time=21600)
except errors.QueryIdInvalid:
# print(exceptions.QueryIdInvalid)
return
async def start_command(self, client: Client, message: types.Message):
await message.reply('Start message')
async def help_command(self, client: Client, message: types.Message):
await message.reply('Help message')
async def echo_command(self, client: Client, message: types.Message):
try:
await message.reply(text=message.text.split(" ", maxsplit=1)[1])
except IndexError:
return
# ------------------------------------------------------------------------------------------------------------------
# SPEECH RECOGNITION -----------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
async def speech_to_text_command(self, client: Client, message: types.Message):
try:
language = message.text.split(" ", maxsplit=1)[1]
except IndexError:
language = 'ru-RU'
if not message.reply_to_message or not message.reply_to_message.voice:
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(
text=_("Message does not contain voice message"))
)
file_id = message.reply_to_message.voice.file_id
source = await self.pyrogramBot.bot.download_media(message=file_id, file_name='downloads/')
converted_source = media_convertor.convert_audio_file(source)
os.remove(source)
recognizer = speech_recognition.Recognizer()
text = ''
if message.reply_to_message.voice.duration < 60:
audio_file = speech_recognition.AudioFile(converted_source)
with audio_file as source:
audio_data = recognizer.record(source)
try:
text = recognizer.recognize_google(audio_data=audio_data, language=language)
except (speech_recognition.UnknownValueError,
speech_recognition.RequestError,
speech_recognition.WaitTimeoutError):
await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text="✖️{text}".format(text=_("Speech Recognition Error"))
)
else:
# TODO long file recognition
text = 'Long voice audio recognition is not yet supported. ' \
       'You can only use < 60 sec voice audio recognition'
"""
audio_segment = AudioSegment.from_wav(file=converted_source)
chunks = split_on_silence(audio_segment=audio_segment, min_silence_len=1000, silence_thresh=-16)
i = 0
# process each chunk
for chunk in chunks:
# Create 0.5 seconds silence chunk
chunk_silent = AudioSegment.silent(duration=10)
# add 0.5 sec silence to beginning and
# end of audio chunk. This is done so that
# it doesn't seem abruptly sliced.
audio_chunk = chunk_silent + chunk + chunk_silent
# specify the bitrate to be 192 k
audio_chunk.export("downloads/chunk{0}.wav".format(i), bitrate='192k', format="wav")
# the name of the newly created chunk
filename = 'downloads/chunk' + str(i) + '.wav'
print("Processing chunk " + str(i))
# get the name of the newly created chunk
# in the AUDIO_FILE variable for later use.
file = filename
# recognize the chunk
with speech_recognition.AudioFile(file) as src:
# remove this if it is not working
# correctly.
recognizer.adjust_for_ambient_noise(src)
audio_listened = recognizer.listen(src)
try:
# try converting it to text
chunk_text = recognizer.recognize_google(audio_data=audio_listened, language=language)
print(chunk_text)
text = text + chunk_text
except (speech_recognition.UnknownValueError,
speech_recognition.RequestError,
speech_recognition.WaitTimeoutError):
print('speech_recognition.Error')
i += 1
"""
os.remove(converted_source)
if text:
if message.chat.type in ('group', 'supergroup', 'channel'):
link = message.reply_to_message.link
await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text=f"[{text}]({link})")
else:
await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text=f"{text}")
# https://cloud.google.com/speech-to-text/docs/languages
# recognize_bing()
# recognize_google()
# recognize_google_cloud()
# recognize_ibm()
# recognize_sphinx()
#
# XP check
#
async def stats_command(self, client: Client, message: types.Message):
# TODO add additional info and parameters
# RANKING: message = 100 xp, minute in voice = 50 xp
message_xp = 100
voice_xp = 50
chat = message.chat
try:
query = message.text.split(" ", maxsplit=1)[1]
if query == 'random':
max_value = await self.pyrogramBot.user.search_messages_count(chat_id=chat.id)
# random_value = random.random()
# min_value + (random_value * (max_value - min_value))
try:
# get_history is available only for user bots
random_message = await self.pyrogramBot.user.get_history(
chat_id=chat.id,
offset=random.randint(0, max_value),
limit=1
)
random_message = random_message[0]
random_message: types.Message
except ValueError:
return await self.pyrogramBot.bot.send_message(chat_id=chat.id,
text="✖️{text} {chat_title}".format(
text=_("No messages in"),
chat_title=chat.title)
)
link = random_message.link
return await self.pyrogramBot.bot.send_message(
chat_id=chat.id,
text="✔️{random_text} [{message_text}]({link})".format(random_text=_("Random"),
message_text=_("message"),
link=link)
)
else:
user_name = query
try:
user = await self.pyrogramBot.bot.get_users(user_name)
except (errors.UsernameInvalid, errors.PeerIdInvalid):
return await self.pyrogramBot.bot.send_message(chat_id=chat.id,
text="✖️{text} {user_name}".format(
text=_("No stats found about"),
user_name=user_name)
)
query = {'_id': 0, f'users.{user.id}.stats': 1}
document = self.mongoDataBase.get_document(database_name='tbot', collection_name='chats',
filter={'chat_id': chat.id}, query=query)
try:
stats = document['users'][f'{user.id}']['stats']
seconds = 0.0
for voicetime in stats['voicetime']:
seconds += voicetime
seconds = round(seconds)
except(IndexError, KeyError, TypeError):
seconds = 0
query = ""
query_filter = "empty"
# Search only for userbots
messages_count = await self.pyrogramBot.user.search_messages_count(chat_id=chat.id, from_user=user.id,
query=query, filter=query_filter)
user_mention = f"[@{user.username}](tg://user?id={user.id})"
date = datetime.timedelta(seconds=seconds)
xp = (messages_count * message_xp) + ((seconds // 60) * voice_xp)
return await self.pyrogramBot.bot.send_message(
chat_id=chat.id,
text="{user_mention} {xp_text}: {xp}\n{messages_text}: {messages_count} | {voice_time_text}: {voice_time}\n\n"
"{message_xp} {xp_per_messaage_text}\n{voice_xp} {xp_per_voice_second_text}".format(
user_mention=user_mention,
xp_text=_("xp"),
xp=round(xp),
messages_text=_("messages"),
messages_count=messages_count,
voice_time_text=_("voice time"),
voice_time=date,
message_xp=message_xp,
xp_per_messaage_text=_("xp per message"),
voice_xp=voice_xp,
xp_per_voice_second_text=_("xp per voice minute"))
# text=f"{user_mention} {_('xp')}: {round(xp)} {_('messages')}: {messages_count} {_('voice time')}: {date}\n\n"
# f"({message_xp}{_('xp per message')} | {voice_xp} {_('xp per voice second')})"
)
except IndexError:
query = {'_id': 0, 'users.$': 1}
document = self.mongoDataBase.get_document(database_name='tbot', collection_name='chats',
filter={'chat_id': chat.id}, query=query)
query = ""
query_filter = "empty"
stats = []
async for member in self.pyrogramBot.user.iter_chat_members(chat_id=chat.id):
messages_count = await self.pyrogramBot.user.search_messages_count(chat_id=chat.id,
from_user=member.user.id,
query=query, filter=query_filter)
user = member.user
try:
voice_stats = document['users'][f'{user.id}']['stats']
seconds = 0
for voicetime in voice_stats['voicetime']:
seconds += voicetime
seconds = round(seconds)
# date = datetime.timedelta(seconds=seconds)
except(IndexError, KeyError, TypeError):
seconds = 0
xp = (messages_count * message_xp) + ((seconds // 60) * voice_xp)
stat = (user, messages_count, seconds, xp)
stats.append(stat)
stats.sort(reverse=True, key=lambda x: x[3])
# [0:10:2] # start 0, stop: 10, step:2
# [0:9] # start 0, stop: 9, step:1
# [9:] # start 9, stop: end of string, step:1
# [9::2] # start 9, stop: end of string, step:2
# [::2] # start 0, stop: end of string, step:2
# top 10 users
top_list = "{top_members_text} {chat_title}\n\n".format(top_members_text=_("Top members of"),
chat_title=chat.title)
i = 0
for user, messages_count, seconds, xp in stats[0:10]:
date = datetime.timedelta(seconds=seconds)
i += 1
user_mention = f"[@{user.username}](tg://user?id={user.id})"
top_list = "{top_list}{i}.{user_mention} {xp_text}: {xp}\n{messages_text}: {messages_count} | {voice_time_text}: {voice_time}\n".format(
top_list=top_list,
i=i,
user_mention=user_mention,
xp_text=_("xp"),
xp=round(xp),
messages_text=_("messages"),
messages_count=messages_count,
voice_time_text=_("voice time"),
voice_time=date)
# top_list = f"{top_list}{i}.{user_mention} {_('xp')}: {round(xp)} {_('messages')}: {messages_count} {_('voice time')}: {date}\n"
top_list = "{top_list}\n\n{message_xp} {xp_per_messaage_text}\n{voice_xp} {xp_per_voice_second_text}".format(
top_list=top_list,
message_xp=message_xp,
xp_per_messaage_text=_("xp per message"),
voice_xp=voice_xp,
xp_per_voice_second_text=_("xp per voice minute"))
# top_list = f"{top_list}\n\n({message_xp}{_('xp per message')} | {voice_xp} {_('xp per voice second')})"
return await self.pyrogramBot.bot.send_message(chat_id=chat.id, text=top_list, disable_notification=True)
# query = ""
# query_filter = "empty"
# Search only for userbots
# messages_count = await self.pyrogramBot.user.search_messages_count(chat_id=chat.id, from_user=user.id,
# query=query, filter=query_filter)
# stat = (user.username, messages_count)
# print(user.username, messages_count)
# return
# print(messages_count)
"""
query = {'_id': 0, f'users.{user.id}.messages_count': 1, f'users.{user.id}.voice_time': 1}
document = self.mongoDataBase.get_document(database_name='tbot', collection_name='chats',
filter={'chat_id': message.chat.id}, query=query)
try:
info = document['users'][f'{user.id}']
except(IndexError, KeyError, TypeError):
user_mention = f"[@{user.username}](tg://user?id={user.id})"
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text=f"✖️No stats found about {user_mention}")
user_mention = f"[@{user.username}](tg://user?id={user.id})"
return await self.pyrogramBot.bot.send_message(chat_id=message.chat.id,
text=f"{user_mention} stats:\n"
f"Messages: {info.get('messages_count')}\n"
f"Voice time: {info.get('voice_time')} minutes")
"""
# ------------------------------------------------------------------------------------------------------------------
# RAW UPDATE HANDLER -----------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
async def raw_update_handler(self, client: Client, update: base.Update, users: dict, chats: dict):
"""
Raw Updates Handler
# MIN_CHANNEL_ID = -1002147483647
# MAX_CHANNEL_ID = -1000000000000
# MIN_CHAT_ID = -2147483647
# MAX_USER_ID_OLD = 2147483647
# MAX_USER_ID = 999999999999
# MAX - ID = NEW ID
"""
# (client.get_chat_member(userid)).status - member admin permissions
# APRIL FOOLS --------------------------------------------------------------------------------------------------
"""
if isinstance(update, raw_types.UpdateNewChannelMessage):
update: raw_types.UpdateNewChannelMessage
# self.pyrogramBot.bot: pyrogram.Client
# chat = list(chats.items())[0][1]
# chat_id = -1000000000000 - chat.id
# user = list(users.items())[0][1]
if random.randint(1, 4) == 1:
# print(1)
chat_id = -1000000000000 - update.message.peer_id.channel_id
message_id = update.message.id
emoji = "🎉🔥🤮🤯👍👎💩🤩😱😁🤬😢🥰👏❤🤔"
# "️"
# print(message_id, chat_id, update)
# print(message_id, chat_id, update)
while True:
try:
if await self.pyrogramBot.user.send_reaction(chat_id=chat_id, message_id=message_id, emoji=random.choice(emoji)):
return
except errors.FloodWait as e:
await asyncio.sleep(e.x)
except Exception as e:
return print(e)
# chat_member = await self.pyrogramBot.bot.get_chat_member(chat_id=chat_id, user_id='me')
"""
# --------------------------------------------------------------------------------------------------------------
# TODO add handle of another update types
# TODO make log file of the day or after some time
if isinstance(update, raw_types.update_group_call.UpdateGroupCall):
update: raw_types.update_group_call.UpdateGroupCall
# chat = list(chats.items())[0][1]
# chat_member = await self.pyrogramBot.bot.get_chat_member(chat_id=-1000000000000 - chat.id, user_id='me')
# if chat_member.status != 'administrator':
# raise ContinuePropagation
try:
version = update.call.version
if version == 1:
query = {'call_id': update.call.id}
return self.mongoDataBase.update_field(database_name='tbot', collection_name='chats', action='$set',
filter={'chat_id': -1000000000000 - update.chat_id},
query=query)
except AttributeError:
query = {'call_id': 1}
return self.mongoDataBase.update_field(database_name='tbot', collection_name='chats',
action='$unset',
filter={'chat_id': -1000000000000 - update.chat_id},
query=query)
# query = {f'call_id': update.call.id}
# return self.mongoDataBase.update_field(database_name='tbot', collection_name='chats',
# action='$setOnInsert', filter={'chat_id': update.chat_id},
# query=query)
if isinstance(update, raw_types.update_group_call_participants.UpdateGroupCallParticipants):
update: raw_types.update_group_call_participants.UpdateGroupCallParticipants
for participant in update.participants:
participant: raw_types.GroupCallParticipant
if participant.left:
voicetime = time.time() - participant.date
user = list(users.items())[0][1]
# getGroupCall = phone.GetGroupCall(call=update.call, limit=1)
# groupCall = await self.pyrogramBot.user.send(getGroupCall)
# groupCall: raw_types.GroupCall
# print(groupCall)
# chat = await self.pyrogramBot.bot.get_chat(chat_id=groupCall.title)
query = {f'users.{user.id}.stats.datetime': datetime.datetime.now(),
f'users.{user.id}.stats.voicetime': voicetime}
return self.mongoDataBase.update_field(database_name='tbot', collection_name='chats',
action='$push', filter={'call_id': update.call.id},
query=query, upsert=False)
"""
try:
# UpdateNewChannelMessage
if isinstance(update, raw_types.update_new_channel_message.UpdateNewChannelMessage):
update: raw_types.update_new_channel_message.UpdateNewChannelMessage
message = update.message
if isinstance(message, raw_types.Message):
message: raw_types.Message
user = list(users.items())[0][1]
channel = list(chats.items())[0][1]
# chats[id_of_chat]
if isinstance(user, raw_types.User):
user: raw_types.User
else:
raise ContinuePropagation
if isinstance(channel, raw_types.Channel):
channel: raw_types.Channel
else:
raise ContinuePropagation
else:
# FIXME
raise ContinuePropagation
'''
for user_id, user in users.items():
if isinstance(user, raw_types.User):
user: raw_types.User
for chat_id, chat in chats.items():
if isinstance(chat, raw_types.Channel):
chat: raw_types.Channel
'''
query = {f'users.{user.id}.messages_count': 1, f'users.{user.id}.voice_time': 0}
channel_id = -1000000000000 - channel.id
self.mongoDataBase.update_field(database_name='tbot', collection_name='chats', action='$inc',
filter={'chat_id': channel_id}, query=query)
else:
raise ContinuePropagation
except Exception:
# TODO Exceptions
pass
# update.continue_propagation()
raise ContinuePropagation
"""
# ------------------------------------------------------------------------------------------------------------------
# Something else ---------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
# url = urls.split('&')
# for item in url:
# print(item)
# if info['duration'] >= YoutubeHelper.max_track_duration:
# return None
# if info['duration'] <= 0.1:
# return None
# if 'entries' in info:
# for video in info['entries']:
# url = video['url']
# url = video['formats']['480p']['url']
# else:
# url = info['url']
# url = info['formats']['url']
# Source = "https://www.youtube.com/watch?v=A7k2CMRBq4U"
# with youtube_dl.YoutubeDL(dict(forceurl=True)) as ydl:
# r = ydl.extract_info(Source, download=False)
# media_url = r['formats'][-1]['webhook_url']
# print(media_url)
# SOURCE = "https://www.youtube.com/watch?v=p0lbm5J0uUs"
# video = pafy.new(SOURCE)
# source = video.getbest().url
# source = "https://cf-media.sndcdn.com/UPc0t917g9OV.128.mp3?Policy=eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiKjovL2NmLW1lZGlhLnNuZGNkbi5jb20vVVBjMHQ5MTdnOU9WLjEyOC5tcDMqIiwiQ29uZGl0aW9uIjp7IkRhdGVMZXNzVGhhbiI6eyJBV1M6RXBvY2hUaW1lIjoxNjM1ODIyNDA1fX19XX0_&Signature=FdEBG9g8KB4JCLZmYWbPkCyD-QJJNrae5l4R1iyR6a96iLmBrDafuDKttVUIp0HvH5N5Sj6ez6AsHwfCIH6ZoxdvElrLCGs9YxGYsH8uSLUo7a8r74VbMY9V-XFxRLQCusIhxjrJocwATxhG-brwQELjnuOaNtWZHbEN7RRto9L-99jyVJN-6gDd-oAB5Sh8y3EGunfebhAU1hAqf-YFG1Ue1oSvvKzQEhqjECx3RI0UUvzeyKkSd3srskqh-klzazZ0fcSBzlTvUQlrrHtIistwtgSxWK65WaI4qLluHrg8y8j-K2S6kVUMwBQfTKVrybOLwq5M~R9Qx7TNUjKEaA__&Key-Pair-Id=APKAI6TU7MMXM5DG6EPQ"
# @staticmethod
# def on_played_data(self, gc, length, fifo=av.AudioFifo(format='s16le')):
# data = fifo.read(length / 4)
# if data:
# data = data.to_ndarray().tobytes()
# return data
# async def start(self, user_bot, message: Message):
# group_call_factory = GroupCallFactory(user_bot, GroupCallFactory.MTPROTO_CLIENT_TYPE.PYROGRAM)
# group_call_raw = group_call_factory.get_raw_group_call(on_played_data=self.on_played_data)
# group_call_raw.play_on_repeat = False
# if not group_call_raw.is_connected:
# await group_call_raw.start(message.chat.id)
# else:
# await message.reply_text(text="I'm already connected")
|
from django.db import models
# Create your models here.
class Todo(models.Model):
ident = models.CharField(max_length=50, primary_key=True)
desc = models.CharField(max_length=500)
group = models.IntegerField(default=0)
schedule_date = models.DateField(blank=False)
finish_date = models.DateField(null=True)
remind_type = models.IntegerField(default=0)
remind_date = models.DateTimeField(null=True)
icon_index = models.IntegerField(default=0)
status = models.IntegerField(default=0)
user_id = models.IntegerField(blank=False)
last_modified = models.DateTimeField(blank=False)
def to_dict(self):
    data = {}
    data["ident"] = self.ident
    data["desc"] = self.desc
    data["group"] = self.group
    data["scheduleDate"] = self.schedule_date
    data["finishDate"] = self.finish_date
    data["remindType"] = self.remind_type
    data["remindDate"] = None if self.remind_date is None else self.remind_date.strftime("%Y-%m-%d %H:%M:%S")
    data["iconIndex"] = self.icon_index
    data["status"] = self.status
    data["userId"] = self.user_id
    data["lastModified"] = self.last_modified.strftime("%Y-%m-%d %H:%M:%S")
    return data
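# Minimal usage sketch (hypothetical ident value, assumes a saved row):
# todo = Todo.objects.get(ident="abc123")
# payload = todo.to_dict()  # camelCase keys, dates formatted for the client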
class Goal(models.Model):
ident = models.CharField(max_length=50, primary_key=True)
title = models.CharField(max_length=500)
start_date = models.DateField(blank=False)
end_date = models.DateField(blank=False)
content = models.TextField(null=True)
completeness = models.IntegerField(default=0)
status = models.IntegerField(default=0)
delete_status = models.IntegerField(default=0)
user_id = models.IntegerField(blank=False)
last_modified = models.DateTimeField(blank=False)
def to_dict(self):
    data = {}
    data["ident"] = self.ident
    data["title"] = self.title
    data["startDate"] = self.start_date
    data["endDate"] = self.end_date
    data["content"] = self.content
    data["completeness"] = self.completeness
    data["status"] = self.status
    data["deleteStatus"] = self.delete_status
    data["userId"] = self.user_id
    data["lastModified"] = self.last_modified.strftime("%Y-%m-%d %H:%M:%S")
    return data
|
import argparse
from mmap import *
from pathlib import Path
from ctypes import *
from .common import *
class DisplayVariable(BigEndianStructure):
_pack_ = 1
_fields_ = [("valid", c_uint8),
("type", c_uint8),
("sp_word", c_uint16),
("desc_len_words", c_uint16),
("vp_word", c_uint16)]
type_map = {}
def __init_subclass__(cls) -> None:
cls.type_map[cls.type_code] = cls
def get_subclass(self) -> object:
return self.type_map[self.type]
def __new__(cls, buf, off):
return cls.from_buffer(buf, off)
def __init__(self, buf, off) -> None:
assert self.valid == 0x5a, f'bad magic: 0x{self.valid:02x} off 0x{off:x}'
if self.__class__ is not DisplayVariable:
assert sizeof(self) == 0x20, '{} has bad size 0x{:x}'.format(self.__class__.__name__, sizeof(self))
assert self.sp_word == 0xffff, f'SP not supported yet: 0x{self.sp_word:04x} off 0x{off:x}'
self.vp = VP(self.vp_word)
self.pic = Pic(off // 0x800)
def __str__(self) -> str:
return '{} {} {:<7} {}'.format(self.pic, self.area, self.__class__.__name__, self.vp)
class Icon(DisplayVariable):
type_code = 0x00
_pack_ = 1
_fields_ = [("pos", Coord),
("val_min", c_uint16),
("val_max", c_uint16),
("icon_min", c_uint16),
("icon_max", c_uint16),
("icon_lib", c_uint8),
("opaque", Bool),
("_reserved", c_uint8 * 10)]
def __init__(self, buf, off) -> None:
super().__init__(buf, off)
# would need to parse icons to get real area
self.area = Area(self.pos, self.pos)
self.vp.set_type(VP_Type.WORD)
class ImageAnimation(DisplayVariable):
type_code = 0x04
_pack_ = 1
_fields_ = [("pic_begin", Pic),
("pic_end", Pic),
("frame_time_8ms", c_uint8),
("_reserved", c_uint8 * 0x13)]
def __init__(self, buf, off) -> None:
super().__init__(buf, off)
self.vp.set_type(VP_Type.NONE)
def __str__(self) -> str:
return '{} {} {} to {} every {} ms'.format(
self.pic,
self.__class__.__name__,
self.pic_begin,
self.pic_end,
self.frame_time_8ms * 8)
class Slider(DisplayVariable):
type_code = 0x02
_pack_ = 1
_fields_ = [("val_min", c_uint16),
("val_max", c_uint16),
("xy_begin", Position),
("xy_end", Position),
("icon", c_uint16),
("yx", Position),
("adj_left_top", c_uint8),
("vertical", Bool),
("icon_lib", c_uint8),
("opaque", Bool),
("vp_format", c_uint16),
("_reserved", c_uint8 * 6)]
def __init__(self, buf, off) -> None:
super().__init__(buf, off)
if self.vertical:
self.pos = Coord(self.yx, self.xy_begin)
self.end = Coord(self.yx, self.xy_end)
else:
self.pos = Coord(self.xy_begin, self.yx)
self.end = Coord(self.xy_end, self.yx)
#TODO: take adj_left_top into account?
assert self.adj_left_top == 0
self.area = Area(self.pos, self.end)
self.vp.set_from_vp_format_standard(self.vp_format)
class BitIcon(DisplayVariable):
type_code = 0x06
_pack_ = 1
_fields_ = [("vp_aux_ptr_word", c_uint16), # also called AP, 2 words
("bitmask", c_uint16),
("mode", c_uint8),
("arrangement", c_uint8),
("opaque", Bool),
("icon_lib", c_uint8),
("icon0s", c_uint16),
("icon0e", c_uint16),
("icon1s", c_uint16),
("icon1e", c_uint16),
("pos", Coord),
("spacing", Position),
("_reserved", c_uint8 * 2)]
def __init__(self, buf, off) -> None:
super().__init__(buf, off)
# multiple spaced icons aren't supported yet
assert bin(self.bitmask).count('1') == 1
assert int(self.spacing) == 0, self.spacing
# would need to parse icons to get real area
self.area = Area(self.pos, self.pos)
# compute size based on self.bitmask
if bin(self.bitmask).count('1') == 1:
self.vp.set_type(VP_Type.BIT, bit=self.bitmask.bit_length() - 1)
elif self.bitmask & 0xff == 0:
self.vp.set_type(VP_Type.BYTE, low_byte=False)
elif self.bitmask & 0xff00 == 0:
self.vp.set_type(VP_Type.BYTE, low_byte=True)
self.ap = VP(self.vp_aux_ptr_word)
self.ap.size = 4
#assert self.vp_aux_ptr_word == self.vp_word + 1
#self.vp_size = 6
class Numeric(DisplayVariable):
type_code = 0x10
_pack_ = 1
_fields_ = [("text_pos", Coord),
("color", Color),
("font", c_uint8),
("x_px", c_uint8),
("alignment", c_uint8),
("int_digits", c_uint8),
("dec_digits", c_uint8),
("vp_format", c_uint8),
("suffix_len", c_uint8),
("_suffix", c_char * 11)]
def num_chars(self):
decimalpoint = 1 if self.dec_digits > 0 else 0
return min(self.int_digits, 1) + self.dec_digits + decimalpoint
def __init__(self, buf, off) -> None:
super().__init__(buf, off)
self.suffix = self._suffix[:self.suffix_len].decode('ascii')
self.y_px = self.x_px * 2
self.area = Area(self.text_pos, self.text_pos)
self.area.end.y += self.y_px
self.area.end.x += self.x_px * self.num_chars()
self.vp.set_from_vp_format_numeric(self.vp_format)
def __str__(self) -> str:
return '{} {}.{} digits {}x{}px suffix \'{}\' {}'.format(
super().__str__(),
self.int_digits, self.dec_digits,
self.x_px, self.y_px,
self.suffix,
self.color)
class Text(DisplayVariable):
type_code = 0x11
_pack_ = 1
_fields_ = [("text_pos", Coord),
("color", Color),
("area", Area),
("length", c_uint16),
("font_ascii", c_uint8),
("font_nonascii", c_uint8), # documentation is ambiguous
("x_px", c_uint8),
("y_px", c_uint8),
("monospace", Bool, 1),
("encoding", c_uint8, 7),
("x_kerning_px", c_uint8),
("y_tracking_px", c_uint8),
("_reserved", c_uint8)]
def __init__(self, buf, off) -> None:
super().__init__(buf, off)
self.vp.set_type(VP_Type.TEXT, len=self.length)
def __str__(self) -> str:
return '{} {:2} chars {}x{}px {} {}'.format(
super().__str__(),
self.vp.size,
self.x_px, self.y_px,
"monospace" if self.monospace else "variable",
self.color)
class Curve(DisplayVariable):
type_code = 0x20
_pack_ = 1
_fields_ = [("area", Area),
("y_center", Position),
("value_center", c_uint16),
("color", Color),
("_y_scale256th", c_uint16),
("channel", c_uint8),
("x_spacing", c_uint8),
("_reserved", c_uint8 * 6)]
def __init__(self, buf, off) -> None:
super().__init__(buf, off)
self.vp.set_type(VP_Type.NONE)
self.y_scale = self._y_scale256th / 256
def __str__(self) -> str:
return '{} y_center {}@{} scale {}x{:.03f} channel {} {}'.format(
super().__str__(),
self.value_center, self.y_center,
self.x_spacing, self.y_scale,
self.channel,
self.color)
class Parser:
@staticmethod
def make_class(mm, off) -> object:
touch = DisplayVariable(mm, off)
while True:
t = touch.get_subclass()
if t == touch.__class__:
return touch
touch = t(mm, off)
def __init__(self, dirname):
d = Path(dirname) / 'DWIN_SET'
filename = next(d.glob('14*.bin'))
with open(filename, 'r+b') as f:
# cannot be read-only if we want to use the buffer directly
#mm = mmap(f.fileno(), 0, access=ACCESS_READ)
self.mm = mmap(f.fileno(), 0)
# print(filename, 'len', len(self.mm))
def __iter__(self):
off = 0
while off < len(self.mm):
if 0x00 == self.mm[off]:
off += 0x20
continue
# print('off: {:04x}'.format(off))
t = self.make_class(self.mm, off)
off += sizeof(t)
yield t
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('basedir', nargs='?', type=Path, default='../dgusm')
args = parser.parse_args()
for c in Parser(args.basedir):
print(c)
|
"""
Copyright (c) 2016-2020 The scikit-optimize developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
NOTE: Changes were made to the scikit-optimize source code included here.
For the most recent version of scikit-optimize we refer to:
https://github.com/scikit-optimize/scikit-optimize/
Copyright (c) 2019-2020 Alexander Thebelt.
"""
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from entmoot import entmoot_minimize
from entmoot.benchmarks import Rosenbrock, SimpleCat
from entmoot.learning import EntingRegressor
from entmoot.optimizer import Optimizer
from scipy.optimize import OptimizeResult
ESTIMATOR_STRINGS = ("GBRT", "RF")
STD_ESTIMATORS = ("BDD","L1BDD","DDP","L1DDP")
ACQ_FUNCS = ["LCB"]
ACQ_OPTIMIZER = ["sampling", "global"]
@pytest.mark.fast_test
def test_multiple_asks():
# calling ask() multiple times without a tell() in between should
# be a "no-op"
bench1 = Rosenbrock()
opt = Optimizer(bench1.get_bounds(2), "ENTING", n_initial_points=10,
acq_optimizer="sampling",
base_estimator_kwargs={"min_child_samples":2})
opt.run(bench1, n_iter=13)
# ask() computes the next point ready for the next call to ask()
# hence there are three after three iterations
assert_equal(len(opt.models), 3)
assert_equal(len(opt.Xi), 13)
opt.ask()
# training happens in ask(), that's why there's one more model now
assert_equal(len(opt.models), 4)
assert_equal(len(opt.Xi), 13)
assert_equal(opt.ask(), opt.ask())
opt.update_next()
assert_equal(opt.ask(), opt.ask())
@pytest.mark.fast_test
def test_model_queue_size():
# Check if model_queue_size limits the model queue size
bench1 = Rosenbrock()
opt = Optimizer(bench1.get_bounds(2), "ENTING", n_initial_points=10,
acq_optimizer="sampling", model_queue_size=2,
base_estimator_kwargs={"min_child_samples":2})
opt.run(bench1, n_iter=13)
# three models are trained after three iterations, but
# model_queue_size=2 caps how many are kept
assert_equal(len(opt.models), 2)
assert_equal(len(opt.Xi), 13)
opt.ask()
assert_equal(len(opt.models), 2)
assert_equal(len(opt.Xi), 13)
assert_equal(opt.ask(), opt.ask())
opt.update_next()
assert_equal(opt.ask(), opt.ask())
@pytest.mark.fast_test
def test_invalid_tell_arguments():
bench1 = Rosenbrock()
opt = Optimizer(bench1.get_bounds(2), "ENTING", n_initial_points=10,
acq_optimizer="sampling", model_queue_size=2,
base_estimator_kwargs={"min_child_samples":2})
# can't have single point and multiple values for y
assert_raises(ValueError, opt.tell, [1.], [1., 1.])
@pytest.mark.fast_test
def test_invalid_tell_arguments_list():
bench1 = Rosenbrock()
opt = Optimizer(bench1.get_bounds(2), "ENTING", n_initial_points=10,
acq_optimizer="sampling", model_queue_size=2,
base_estimator_kwargs={"min_child_samples":2})
assert_raises(ValueError, opt.tell, [[1.], [2.]], [1., None])
@pytest.mark.fast_test
def test_bounds_checking_1D():
low = -2.
high = 2.
opt = Optimizer([(low, high)], "ENTING", n_initial_points=1,
acq_optimizer="sampling")
assert_raises(ValueError, opt.tell, [high + 0.5], 2.)
assert_raises(ValueError, opt.tell, [low - 0.5], 2.)
# feed two points to tell() at once
assert_raises(ValueError, opt.tell, [high + 0.5, high], (2., 3.))
assert_raises(ValueError, opt.tell, [low - 0.5, high], (2., 3.))
@pytest.mark.fast_test
def test_bounds_checking_2D():
low = -2.
high = 2.
opt = Optimizer([(low, high)], "ENTING", n_initial_points=1,
acq_optimizer="sampling")
assert_raises(ValueError, opt.tell, [high + 0.5, high + 4.5], 2.)
assert_raises(ValueError, opt.tell, [low - 0.5, low - 4.5], 2.)
# first out, second in
assert_raises(ValueError, opt.tell, [high + 0.5, high + 0.5], 2.)
assert_raises(ValueError, opt.tell, [low - 0.5, high + 0.5], 2.)
@pytest.mark.fast_test
def test_bounds_checking_2D_multiple_points():
low = -2.
high = 2.
opt = Optimizer([(low, high)], "ENTING", n_initial_points=1,
acq_optimizer="sampling")
# first component out, second in
assert_raises(ValueError, opt.tell,
[(high + 0.5, high + 0.5), (high + 0.5, high + 0.5)],
[2., 3.])
assert_raises(ValueError, opt.tell,
[(low - 0.5, high + 0.5), (low - 0.5, high + 0.5)],
[2., 3.])
@pytest.mark.fast_test
def test_dimension_checking_1D():
low = -2
high = 2
opt = Optimizer([(low, high)], "ENTING", n_initial_points=1,
acq_optimizer="sampling")
with pytest.raises(ValueError) as e:
# within bounds but one dimension too high
opt.tell([low+1, low+1], 2.)
assert "Dimensions of point " in str(e.value)
@pytest.mark.fast_test
def test_dimension_checking_2D():
low = -2
high = 2
opt = Optimizer([(low, high), (low, high)], "ENTING", n_initial_points=10,
acq_optimizer="sampling")
# within bounds but one dimension too little
with pytest.raises(ValueError) as e:
opt.tell([low+1, ], 2.)
assert "Dimensions of point " in str(e.value)
# within bounds but one dimension too much
with pytest.raises(ValueError) as e:
opt.tell([low+1, low+1, low+1], 2.)
assert "Dimensions of point " in str(e.value)
@pytest.mark.fast_test
def test_dimension_checking_2D_multiple_points():
low = -2
high = 2
opt = Optimizer([(low, high), (low, high)])
# within bounds but one dimension too little
with pytest.raises(ValueError) as e:
opt.tell([[low+1, ], [low+1, low+2], [low+1, low+3]], 2.)
assert "dimensions as the space" in str(e.value)
# within bounds but one dimension too much
with pytest.raises(ValueError) as e:
opt.tell([[low + 1, low + 1, low + 1], [low + 1, low + 2],
[low + 1, low + 3]], 2.)
assert "dimensions as the space" in str(e.value)
@pytest.mark.consistent
@pytest.mark.parametrize("acq_optimizer", ACQ_OPTIMIZER)
def test_result_rosenbrock(acq_optimizer):
bench1 = Rosenbrock()
opt = Optimizer(bench1.get_bounds(2), "ENTING", n_initial_points=10,
acq_optimizer=acq_optimizer,
base_estimator_kwargs={"min_child_samples":2},
random_state=100)
opt.run(bench1, n_iter=20, no_progress_bar=True)
if acq_optimizer == "sampling":
res_known = np.array(
[[0.701053475292035, -0.36025795213264167],
[0.10806258681567815, -1.4393145750513043],
[-1.4061114159677395, -1.2842304177627843],
[-1.1873986869551771, -0.19357735788859287],
[1.5161042350797533, -1.7871624483849504],
[0.5091814944214517, 0.09563236039209677],
[0.25516550013357264, -2.0241727838227086],
[-0.7887945257988347, 1.843954950307714],
[-1.5292242379168393, -1.7244656733329853],
[-0.7726975624367143, 0.5422431772370664],
[-0.26851521090965313, -0.27168208839378893],
[0.18559023118643525, -0.30091277104839054],
[2.037449372031385, -0.003908949645985427],
[0.3618262062035891, -0.886575853445807],
[-0.70099499312817, -0.013753379624784401],
[0.3074119029928717, 1.3227888228213858],
[0.43370695792140035, -0.5401762255031577],
[0.060723595879015324, 2.0360116729103517],
[-0.8891589013732526, -0.2756841022715071],
[-0.5803197996462619, -0.6720516369715639]]
)
elif acq_optimizer == "global":
res_known = np.array(
[[0.701053475292035, -0.36025795213264167],
[0.10806258681567815, -1.4393145750513043],
[-1.4061114159677395, -1.2842304177627843],
[-1.1873986869551771, -0.19357735788859287],
[1.5161042350797533, -1.7871624483849504],
[0.5091814944214517, 0.09563236039209677],
[0.25516550013357264, -2.0241727838227086],
[-0.7887945257988347, 1.843954950307714],
[-1.5292242379168393, -1.7244656733329853],
[-0.7726975624367143, 0.5422431772370664],
[-0.27264284694027374, -0.27692000000000005],
[0.191375558305514, -0.31858999999999993],
[2.048, 0.0],
[0.3821699999999999, -0.896229816148446],
[-0.7085110933262221, 0.0],
[-0.8912055180959243, -0.23524999999999974],
[0.0, 1.2022767998385142],
[0.31867, 2.048],
[0.4353459945606931, -0.5351577612613215],
[-0.05614795431522378, 0.4412061957063193]]
)
res = opt.get_result()
assert_array_equal(res.x_iters, res_known)
@pytest.mark.consistent
@pytest.mark.parametrize("acq_optimizer", ACQ_OPTIMIZER)
def test_result_simplecat(acq_optimizer):
bench1 = SimpleCat()
opt = Optimizer(bench1.get_bounds(), "ENTING", n_initial_points=5,
acq_optimizer=acq_optimizer,
base_estimator_kwargs={"min_child_samples":2},
random_state=100)
opt.run(bench1, n_iter=10, no_progress_bar=True)
if acq_optimizer == "sampling":
res_known = np.array(
[[0.6846225344648778, -0.35181440637953276, 'pow2'],
[-1.4055806396985395, -1.3731556796559956, 'mult6'],
[-1.159569030229665, -0.18904038856307892, 'pow2'],
[-1.7452758285009282, 0.4972475531459488, 'pow2'],
[0.24918505872419194, -1.9767312342018637, 'mult6'],
[1.9533151312376096, 1.9911019960902978, 'mult6'],
[-0.30869536630063044, -0.8116308097286711, 'mult6'],
[1.9832830176095446, -1.6137030366593792, 'pow2'],
[-0.7534228429864178, -1.9474210829020842, 'mult6'],
[-1.9397005641914857, -1.9937770993390025, 'mult6']]
)
elif acq_optimizer == "global":
res_known = np.array(
[[0.6846225344648778, -0.35181440637953276, 'pow2'],
[-1.4055806396985395, -1.3731556796559956, 'mult6'],
[-1.159569030229665, -0.18904038856307892, 'pow2'],
[-1.7452758285009282, 0.4972475531459488, 'pow2'],
[0.24918505872419194, -1.9767312342018637, 'mult6'],
[2.0, 2.0, 'mult6'],
[-0.26728431683174625, -0.8793390813532928, 'mult6'],
[2.0, -1.6258926741796051, 'pow2'],
[-0.7295094146491492, -2.0, 'mult6'],
[-2.0, -2.0, 'mult6']]
)
res = opt.get_result()
assert_array_equal(res.x_iters, res_known) |
_base_ = '../faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=22
)
)
)
dataset_type = 'CaltechDataset'
classes = ('bobcat', 'opossum', 'empty', 'coyote', 'racoon', 'bird', 'dog', 'cat', 'squirrel', 'rabbit', 'skunk', 'lizard', 'rodent', 'badger', 'deer', 'cow', 'car', 'fox', 'pig', 'mountain_lion', 'bat', 'insect')
data = dict(
train=dict(
    type='CaltechDataset',
    img_prefix='/media/SSD2project/WilLiCam/datasets/caltech/cct_images',
    #img_prefix='/media/Pool/Thesis/Datensets/cct_images',
    classes=classes,
    ann_file='customDataCaltech/caltech_adv/eccv_train_adv.json'),
val=dict(
type='CaltechDataset',
img_prefix='/media/SSD2project/WilLiCam/datasets/caltech/cct_images',
#img_prefix='/media/Pool/Thesis/Datensets/cct_images',
classes=classes,
ann_file='customDataCaltech/caltech_adv/eccv_val_adv.json'),
test=dict(
type='CaltechDataset',
img_prefix='/media/SSD2project/WilLiCam/datasets/caltech/cct_images',
#img_prefix='/media/Pool/Thesis/Datensets/cct_images',
classes=classes,
ann_file='customDataCaltech/caltech_adv/eccv_val_TEST_adv.json'))
#http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth
load_from = 'checkpoints/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth'
work_dir = '/media/SSD2project/WilLiCam/checkpoint_workdir/faster_rcnn_x101_64x4d_fpn_1x_coco_from_coco'
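# Typical launch, assuming a standard mmdetection checkout and this file saved
# under configs/ (the config path below is hypothetical):
# python tools/train.py configs/willicam/faster_rcnn_x101_64x4d_fpn_1x_caltech.py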
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# CarMonitor EnviroPHAT Polling Thread and LED Interface
#
# Author: Patrick Schmidt <[email protected]>
# License: Apache License, Version 2.0
#
# See: https://shop.pimoroni.de/products/enviro-phat
# See: https://learn.pimoroni.com/tutorial/sandyj/getting-started-with-enviro-phat
# See: http://docs.pimoroni.com/envirophat/
#
from envirophat import weather, leds, motion
import threading
import time
class EnviroPoller(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.stopRequest = threading.Event()
self.enviroData = None
self.ledStatus = True
def run(self):
print('[CarMonitor::EnviroPoller] Starting...')
try:
while not self.stopRequest.isSet():
temperature = weather.temperature()
pressure = weather.pressure()
accelerometer = motion.accelerometer()
magnetometer = motion.magnetometer()
heading = motion.heading()
self.enviroData = {
'temperature': temperature,
'pressure': pressure,
'accelerometer': {'x': accelerometer.x, 'y': accelerometer.y, 'z': accelerometer.z },
'magnetometer': {'x': magnetometer.x, 'y': magnetometer.y, 'z': magnetometer.z },
'heading': heading
}
time.sleep(.5)
except StopIteration:
pass
def getData(self):
return self.enviroData
def ledOn(self):
self.ledStatus = True
leds.on()
def ledOff(self):
self.ledStatus = False
leds.off()
def ledToggle(self):
if self.ledStatus:
self.ledOff()
else:
self.ledOn()
def join(self, timeout=None):
    self.stopRequest.set()
    print('[CarMonitor::EnviroPoller] Stopping...')
    # honor the caller's timeout; fall back to the original 10s default
    super(EnviroPoller, self).join(timeout if timeout is not None else 10)
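# Minimal usage sketch (assumes an Enviro pHAT is attached):
# poller = EnviroPoller()
# poller.start()
# ...
# print(poller.getData())
# poller.join()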
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class AdContract(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isAdContract = True
super(AdContract, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
account_id = 'account_id'
account_mgr_fbid = 'account_mgr_fbid'
account_mgr_name = 'account_mgr_name'
adops_person_name = 'adops_person_name'
advertiser_address_fbid = 'advertiser_address_fbid'
advertiser_fbid = 'advertiser_fbid'
advertiser_name = 'advertiser_name'
agency_discount = 'agency_discount'
agency_name = 'agency_name'
bill_to_address_fbid = 'bill_to_address_fbid'
bill_to_fbid = 'bill_to_fbid'
campaign_name = 'campaign_name'
created_by = 'created_by'
created_date = 'created_date'
customer_io = 'customer_io'
io_number = 'io_number'
io_terms = 'io_terms'
io_type = 'io_type'
last_updated_by = 'last_updated_by'
last_updated_date = 'last_updated_date'
max_end_date = 'max_end_date'
mdc_fbid = 'mdc_fbid'
media_plan_number = 'media_plan_number'
min_start_date = 'min_start_date'
msa_contract = 'msa_contract'
payment_terms = 'payment_terms'
rev_hold_flag = 'rev_hold_flag'
rev_hold_released_by = 'rev_hold_released_by'
rev_hold_released_on = 'rev_hold_released_on'
salesrep_fbid = 'salesrep_fbid'
salesrep_name = 'salesrep_name'
sold_to_address_fbid = 'sold_to_address_fbid'
sold_to_fbid = 'sold_to_fbid'
status = 'status'
subvertical = 'subvertical'
thirdparty_billed = 'thirdparty_billed'
thirdparty_password = 'thirdparty_password'
thirdparty_uid = 'thirdparty_uid'
thirdparty_url = 'thirdparty_url'
vat_country = 'vat_country'
version = 'version'
vertical = 'vertical'
id = 'id'
_field_types = {
'account_id': 'string',
'account_mgr_fbid': 'string',
'account_mgr_name': 'string',
'adops_person_name': 'string',
'advertiser_address_fbid': 'string',
'advertiser_fbid': 'string',
'advertiser_name': 'string',
'agency_discount': 'float',
'agency_name': 'string',
'bill_to_address_fbid': 'string',
'bill_to_fbid': 'string',
'campaign_name': 'string',
'created_by': 'string',
'created_date': 'unsigned int',
'customer_io': 'string',
'io_number': 'unsigned int',
'io_terms': 'string',
'io_type': 'string',
'last_updated_by': 'string',
'last_updated_date': 'unsigned int',
'max_end_date': 'unsigned int',
'mdc_fbid': 'string',
'media_plan_number': 'string',
'min_start_date': 'unsigned int',
'msa_contract': 'string',
'payment_terms': 'string',
'rev_hold_flag': 'bool',
'rev_hold_released_by': 'int',
'rev_hold_released_on': 'unsigned int',
'salesrep_fbid': 'string',
'salesrep_name': 'string',
'sold_to_address_fbid': 'string',
'sold_to_fbid': 'string',
'status': 'string',
'subvertical': 'string',
'thirdparty_billed': 'unsigned int',
'thirdparty_password': 'string',
'thirdparty_uid': 'string',
'thirdparty_url': 'string',
'vat_country': 'string',
'version': 'unsigned int',
'vertical': 'string',
'id': 'string',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info
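# --- Illustrative usage sketch (not part of the generated file) ---
# `FacebookAdsApi.init` and the `fbid` constructor argument are real SDK
# entry points, but the token and id below are placeholders (assumptions):
#
# from facebook_business.api import FacebookAdsApi
# FacebookAdsApi.init(access_token='<ACCESS_TOKEN>')
# contract = AdContract(fbid='<AD_CONTRACT_ID>')
# print(AdContract.Field.advertiser_name)  # -> 'advertiser_name'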
|
from twozerofoureight import *
game_board = board()
game_board.generate_newpiece("start")
game_board.generate_newpiece("start")
game_board.print_board("normal")
def move(direction):
original_board = list(game_board.board_list)
if direction == "left":
game_board.move_left_right("left")
elif direction == "right":
game_board.move_left_right("right")
elif direction == "up":
game_board.upkey()
elif direction == "down":
game_board.downkey()
else:
pass
if original_board == game_board.board_list:
game_board.generate_newpiece("checkforloss")
else:
game_board.generate_newpiece("normal")
def play():
key = Arrow.input()
while key != Arrow.NONE:
# print(key)
if key == Arrow.UP:
move("up")
elif key == Arrow.DOWN:
move("down")
elif key == Arrow.LEFT:
move("left")
elif key == Arrow.RIGHT:
move("right")
key = Arrow.input()
print("None of the arrow keys was pressed")
if __name__ == "__main__":
play()
|
# -*- coding: utf-8 -*-
import unittest
import httpreplay
class TestHttp(unittest.TestCase):
def test_request(self):
r = httpreplay.interpret_http("POST / HTTP/1.0\r\nConnection: close\r\n\r\n1234", True)
self.assertEqual(r.body, "1234")
|
import sys
import re
import random
from libs.playerCommandsLib import Commands
from libs.playerCommandsLib import playerCommand
from libs.decs.itemDecs import *
from libs.decs.eventDecs import *
from libs.decs.NpcDecs import *
from libs.decs.playerDec import *
from libs.decs.structureDecs import *
from libs.decs.zoneDecs import *
##Done with the includes
def stringContains(word, phrase):##True if 'word' appears as a whole word in 'phrase'; reads like a normal boolean test.
    return findWord(word)(phrase) is not None
def findWord(w):##Returns a case-insensitive, word-boundary search function for 'w'.
    return re.compile(r'\b({0})\b'.format(w), flags=re.IGNORECASE).search
def bDeeper(dictValue):##Checks if there is a deeper level of conversation, i.e. the value is a nested dict.
    try:
        len(dictValue.keys())
        return True
    except AttributeError:
        return False
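# Illustrative checks of the word-boundary matching above (assumed inputs):
#   stringContains("cat", "a cat sat")   -> True
#   stringContains("cat", "concatenate") -> False  (no \b boundary around "cat")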
def Conversation(Location, Character, NPC, stage, previousStage):##Conversation 'scene': routes player input through the NPC's dialogue tree. (Dev note: wait event not working yet.)
cmd = input(">>Say>>")
if(cmd.lower() == "back" or cmd.lower() == "nevermind"):
print("<<" + NPC.name + "<<" + previousStage["introtext"])
Conversation(Location, Character, NPC, previousStage, NPC.Convo["intro"])
if("bye" in cmd.lower() or stringContains("leave", cmd) == True or "farewell" in cmd.lower()):
print("<<" + NPC.name + "<<" + NPC.Convo["intro"]["goodbye"])
Scene(Location, Character)
for i in stage:
        if(stringContains(i, cmd.lower())):
            if(bDeeper(stage[i])):
print("<<" + NPC.name + "<<" + stage[i]["introtext"])
if(NPC.bEvent == False):
Conversation(Location, Character, NPC, stage[i], stage)
break
else:
for e in NPC.Event.keys():
if(i in e):
stringToClass(NPC.Event[e]).triggerEvent(Location, Character)
break
else:
Conversation(Location, Character, NPC, stage[i], stage)
break
else:
if(NPC.bEvent == False):
print("<<" + NPC.name + "<<" + stage[i])
Conversation(Location, Character, NPC, stage, previousStage)
break
else:
print("<<" + NPC.name + "<<" + stage[i])
for e in NPC.Event:
                        if(i in e):
stringToClass(NPC.Event[e]).triggerEvent(Location, Character)
break
else:
Conversation(Location, Character, NPC, stage, previousStage)
break
else:
print("%s looks confused." % (NPC.name))
print("<<" + NPC.name + "<<" + NPC.Convo["intro"]["none"])
Conversation(Location, Character, NPC, stage, previousStage)
def checkForEvent(Location, Character, caller, situation):##Call this to check if an event should be run.
    if(caller.bEvent):
if(caller.Trigger == situation):
caller.Event.triggerEvent(Location, Character)
else:
Scene(Location, Character)
else:
Scene(Location, Character)
def ChangeLocation(oldLocation, newLocation, Character):##This moves the player from oldLocation to newLocation. Moves the character class to maintain inventory and stats.
print("You step out of the " + oldLocation.name + " and into " + newLocation.name + ", " + newLocation.description)
if(newLocation.npcs != []):
for c in newLocation.npcs:
print("%s is here." % (stringToClass(c).name))
checkForEvent(newLocation, Character, newLocation, "enterZone")
def Scene(Location, Character):##====This is the current scene. All commands and events should come back to this.
for c in Location.npcs:
        if stringToClass(c).bAggressive:
print("Battle because %s wants to fight." % (stringToClass(c).name))
print("----------")
Battle(Character, stringToClass(c), Location)
break
else:
print("----------")##clock could go here.
cmd = input(">>>")
for i in Commands:
        if(stringContains(i, cmd)):
stringToClassDef(playerCommand, i)(Location, Character, cmd)## This is where all player input is passed to the relevant command
else:
print("Command not recognised.")
Scene(Location, Character)
def stringToClass(s):##Resolves a string to the class of the same name defined in this module.
    return getattr(sys.modules[__name__], s)
def stringToClassDef(className, defName):##Resolves a method name on a class; the result is className.defName and can be called with arguments.
    return getattr(className, defName)
def enemyAttack(player, enemy, location, bDefend):
atk = random.randint(0, 2)
if(bDefend == False):
if atk == 0:
if(player.Body + random.randint(1, 20) < enemy.Body + random.randint(1, 20)):
Edmg = int(random.randint(0, enemy.HP) / 2)
print(enemy.Attacks["HP"][random.randint(0, len(enemy.Attacks["HP"])-1)])
print("You receive " + str(Edmg) + " HP damage.")
player.HP -= Edmg
print("----------")
if player.HP <= 0:
enemy.bAggressive = False
BattleComplete("playerHP_Lose", player, enemy, location)
else:
Battle(player, enemy, location)
else:
print(enemy.Attacks["HP"][random.randint(0, len(enemy.Attacks["HP"])-1)])
print("You manage to avoid taking damage.")
print("----------")
Battle(player, enemy, location)
        elif atk == 1:
if(player.Spirit + random.randint(1, 20) < enemy.Spirit + random.randint(1, 20)):
Edmg = int(random.randint(0, enemy.SP) / 2)
print(enemy.Attacks["SP"][random.randint(0, len(enemy.Attacks["SP"])-1)])
print("You receive " + str(Edmg) + " SP damage.")
player.SP -= Edmg
print("----------")
if player.SP <= 0:
enemy.bAggressive = False
BattleComplete("playerSP_Lose", player, enemy, location)
else:
Battle(player, enemy, location)
else:
print(enemy.Attacks["SP"][random.randint(0, len(enemy.Attacks["SP"])-1)])
print("You manage to avoid taking damage.")
print("----------")
Battle(player, enemy, location)
        elif atk == 2:
if(player.Mind + random.randint(1, 20) < enemy.Mind + random.randint(1, 20)):
Edmg = int(random.randint(0, enemy.MP) / 2)
print(enemy.Attacks["MP"][random.randint(0, len(enemy.Attacks["MP"])-1)])
print("You receive " + str(Edmg) + " MP damage.")
player.MP -= Edmg
print("----------")
if player.MP <= 0:
enemy.bAggressive = False
BattleComplete("playerMP_Lose", player, enemy, location)
else:
Battle(player, enemy, location)
else:
print(enemy.Attacks["MP"][random.randint(0, len(enemy.Attacks["MP"])-1)])
print("You manage to avoid taking damage.")
print("----------")
Battle(player, enemy, location)
else:
if atk == 0:
if(player.Body + random.randint(10, 30) < enemy.Body + random.randint(1, 20)):
Edmg = int(random.randint(0, enemy.HP) / 2)
print(enemy.Attacks["HP"][random.randint(0, len(enemy.Attacks["HP"])-1)])
print("You receive " + str(Edmg) + " HP damage.")
player.HP -= Edmg
print("----------")
if player.HP <= 0:
enemy.bAggressive = False
BattleComplete("playerHP_Lose", player, enemy, location)
else:
Battle(player, enemy, location)
else:
print(enemy.Attacks["HP"][random.randint(0, len(enemy.Attacks["HP"])-1)])
print("You manage to avoid taking damage.")
print("----------")
Battle(player, enemy, location)
        elif atk == 1:
if(player.Spirit + random.randint(10, 30) < enemy.Spirit + random.randint(1, 20)):
Edmg = int(random.randint(0, enemy.SP) / 2)
print(enemy.Attacks["SP"][random.randint(0, len(enemy.Attacks["SP"])-1)])
print("You receive " + str(Edmg) + " SP damage.")
player.SP -= Edmg
print("----------")
if player.SP <= 0:
enemy.bAggressive = False
BattleComplete("playerSP_Lose", player, enemy, location)
else:
Battle(player, enemy, location)
else:
print(enemy.Attacks["SP"][random.randint(0, len(enemy.Attacks["SP"])-1)])
print("You manage to avoid taking damage.")
print("----------")
Battle(player, enemy, location)
        elif atk == 2:
if(player.Mind + random.randint(10, 30) < enemy.Mind + random.randint(1, 20)):
Edmg = int(random.randint(0, enemy.MP) / 2)
print(enemy.Attacks["MP"][random.randint(0, len(enemy.Attacks["MP"])-1)])
print("You receive " + str(Edmg) + " MP damage.")
player.MP -= Edmg
print("----------")
if player.MP <= 0:
enemy.bAggressive = False
BattleComplete("playerMP_Lose", player, enemy, location)
else:
Battle(player, enemy, location)
else:
print(enemy.Attacks["MP"][random.randint(0, len(enemy.Attacks["MP"])-1)])
print("You manage to avoid taking damage.")
print("----------")
Battle(player, enemy, location)
def BattleComplete(cause, PC, NPC, location):##called when the battle is complete.
if(len(NPC.inventory) > 0):
print("The %s is defeated, dropping:" % NPC.name)
for i in NPC.inventory:
location.addItem(i, NPC.inventory[i])
if NPC.inventory[i] > 1:
print(str(NPC.inventory[i]) + " " + i + "s")
else:
print("a %s" % i)
else:
print("The %s is defeated, dropping no items." % NPC.name)
print("----------")
stringToClass(NPC.Event[cause]).triggerEvent(location, PC)
def Battle(player, enemy, location):
print("You are battling a " + enemy.name + ", " + enemy.description)
if enemy.HP >= (enemy.Body * 10 / 2):
print("It looks in perfect health.")
if enemy.HP <= (enemy.Body * 10 / 4):
print("It looks a little shaky on its feet.")
if enemy.HP <= (enemy.Body):
print("It looks near death.")
print("----------")
print("HP: %s" % (str(player.HP)))
print("SP: %s" % (str(player.SP)))
print("MP: %s" % (str(player.MP)))
print("----------")
cmd = input(">>Attack>>")
print("----------")
if cmd.lower() == "help":
print("Available Battle Commands:")
print("'Body' - A basic attack.")
print("'Spirit' - Perform a spiritual attack on your enemy.")
print("'Mind' - Attack your enemy's mind.")
print("'Defend' - Ready yourself for any enemy attack.")
print("----------")
Battle(player, enemy, location)
if cmd.lower() == "body":
if(player.Body + random.randint(1, 20) > enemy.Body + random.randint(1, 20)):
dmg = int(random.randint(0, player.HP) / 2)
print(player.Attacks["HP"][random.randint(0, len(player.Attacks["HP"]) -1)] + " causing " + str(dmg) + " HP damage.")
enemy.HP -= dmg
print("----------")
if enemy.HP <= 0:
enemy.bAggressive = False
BattleComplete("playerHP_Victory", player, enemy, location)
else:
enemyAttack(player, enemy, location, False)
else:
print(player.Attacks["HP"][random.randint(0, len(player.Attacks["HP"]) -1)] + " but your attack misses.")
print("----------")
enemyAttack(player, enemy, location, False)
if cmd.lower() == "spirit":
if(player.Spirit + random.randint(1, 20) > enemy.Spirit + random.randint(1, 20)):
dmg = int(random.randint(0, player.SP) / 2)
print(player.Attacks["SP"][random.randint(0, len(player.Attacks["SP"]) -1)] + " causing " + str(dmg) + " SP damage.")
enemy.SP -= dmg
print("----------")
if enemy.SP <= 0:
enemy.bAggressive = False
BattleComplete("playerSP_Victory", player, enemy, location)
else:
enemyAttack(player, enemy, location, False)
else:
print(player.Attacks["SP"][random.randint(0, len(player.Attacks["SP"]) -1)] + " but your attack fails.")
print("----------")
enemyAttack(player, enemy, location, False)
if cmd.lower() == "mind":
if(player.Mind + random.randint(1, 20) > enemy.Mind + random.randint(1, 20)):
dmg = int(random.randint(0, player.MP) / 2)
print(player.Attacks["MP"][random.randint(0, len(player.Attacks["MP"]) -1)] + " causing " + str(dmg) + " MP damage.")
enemy.MP -= dmg
print("----------")
if enemy.MP <= 0:
enemy.bAggressive = False
BattleComplete("playerMP_Victory", player, enemy, location)
else:
enemyAttack(player, enemy, location, False)
else:
print(player.Attacks["MP"][random.randint(0, len(player.Attacks["MP"]) -1)] + " but your attack fails.")
print("----------")
enemyAttack(player, enemy, location, False)
if cmd.lower() == "defend":
print("You ready yourself for any attacks that may come.")
print("----------")
enemyAttack(player, enemy, location, True)
else:
print("Command not recognized")
print("----------")
        Battle(player, enemy, location)
|
import os
cur_dir = os.path.dirname(os.path.abspath(__file__))
num_questions = 0
with open(f"{cur_dir}/input") as f:
cur_group = set()
initialized = False
for line in f:
if line == "\n":
num_questions += len(cur_group)
cur_group = set()
initialized = False
else:
            new_member_in_group = set(line.strip())
if initialized:
cur_group = cur_group.intersection(new_member_in_group)
else:
cur_group = new_member_in_group
initialized = True
print(f"{num_questions} total questions were asked")
|
from .multisource_dataset import MultiSourceDataset
from .streaming_multisource_dataset import StreamingMultiSourceDataset
from .multilingual_translation_task import MultilingualTranslationTask
|
""" An example of the 'science' theme. """
import os
import numpy as np
import matplotlib.pyplot as plt
plt.style.use(['science'])
x = np.linspace(0.75, 1.25, 201)
def model(x, p):
return x ** (2 * p + 1) / (1 + x ** (2 * p))
fig, ax = plt.subplots()
for p in [10, 15, 20, 30, 50, 100]:
ax.plot(x, model(x, p), label=p)
ax.legend(title=r'Order')
ax.set(xlabel=r'$V_0 / V_\mathrm{gap}$')
ax.set(ylabel=r'$I_\mathrm{dc}^0 / I_\mathrm{gap}$')
ax.autoscale(tight=True)
os.makedirs('figures', exist_ok=True)  # make sure the output directory exists
fig.savefig('figures/fig1.pdf')
fig.savefig('figures/fig1.pgf')
fig.savefig('figures/fig1.jpg', dpi=300)
|
from django.shortcuts import render
# Create your views here.
from rest_framework import viewsets
from .serializers import (
TmDataSerializer,
CycleSerializer
)
from .models import (
Tm,
Cycle
)
class TmViewSet(viewsets.ModelViewSet):
serializer_class = TmDataSerializer
queryset = Tm.objects.all()
class CycleViewSet(viewsets.ModelViewSet):
serializer_class = CycleSerializer
queryset = Cycle.objects.all()
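# Illustrative URL wiring for these viewsets (a sketch of a typical DRF
# urls.py; the route prefixes are assumptions, not part of this app):
#
# from rest_framework import routers
# router = routers.DefaultRouter()
# router.register(r'tm', TmViewSet)
# router.register(r'cycle', CycleViewSet)
# urlpatterns = router.urls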
|
import pygame
from modules import basic_classes
from modules import basic_globals
from modules.gamemaker_functions import *
from games.grarantanna import grarantanna_gun
import os
class Player(basic_classes.UpdatableObj):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tag = 'player'
# Starting vars
self.start_x = self.x
self.start_y = self.y
for sprite in range(12):
surf = pygame.Surface((self.size, self.size))
surf.fill(basic_globals.BG_COLOR)
surf.blit(pygame.image.load(os.path.join('resources/plyta', f'plyta{sprite+1}.png')), (0, 0))
self.sprites.append(surf)
self.spd = 2.2
self.grv = 0.4
self.hsp = 0
self.vsp = 0
self.on_ground = False
self.on_boost = False
self.jump = -7
self.power_jump = -11.3
self.drawing_death_animation = False
self.is_flying = False
self.flying_speed = 3
self.is_kicked_sideways = False
self.kick_sideways_speed = 3
self.teleporting = False
self.teleporting_prev = False
self.teleport_up_speed = -5
self.moving_from_prev = 1
self.collected_strings = []
self.to_collect_string = ''
self.to_collect_string_colored = ''
self.collect_indicator_offset = 10
self.font = pygame.font.Font('resources/joystix.ttf', 18)
self.gun = grarantanna_gun.Gun(owner=self, x=self.x+3, y=self.y+3)
def update(self, keys):
super().update(keys)
if self.to_collect_string == ' ' * len(self.to_collect_string_colored):
self.parent.next_level()
self.teleporting_prev = self.teleporting
self.teleporting = False
# If player died
if self.drawing_death_animation:
self.vsp += self.grv
self.y += self.vsp
self.animation_speed = 1
if self.y > self.parent.HEIGHT:
self.reset_vars()
self.parent.reset_level()
return
# Basic movement
self.hsp = (int(keys[pygame.K_d]) - int(keys[pygame.K_a])) * self.spd * self.moving_from_prev
if self.hsp == 0:
self.moving_from_prev = 1
self.vsp += self.grv
# If player is flying
if self.is_flying:
self.hsp = self.flying_speed
self.vsp = 0
self.is_kicked_sideways = False
elif self.is_kicked_sideways:
self.hsp = self.kick_sideways_speed
# Jumping
if keys[pygame.K_SPACE] and self.on_ground:
if not self.is_flying:
self.vsp = self.jump if not self.on_boost else self.power_jump
self.is_flying = False
self.on_ground = False
self.on_boost = False
self.parent.channel.play(self.parent.sound_skok)
if self.vsp < -15:
self.vsp = -15
elif self.vsp > 15:
self.vsp = 15
hsp = self.hsp
vsp = self.vsp
self.animation_speed = 0.6 * sign(self.hsp)
# Collision handling
for block in self.parent.game_tiles:
if block.tag == 'start':
continue
if self.vsp == 0 and self.hsp == 0:
break
else: # For every other block
if block.tag != 'czesc':
if place_meeting(self.x + hsp, self.y, block, self):
while not place_meeting(self.x + sign(self.hsp), self.y, block, self):
self.x += sign(self.hsp)
if not 0 <= self.x <= self.parent.WIDTH:
break
self.is_flying = False
self.hsp = 0
hsp = 0
if place_meeting(self.x, self.y + vsp, block, self):
while not place_meeting(self.x, self.y + sign(self.vsp), block, self):
self.y += sign(self.vsp)
if not 0 <= self.y <= self.parent.HEIGHT:
break
self.vsp = 0
vsp = 0
self.is_kicked_sideways = False
# Test for the right side of the player
if place_meeting(self.x + 1, self.y, block, self):
if block.tag == 'magnes_lewo' or block.tag == 'magnes_wszystko':
self.hsp = 0
hsp = 0
if block.tag == 'zabija_lewo':
self.lose_hp()
if block.tag == 'trojkat_lewo':
self.is_kicked_sideways = True
self.kick_sideways_speed = -abs(self.kick_sideways_speed)
if block.tag == 'tp':
if place_meeting(self.x + 1, self.y - self.size, block, self) and \
place_meeting(self.x + 1, self.y + self.size, block, self):
if block.has_portal_on_side('left'):
self.teleporting = True
b = block.get_opposite()
if b is not None and not self.teleporting_prev:
self.teleporting_prev = True
self.tp_self(b, block, 'left')
if block.tag == 'czesc':
self.collected_strings.append(block.text)
block.letter_collect()
continue
# Test for the left side of the player
if place_meeting(self.x - 1, self.y, block, self):
if block.tag == 'magnes_prawo' or block.tag == 'magnes_wszystko':
self.hsp = 0
hsp = 0
if block.tag == 'zabija_prawo':
self.lose_hp()
if block.tag == 'trojkat_prawo':
self.is_kicked_sideways = True
self.kick_sideways_speed = abs(self.kick_sideways_speed)
if block.tag == 'tp':
if place_meeting(self.x - 1, self.y - self.size, block, self) and \
place_meeting(self.x - 1, self.y + self.size, block, self):
if block.has_portal_on_side('right'):
self.teleporting = True
b = block.get_opposite()
if b is not None and not self.teleporting_prev:
self.teleporting_prev = True
self.tp_self(b, block, 'right')
if block.tag == 'czesc':
self.collected_strings.append(block.text)
block.letter_collect()
continue
# Test for player's head
if place_meeting(self.x, self.y - 1, block, self):
if block.tag == 'magnes_dol' or block.tag == 'magnes_wszystko':
self.vsp = 0
vsp = 0
# if keys[pygame.K_SPACE]:
# vsp = 0.1
if block.tag == 'tp':
if place_meeting(self.x+self.size, self.y - 1, block, self) and \
place_meeting(self.x-self.size, self.y - 1, block, self):
if block.has_portal_on_side('bottom'):
self.teleporting = True
b = block.get_opposite()
if b is not None and not self.teleporting_prev:
self.teleporting_prev = True
self.tp_self(b, block, 'bottom')
if block.tag == 'czesc':
self.collected_strings.append(block.text)
block.letter_collect()
continue
# Test for player's feet
if place_meeting(self.x, self.y + 1, block, self):
self.on_ground = True
if block.tag == 'magnes_gora' or block.tag == 'magnes_wszystko':
self.vsp = 0
vsp = 0
if block.tag == 'kolce':
self.lose_hp()
elif block.tag == 'trojkat_gora':
self.on_boost = True
elif block.tag == 'zamiana':
block.tag = 'kwadrat'
self.spd *= -1
block.rem(delay=500)
if block.tag == 'znikajacy_kwadrat':
block.rem(delay=200)
if block.tag == 'leci_lewo':
self.is_flying = True
self.flying_speed = -abs(self.flying_speed)
self.x = block.x-self.size
self.y = block.y + block.size // 2 - self.size // 2
self.gun.x = self.x + self.gun.size//2
self.gun.y = self.y + self.gun.size//2
if block.tag == 'leci_prawo':
self.is_flying = True
self.flying_speed = abs(self.flying_speed)
self.x = block.x + block.size
self.y = block.y + block.size // 2 - self.size // 2
if block.tag == 'tp':
if place_meeting(self.x+self.size, self.y + 1, block, self) and \
place_meeting(self.x-self.size, self.y + 1, block, self):
if block.has_portal_on_side('top'):
self.teleporting = True
b = block.get_opposite()
if b is not None and not self.teleporting_prev:
self.on_ground = False
self.teleporting_prev = True
self.tp_self(b, block, 'top')
if block.tag == 'czesc':
self.collected_strings.append(block.text)
block.letter_collect()
continue
if self.vsp > 0:
# Falling
self.on_ground = False
self.on_boost = False
self.x += hsp
self.y += vsp
# If on the edge of the screen
if not 0 <= self.x <= self.parent.WIDTH or \
not 0 <= self.y <= self.parent.HEIGHT:
self.lose_hp()
def tp_self(self, block, block_original, current):
self.parent.channel.play(self.parent.sound_teleport)
dest = 'right'
sides = ['right', 'left', 'bottom', 'top']
if block == block_original:
sides = [s for s in sides if s != current]
for side in sides:
if block.has_portal_on_side(side):
dest = side
break
if dest == 'top':
self.vsp = min(self.vsp, self.teleport_up_speed)
if dest == 'right':
self.x = block.x + block.width + 5
self.y = block.y + block.height // 2 - self.height // 2
elif dest == 'left':
self.x = block.x - self.width - 5
self.y = block.y + block.height // 2 - self.height // 2
elif dest == 'top':
self.x = block.x + block.width // 2 - self.width // 2
self.y = block.y - self.height - 5
elif dest == 'bottom':
self.x = block.x + block.width // 2 - self.width // 2
self.y = block.y + block.height + 5
self.moving_from_prev = 1
if (current == 'right' and dest == 'right') or (current == 'left' and dest == 'left'):
self.moving_from_prev = -1
self.gun.x = self.x + self.gun.size//2
self.gun.y = self.y + self.gun.size//2
def reset_vars(self):
self.x = self.start_x
self.y = self.start_y
self.vsp = 0
self.hsp = 0
self.gun.x = self.x + self.gun.size//2
self.gun.y = self.y + self.gun.size//2
self.spd = abs(self.spd)
self.collected_strings = []
self.is_flying = False
self.on_boost = False
self.on_ground = False
self.drawing_death_animation = False
def lose_hp(self):
        # What happens when the player dies
self.vsp = -7
self.drawing_death_animation = True
self.parent.channel.play(self.parent.sound_smierc)
def draw(self, surface):
super().draw(surface)
text1 = self.font.render(self.to_collect_string, True, (20, 20, 20)) # Not collected
pygame.draw.rect(surface, (150, 150, 150), (0, 0, text1.get_width() + self.collect_indicator_offset * 2,
text1.get_height() + self.collect_indicator_offset * 2))
surface.blit(text1, (self.collect_indicator_offset, self.collect_indicator_offset))
for string in self.collected_strings:
for letter in string:
if letter == ' ':
continue
index = self.to_collect_string.find(letter)
self.to_collect_string_colored = self.to_collect_string_colored[:index] + letter + self.to_collect_string_colored[index + 1:]
self.to_collect_string = self.to_collect_string[:index] + ' ' + self.to_collect_string[index + 1:]
self.collected_strings.remove(string)
text2 = self.font.render(self.to_collect_string_colored, True, (249, 240, 7)) # Collected
surface.blit(text2, (self.collect_indicator_offset, self.collect_indicator_offset))
|
#!/usr/bin/python
# A tool to quickly generate large skip/load/xfail lists for Rally in case of customized cloud policies
#
# How to use:
# 1. Prepare a list of testcases you want to include (e.g. rally verify show | grep failed > cases.list)
# 2. Prepare a complete list of testcases with ids (cd ~/.rally/verification/verifier-<uuid>; . .venv/bin/activate; cd repo; stestr list > ~/source/cvp-configuration/tempest/stestr.list)
# 3. Run this tool, use results.
#
cases = []
explanation = "Transition from the state Allowed to state Allowed is not allowed"
with open("cases.list", "rt") as f:
cases = [x.replace("\n", "") for x in f.readlines()]
full_list = []
with open("stestr.list", "rt") as f:
full_list = [x.replace("\n", "") for x in f.readlines()]
results = []
for case in cases:
    for case_id in full_list:
        if case in case_id:
            results.append(case_id + ": " + explanation)
            break
for line in results:
    print(line)
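# Illustrative output, one line per matched case (the test id comes from stestr.list):
# <full_test_id>: Transition from the state Allowed to state Allowed is not allowed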
|
import neuralnet as n
import numpy as np
net = n.NeuralNet([2, 4, 1])
net.train(np.array([[0, 1]]), np.array([[1]]))
|
import collections
import functools
import typing
from hat.event.common.data import EventType
class Subscription:
"""Subscription defined by query event types"""
def __init__(self,
query_types: typing.Iterable[EventType],
cache_maxsize: typing.Optional[int] = 0):
self._root = False, {}
for query_type in query_types:
self._root = _add_query_type(self._root, query_type)
if cache_maxsize is None or cache_maxsize > 0:
self.matches = functools.lru_cache(
maxsize=cache_maxsize)(self.matches)
def get_query_types(self) -> typing.Iterable[EventType]:
"""Calculate sanitized query event types"""
yield from _get_query_types(self._root)
def matches(self, event_type: EventType) -> bool:
"""Does `event_type` match subscription"""
return _matches(self._root, event_type, 0)
def union(self, *others: 'Subscription') -> 'Subscription':
"""Create new subscription including event types from this and
other subscriptions."""
result = Subscription([])
result._root = _union(
[self._root, *(other._root for other in others)])
return result
def isdisjoint(self, other: 'Subscription') -> bool:
"""Return ``True`` if this subscription has no event types in common
with other subscription."""
return _isdisjoint(self._root, other._root)
_Node = typing.Tuple[bool, # is_leaf
typing.Dict[str, # subtype
'_Node']] # child
def _add_query_type(node, query_type):
is_leaf, children = node
if '*' in children:
return node
if not query_type:
return True, children
head, rest = query_type[0], query_type[1:]
if head == '*':
if rest:
raise ValueError('invalid query event type')
children.clear()
children['*'] = True, {}
else:
child = children.get(head, (False, {}))
child = _add_query_type(child, rest)
children[head] = child
return node
def _get_query_types(node):
is_leaf, children = node
if is_leaf and '*' not in children:
yield ()
for head, child in children.items():
for rest in _get_query_types(child):
yield (head, *rest)
def _matches(node, event_type, event_type_index):
is_leaf, children = node
if '*' in children:
return True
if event_type_index >= len(event_type):
return is_leaf
child = children.get(event_type[event_type_index])
if child and _matches(child, event_type, event_type_index + 1):
return True
child = children.get('?')
if child and _matches(child, event_type, event_type_index + 1):
return True
return False
def _union(nodes):
if len(nodes) < 2:
return nodes[0]
is_leaf = any(i for i, _ in nodes)
names = {}
for _, node_children in nodes:
for name, node_child in node_children.items():
if name == '*':
return is_leaf, {'*': (True, {})}
if name not in names:
names[name] = collections.deque()
names[name].append(node_child)
children = {name: _union(named_children)
for name, named_children in names.items()}
return is_leaf, children
def _isdisjoint(first_node, second_node):
first_is_leaf, first_children = first_node
second_is_leaf, second_children = second_node
if first_is_leaf and second_is_leaf:
return False
if (('*' in first_children and second_children) or
('*' in second_children and first_children)):
return False
if '?' in first_children:
for child in second_children.values():
if not _isdisjoint(first_children['?'], child):
return False
if '?' in second_children:
for name, child in first_children.items():
if name == '?':
continue
if not _isdisjoint(second_children['?'], child):
return False
names = set(first_children.keys()).intersection(second_children.keys())
for name in names:
if name == '?':
continue
if not _isdisjoint(first_children[name], second_children[name]):
return False
return True
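# Illustrative usage sketch (the event types below are assumptions, not from
# the module itself):
#
# >>> sub = Subscription([('a', '*'), ('b', 'c')])
# >>> sub.matches(('a', 'x', 'y'))
# True
# >>> sub.matches(('b', 'c'))
# True
# >>> sub.matches(('b', 'd'))
# False
# >>> sub.isdisjoint(Subscription([('b', 'd')]))
# True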
|
# EPAN_NI.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Projects
/
FLOWSA
/
Years = 2012
Eliminated all columns with fraction in the title.
"""
import io
import pandas as pd
import xml.etree.ElementTree as ET
from esupy.remote import make_url_request
def url_file(url):
url_array = url.split("get/")
url_file = url_array[1]
return url_file
def column_names(file_name):
base_url = 'https://www.sciencebase.gov/catalog/file/get/'
pacific_region = ['5d407318e4b01d82ce8d9b3c?f=__disk__22%2F5c%2Fe3%2F225'
'ce31141477eb0904f38f95f1d472bbe2a2a11',
'5d407318e4b01d82ce8d9b3c?f=__disk__2b%2F75%2F2b%2F2b7'
'52b0c5decf8e83c035d559a2688c481bb0cfe']
midwestern = ['5cbf5150e4b09b8c0b700df3?f=__disk__66%2F4f%2Ff2%2F664ff289'
'064560bbce748082f7b34593dad49ca2',
'5cbf5150e4b09b8c0b700df3?f=__disk__bf%2F73%2F1f%2Fbf731fdf'
'4e984a5cf50c0f1a140cda366cb8c1d3']
northeastern = ['5d4192aee4b01d82ce8da477?f=__disk__c2%2F02%2F06%2Fc202060'
'78520c5ec87394a3499eea073f472a27d',
'5d4192aee4b01d82ce8da477?f=__disk__b0%2Fb9%2F35%2Fb0b9350'
'21a47ccf57f7584cc7f14d82aacc491d1']
southwestern = ['5f8f1f1282ce06b040efc90e?f=__disk__f8%2Fb8%2Ff9%2Ff8b8f9'
'bdc2a07f014ed6dced8feb2dd7bc63e056',
'5f8f1f1282ce06b040efc90e?f=__disk__8e%2F8e%2Fb8%2F8e8eb8'
'203ea14ab19a45372919a0dbf667d033b2']
southeastern = ['5d6e70e5e4b0c4f70cf635a1?f=__disk__fb%2Fdb%2F92%2Ffbdb928'
'1872069b23bcd134a4c5fa1ddc7280b53',
'5d6e70e5e4b0c4f70cf635a1?f=__disk__14%2Fc1%2F63%2F14c1636'
'eef91529f548d5fe29ff3f426d3b4b996']
if file_name in pacific_region:
legend_name = "5d407318e4b01d82ce8d9b3c?f=__disk__ab%2F27%2F08%2Fab" \
"27083f354bd851ec09bc0f33c2dc130f808bb5"
elif file_name in midwestern:
legend_name = "5cbf5150e4b09b8c0b700df3?f=__disk__a6%2Ffb%2Fd6%2Fa6f" \
"bd6f6bcce874109d2e989d1d4d5a67c33cd49"
elif file_name in northeastern:
legend_name = "5d4192aee4b01d82ce8da477?f=__disk__81%2F5d%2F3d%2F815" \
"d3deb08f82c1662ff94eb941074ff99c75088"
elif file_name in southwestern:
legend_name = "5f8f1f1282ce06b040efc90e?f=__disk__44%2Ff6%2F74%2F44f" \
"674b54b2fa571191a597c8dfae0923893d3d3"
    elif file_name in southeastern:
        legend_name = "5d6e70e5e4b0c4f70cf635a1?f=__disk__93%2Fba%2F5c%2F93b" \
                      "a5c50c58ced4116ad2e5b9783fc7848ab2cb5"
    else:
        raise ValueError("unrecognized file name: %s" % file_name)
contents = make_url_request(base_url + legend_name)
xslt_content = contents.content.decode('utf-8')
root = ET.fromstring(xslt_content)
label = []
name = []
for attr in root.iter('attr'):
for child in attr:
if str(child.tag) == 'attrlabl':
label.append(str(child.text))
if str(child.tag) == 'attrdef':
name.append(str(child.text))
legend = pd.DataFrame()
legend["label"] = label
legend["name"] = name
return legend
def name_and_unit_split(df_legend):
for i in range(len(df_legend)):
apb = df_legend.loc[i, "name"]
apb_str = str(apb)
if ',' in apb_str:
apb_split = apb_str.split(',')
activity = apb_split[0]
unit_str = apb_split[1]
unit_list = unit_str.split('/')
unit = unit_list[0]
df_legend.loc[i, "name"] = activity
df_legend.loc[i, "Unit"] = unit
else:
df_legend.loc[i, "Unit"] = None
return df_legend
def name_replace(df_legend, df_raw):
for col_name in df_raw.columns:
for i in range(len(df_legend)):
if col_name == df_legend.loc[i, "label"]:
if col_name.lower() != "comid":
df_raw = df_raw.rename(
columns={col_name: df_legend.loc[i, "name"]})
return df_raw
def sparrow_url_helper(*, build_url, config, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
    # initiate url list for sparrow data
urls = []
# replace "__xlsx_name__" in build_url to create three urls
for x in config['txt']:
url = build_url
url = url.replace("__txt__", x)
urls.append(url)
return urls
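# Illustrative behavior (assumed values): with build_url
# "https://example.org/get/__txt__" and config = {'txt': ['abc', 'def']},
# sparrow_url_helper returns
# ["https://example.org/get/abc", "https://example.org/get/def"].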
def sparrow_call(*, resp, url, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param url: string, url
:return: pandas dataframe of original source data
"""
text = resp.content
ph = ['5cbf5150e4b09b8c0b700df3?f=__disk__bf%2F73%2F1f%2Fbf731fdf4e984'
'a5cf50c0f1a140cda366cb8c1d3',
'5d407318e4b01d82ce8d9b3c?f=__disk__2b%2F75%2F2b%2F2b752b0c5decf8e'
'83c035d559a2688c481bb0cfe',
'5d4192aee4b01d82ce8da477?f=__disk__b0%2Fb9%2F35%2Fb0b935021a47ccf5'
'7f7584cc7f14d82aacc491d1',
'5f8f1f1282ce06b040efc90e?f=__disk__8e%2F8e%2Fb8%2F8e8eb8203ea14ab1'
'9a45372919a0dbf667d033b2',
'5d6e70e5e4b0c4f70cf635a1?f=__disk__14%2Fc1%2F63%2F14c1636eef91529f'
'548d5fe29ff3f426d3b4b996']
ni = ['5cbf5150e4b09b8c0b700df3?f=__disk__66%2F4f%2Ff2%2F664ff289064560bb'
'ce748082f7b34593dad49ca2',
'5d407318e4b01d82ce8d9b3c?f=__disk__22%2F5c%2Fe3%2F225ce31141477eb09'
'04f38f95f1d472bbe2a2a11',
'5d4192aee4b01d82ce8da477?f=__disk__c2%2F02%2F06%2Fc20206078520c5ec'
'87394a3499eea073f472a27d',
'5f8f1f1282ce06b040efc90e?f=__disk__f8%2Fb8%2Ff9%2Ff8b8f9bdc2a07f01'
'4ed6dced8feb2dd7bc63e056',
'5d6e70e5e4b0c4f70cf635a1?f=__disk__fb%2Fdb%2F92%2Ffbdb9281872069b2'
'3bcd134a4c5fa1ddc7280b53']
comid_cap = ["5f8f1f1282ce06b040efc90e?f=__disk__8e%2F8e%2Fb8%2F8e8eb8203e"
"a14ab19a45372919a0dbf667d033b2",
"5f8f1f1282ce06b040efc90e?f=__disk__f8%2Fb8%2Ff9%2Ff8b8f9bdc"
"2a07f014ed6dced8feb2dd7bc63e056"]
url_file_name = url_file(url)
legend = column_names(url_file_name)
legend = name_and_unit_split(legend)
if url_file_name in ph:
chem_type = "Phosphorus"
else:
chem_type = "Nitrogen"
for i in range(len(legend)):
if "incremental" in legend.loc[i, "name"].lower():
legend.loc[i, "FlowName"] = chem_type + ' incremental'
elif "accumulated" in legend.loc[i, "name"].lower():
legend.loc[i, "FlowName"] = chem_type + ' accumulated'
elif "cumulated" in legend.loc[i, "name"].lower():
legend.loc[i, "FlowName"] = chem_type + ' cumulated'
elif "mean" in legend.loc[i, "name"].lower():
legend.loc[i, "FlowName"] = chem_type + ' mean'
elif "Total upstream watershed area" in legend.loc[i, "name"].lower():
legend.loc[i, "FlowName"] = chem_type + ' total'
else:
legend.loc[i, "FlowName"] = chem_type
if "5d407318e4b01d82ce8d9b3c?f=__disk__2b%2F75%2F2b%2F2b752b0c5decf8e83" \
"c035d559a2688c481bb0cfe" in url:
df_raw = pd.read_csv(io.StringIO(text.decode('utf-8')), sep='\t')
else:
df_raw = pd.read_csv(io.StringIO(text.decode('utf-8')))
df_raw = name_replace(legend, df_raw)
legend = legend.drop(columns=['label'])
legend = legend.rename(columns={"name": "ActivityProducedBy"})
if url_file_name in comid_cap:
df_raw = df_raw.rename(columns={"COMID": "comid"})
df_spread = pd.DataFrame()
df_no_spread = pd.DataFrame()
for column_name in df_raw.columns:
if "fraction" in column_name.lower() or "flux" in column_name.lower():
df_raw = df_raw.drop(columns=[column_name])
elif "standard error" in column_name.lower():
df_spread[column_name] = df_raw[column_name]
df_raw = df_raw.drop(columns=[column_name])
spread_coul = []
for cn in df_spread.columns:
if "Standard error for " in cn:
c_name = cn.split("Standard error for ")
df_spread = df_spread.rename(columns={cn: c_name[1].capitalize()})
spread_coul.append(c_name[1].capitalize())
else:
c_name = cn.split("standard error")
spread_coul.append(c_name[0].capitalize())
for column_name in df_raw.columns:
if column_name not in spread_coul and column_name != "comid":
df_no_spread[column_name] = df_raw[column_name]
df_raw = df_raw.drop(columns=[column_name])
df_no_spread["comid"] = df_raw["comid"]
df_spread["comid"] = df_raw["comid"]
# use "melt" fxn to convert colummns into rows
df = df_raw.melt(id_vars=["comid"],
var_name="ActivityProducedBy",
value_name="FlowAmount")
df = df.rename(columns={"comid": "Location"})
df_spread = df_spread.melt(id_vars=["comid"],
var_name="spread_name",
value_name="Spread")
df_spread = df_spread.rename(columns={"comid": "Location"})
df_spread = df_spread.rename(columns={"spread_name": "ActivityProducedBy"})
df_spread["MeasureofSpread"] = 'SE'
df_no_spread = df_no_spread.melt(id_vars=["comid"],
var_name="ActivityProducedBy",
value_name="FlowAmount")
df_no_spread = df_no_spread.rename(columns={"comid": "Location"})
df_no_spread = pd.merge(df_no_spread, legend, on="ActivityProducedBy")
df = pd.merge(df, legend, on="ActivityProducedBy")
df = pd.merge(df, df_spread, left_on=["ActivityProducedBy", "Location"],
right_on=["ActivityProducedBy", "Location"])
dataframes = [df, df_no_spread]
df1 = pd.concat(dataframes)
    df1 = df1.reset_index(drop=True)
return df1
def sparrow_parse(*, df_list, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
    dataframe = pd.DataFrame()
    for df in df_list:
        df["Compartment"] = "ground"
df["Class"] = "Chemicals"
df["SourceName"] = "USGS_SPARROW"
df["LocationSystem"] = 'HUC'
df["Year"] = str(year)
df["ActivityConsumedBy"] = None
dataframe = pd.concat(df_list, ignore_index=True)
return dataframe
|
from __future__ import division
# LIBTBX_SET_DISPATCHER_NAME prime.viewstats
'''
Author : Uervirojnangkoorn, M.
Created : 9/19/2014
Description : View convergence and other stats for post-refinement.
'''
import matplotlib.pyplot as plt
import sys
import numpy as np
if len(sys.argv)==1:
  print('Use prime.viewstats to view convergence of post-refined parameters.')
  print('Usage: prime.viewstats your_run_no')
exit()
run_no = sys.argv[1]
#read .paramhist files and display refinement results
import os
cn_file = 0
for file_in in os.listdir(run_no):
if file_in.endswith('.paramhist'):
cn_file += 1
if cn_file == 0:
  print('Cannot find .paramhist file in your', run_no, '.')
  print('To enable viewstats, rerun prime with flag_output_verbose=True in .phil file.')
  print('The .paramhist parameters will be recorded during the run.')
exit()
param_file_list = []
for i in range(cn_file):
param_file_list.append(run_no+'/'+str(i)+'.paramhist')
data_dict_list = []
for param_file in param_file_list:
  with open(param_file, 'r') as pf:
    data = pf.read().split('\n')
data_dict = {}
n_data = 0
for data_row in data:
dc = data_row.split()
#use row 1 to set n_col
if n_data == 0:
n_col = len(dc)
if len(dc)==n_col:
data_dict[dc[n_col-1]] = np.array([float(dc[i]) for i in range(n_col-1)])
n_data += 1
data_dict_list.append(data_dict)
#prepare test key
data_dict_0 = data_dict_list[0]
data_dict_1 = data_dict_list[1]
for i in range(n_data):
  test_key = list(data_dict_list[0].keys())[i]
if (test_key in data_dict_0) and (test_key in data_dict_1):
test_id = i
break
#Fix Tpr and Txy for first data_dict
test_param_0_raw = np.array(data_dict_0[test_key])
test_param_1 = data_dict_1[test_key]
for key in data_dict_0.keys():
if key in data_dict_1:
data_param_0 = data_dict_0[key]
data_param_1 = data_dict_1[key]
data_param_0[1] = data_param_1[0]
data_param_0[3] = data_param_1[2]
data_dict_0[key] = data_param_0
test_param_0_update = data_dict_0[test_key]
test_delta_1_calc = np.absolute(test_param_1-test_param_0_update)
print('test id', test_id)
print('test key', test_key)
print('0th cycle (raw):', test_param_0_raw)
print('0th cycle (updated):', test_param_0_update)
print('1st cycle:', test_param_1)
print('delta (calc.):', test_delta_1_calc)
delta_dict_list = []
for i in range(len(data_dict_list)-1):
data_dict = data_dict_list[i]
data_dict_next = data_dict_list[i+1]
delta_dict = {}
for key in data_dict.keys():
if (key in data_dict_next):
delta_param = np.absolute(data_dict_next[key] - data_dict[key])
else:
delta_param = np.zeros(n_col-1)
delta_dict[key] = delta_param
delta_dict_list.append(delta_dict)
delta_dict_0 = delta_dict_list[0]
test_delta_1 = delta_dict_0[test_key]
print('delta (prog.):', test_delta_1)
print('delta diff.:', test_delta_1 - test_delta_1_calc)
print('sum of delta diff.', np.sum(np.absolute(test_delta_1 - test_delta_1_calc)))
x_range = range(1, len(delta_dict_list)+1)
x_label = []
for i in range(1, len(delta_dict_list)+1):
x_label.append(str(i))
data_title = ['Tpr_i','Tpr','Txy_i','Txy','G','B','RotX','RotY','ry','rz','r0','re','voigt_nu','a','b','c','alpha','beta','gamma','CC1/2']
cn_plot = 1
for i in range(n_col-1):
if i not in (0,2):
data_series = []
for delta_dict in delta_dict_list:
narr = np.array([delta_dict[key][i] for key in delta_dict.keys()])
data_series.append(narr)
ax = plt.subplot(3, 6, cn_plot, title=data_title[i])
plt.boxplot(data_series)
plt.xticks(x_range, x_label)
if data_title[i] in ('ry','rz','r0','re'):
plt.ylim([0, 0.01])
plt.grid(True)
cn_plot += 1
plt.show()
|
import os
import sys
sys.path.append(
os.path.normpath(
os.path.join(os.path.abspath(__file__), "..", "..", "..", "common")
)
)
from env_indigo import *
indigo = Indigo()
indigo.setOption("timeout", "2000")
t = indigo.loadMolecule(
"N(C1N=CC=C2C=1C=C(C1C=CC=CC=1)C(C1C=CC(C3(NC(=O)OC(C)(C)C)CC4(OCCO4)C3)=CC=1)=N2)N"
)
q = indigo.loadQueryMolecule(
"C.C.C.C.C.O.O.CC1(OCCO1)C.C=C(C1C(C2C=CC=CC=2)=CC2C3=NN=C(C4N=CN(C)C=4)N3C=CC=2N=1)C"
)
def test(t, q):
matcher = indigo.substructureMatcher(t)
try:
        print(matcher.match(q) is not None)
except IndigoException as e:
print(getIndigoExceptionText(e))
# test multiple times
test(t, q)
test(t, q)
test(t, q)
t = indigo.loadMolecule("C1N=CC=CC=1C=C(C1C=CC=CC=1)")
q = indigo.loadQueryMolecule("C.C.C.C.C.O")
test(t, q)
test(t, q)
|
from recoder.embedding import AnnoyEmbeddingsIndex
import pytest
import numpy as np
def test_build_index():
embeddings_mat = np.random.rand(1000, 128)
index = AnnoyEmbeddingsIndex(embeddings=embeddings_mat)
index.build(index_file='/tmp/test_embeddings')
index_loaded = AnnoyEmbeddingsIndex()
index_loaded.load(index_file='/tmp/test_embeddings')
assert index_loaded.embedding_size == index.embedding_size and index.embedding_size == 128
test_item = np.random.randint(1000)
assert index.get_embedding(test_item) == index_loaded.get_embedding(test_item)
assert index.get_nns_by_id(test_item, 100) == index_loaded.get_nns_by_id(test_item, 100)
test_item_1 = np.random.randint(0, 1000)
test_item_2 = np.random.randint(0, 1000)
assert index.get_similarity(test_item_1, test_item_2) == \
index_loaded.get_similarity(test_item_1, test_item_2)
|
#!/usr/bin/env python
"""
Contains the proxies.BaseProxy class definition for all inherited proxy
classes
Please note that this module is private. The proxies.BaseProxy class is
available in the ``wpipe.proxies`` namespace - use that instead.
"""
from .core import numbers, datetime, si, in_session, try_scalar
__all__ = ['BaseProxy']
class BaseProxy:
"""
Parent class of all proxy classes
Parameters
----------
parents : sqlintf.Base object
TODO
attr_name : string
TODO
try_scalar : boolean
TODO
Attributes
----------
parents : sqlintf.Base object
TODO
parent_id : int
TODO
attr_name : string
TODO
try_scalar : boolean
TODO
"""
def __new__(cls, *args, **kwargs):
if cls is BaseProxy:
parent = kwargs.pop('parent', None)
with si.begin_session() as session:
session.add(parent)
proxy = getattr(parent, kwargs.pop('attr_name', ''))
if kwargs.pop('try_scalar', False):
proxy = try_scalar(proxy)
if isinstance(proxy, str) or isinstance(proxy, numbers.Number):
from . import StrNumProxy
cls = StrNumProxy
elif isinstance(proxy, datetime.datetime):
from . import DatetimeProxy
cls = DatetimeProxy
else:
raise ValueError("Invalid proxy type %s" % type(proxy))
args = [proxy]
            return cls.__new__(cls, *args, **kwargs)
        return super().__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
self._parent = kwargs.pop('parent', None)
self._attr_name = kwargs.pop('attr_name', '')
self._try_scalar = kwargs.pop('try_scalar', False)
self._session = None
self._parent_id = self._get_parent_id()
@property
def parent(self):
"""
TODO
"""
return self._parent
@property
def parent_id(self):
"""
TODO
"""
return self._parent_id
@property
def attr_name(self):
"""
TODO
"""
return self._attr_name
@property
def try_scalar(self):
"""
TODO
"""
return self._try_scalar
@in_session('parent')
def delete(self):
si.delete(self._parent)
@in_session('parent')
def _get_parent_id(self):
return int(self._parent.id)
@in_session('parent')
def _augmented_assign(self, operator, other):
"""
TODO
"""
for retry in self._session.retrying_nested():
with retry:
_temp = retry.retry_state.query(self.parent.__class__).with_for_update(). \
filter_by(id=self.parent_id).one()
retry.retry_state.refresh(_temp)
_result = getattr(
[lambda x: x, try_scalar][self._try_scalar](getattr(_temp, self.attr_name)),
operator)(other)
if _result is not NotImplemented:
setattr(_temp, self.attr_name, _result)
_temp = BaseProxy(parent=self.parent,
attr_name=self.attr_name,
try_scalar=self.try_scalar)
retry.retry_state.commit()
if _result is NotImplemented:
raise TypeError("unsupported operand type(s) for augmented assignment")
else:
return _temp
|
import json
import os
import sys
#usr_part = "\n".join([e for i,e in enumerate(utterances) if i%2==0])+"\n"
DOT_CHAR = "@DOT"
def historify_src(utterances, even=True):
#helper function to add the entire dialogue history as src
parity = 0 if even else 1
    assert all(utt.strip() for utt in utterances), utterances
usr_part = ""
for i, e in enumerate(utterances):
if i%2==parity:
usr_part += f" {DOT_CHAR} ".join(utterances[:i+1])+"\n"
return usr_part
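# Illustrative behavior: historify_src(["hi", "hello", "thanks", "bye"])
# returns "hi\nhi @DOT hello @DOT thanks\n" -- each user turn is emitted
# together with the full dialogue history up to and including that turn.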
def main(args):
#defaults:
directory = "../kvr/"
splitpart = "dev" if args == 0 else args[0]
EXT = "FINAL" if args == 0 or len(args) <= 1 else args[1]
assert splitpart in ["dev", "train", "test"]
filename = f"kvret_{splitpart}_public.json"
with open(directory+filename, "r") as f:
data= json.load(f)
dialogues=[]
scenarios=[]
for idx, setting in enumerate(data):
skip = False
dialogue = setting["dialogue"]
scenario = setting["scenario"]
convo = []
lastspeaker = "assistant"
for turn in dialogue:
utterance = turn["data"]["utterance"]
convo.append(utterance)
speaker = turn["turn"]
#assert speaker != lastspeaker, utterance
lastspeaker = speaker
# if "which is cafe ven" in utterance.lower():
# input("error? {} {}".format(convo, scenario))
if not utterance.strip():
input("skip?: {}".format(convo))
                skip = True # skip this dialogue; someone didn't answer
if not scenario["kb"]["items"] and EXT=="DISCARD":
# DISCARD DIALOGUES WITH NO KB TO AVOID JUST TRAINING ON THESE
skip = True
if not skip:
scenarios.append(scenario)
dialogues.append(convo)
skip = False
for convo in dialogues:
for utt in convo:
assert "i need to locate a shop" not in utt.lower(), convo
unanswered = ""
scenario_lkp = ""
convo_usr, convo_car = "", ""
for idx, utterances in enumerate(dialogues):
"""
if idx == 119:
assert False, [utterances, scenarios[idx]]
"""
if len(utterances)%2==1:
# input("unanswered? : {} {}".format(idx, utterances))
unanswered+=utterances[-1]+"\n"
utterances = utterances[:-1]
if not utterances:
# skip empty dialogue
continue
nturns = len(utterances)
assert nturns%2==0
assert nturns, utterances
try:
usr_part = historify_src(utterances)
except Exception as e:
# continue
assert False, (e, utterances, idx, filename)
car_part = "\n".join(
[e for i,e in enumerate(utterances) if i % 2 == 1]
)+"\n"
scenario_part = (str(idx)+"\n")*(nturns//2)
"""
venetia_debug = list(set(["which is cafe ven" in utt.lower() for utt in utterances]))
if len(venetia_debug) == 2:
assert False, (utterances, scenario_part, idx, nturns)
"""
lines = lambda s: len(s.split("\n"))
assert lines(usr_part) == lines(car_part) == lines(scenario_part), \
(usr_part, car_part, scenario_part)
convo_usr += usr_part
convo_car += car_part
scenario_lkp += scenario_part
assert len(convo_usr.split("\n")) == len(convo_car.split("\n"))
train_usr, train_car = splitpart+".usr"+EXT, splitpart+".car"+EXT
with open(directory+train_usr, "w") as usr, open(directory+train_car, "w") as car:
usr.write(convo_usr)
car.write(convo_car)
# for normalize scenarios.py
scenariofile = "scenarios_"+splitpart+"_"+EXT+".json"
with open(directory+scenariofile, "w") as scenes:
json.dump(scenarios, scenes, indent=4)
# for data.py minibatch
scenario_lkp_file = splitpart+".lkp"+EXT
with open(directory+scenario_lkp_file, "w") as lkp:
lkp.write(scenario_lkp)
unanswered_file = splitpart+".noans"+EXT
with open(directory+unanswered_file, "w") as unan:
unan.write(unanswered) # user thanks etc that were never answered
return 0
if __name__ == "__main__":
if len(sys.argv) > 1:
sys.exit(main(sys.argv[1:]))
else: main(0)
|
from typing import Mapping, Tuple, Union
from uqbar.containers import UniqueTreeList
from .Attachable import Attachable
from .Attributes import Attributes
class TableCell(UniqueTreeList, Attachable):
"""
A Graphviz HTML table.
::
>>> import uqbar.graphs
>>> table_cell = uqbar.graphs.TableCell()
>>> print(format(table_cell, 'graphviz'))
<TD></TD>
::
>>> table_cell = uqbar.graphs.TableCell(
... attributes={
... 'border': 5,
... 'bgcolor': 'blue',
... },
... )
>>> print(format(table_cell, 'graphviz'))
<TD BGCOLOR="blue" BORDER="5"></TD>
"""
### CLASS VARIABLES ###
__documentation_section__ = "HTML Classes"
### INITIALIZER ###
def __init__(
self,
children=None,
*,
attributes: Union[Mapping[str, object], Attributes] = None,
name: str = None,
) -> None:
from .Text import Text
if isinstance(children, str):
children = [Text(children)]
UniqueTreeList.__init__(self, children=children, name=name)
Attachable.__init__(self)
self._attributes = Attributes("table_cell", **(attributes or {}))
### SPECIAL METHODS ###
def __format__(self, format_spec: str = None) -> str:
# TODO: make the format specification options machine-readable
if format_spec == "graphviz":
return self.__format_graphviz__()
return str(self)
def __format_graphviz__(self) -> str:
result = []
start, stop = "<TD", "</TD>"
if self.edges:
start += ' PORT="{}"'.format(self._get_port_name())
attributes = format(self._attributes, "html")
if attributes:
start += " {}".format(attributes)
start += ">"
result.append(start)
for child in self:
result.append(format(child, "graphviz"))
result.append(stop)
return "".join(result)
### PRIVATE PROPERTIES ###
@property
def _node_class(self) -> Tuple[type, ...]:
import uqbar.graphs
return (uqbar.graphs.Table, uqbar.graphs.LineBreak, uqbar.graphs.Text)
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import time
import utils
import datasets
from NetworkLib import GlimpseNet, LocationNet, ContextNet, ClassificationNet, RecurrentNet, BaselineNet
from visualizer import *
from config import Config
class DRAM(object):
def __init__(self):
self.config = Config()
self.gstep = tf.Variable(0, dtype=tf.int32,
trainable=False, name='global_step')
self.num_epochs = self.config.num_epochs
self.batch_size = self.config.batch_size
self.isTraining = self.config.isTraining
self.isVisualize = self.config.isVisualize
self.isAnimate = self.config.isAnimate
self.dataset = datasets.MNIST(self.config)
def get_data(self):
"""
Get dataset and create iterators for test and train.
"""
with tf.name_scope('data'):
train_dataset, test_dataset = self.dataset.get_dataset()
iterator = tf.compat.v1.data.Iterator.from_structure(tf.compat.v1.data.get_output_types(train_dataset),
tf.compat.v1.data.get_output_shapes(train_dataset))
img, self.label = iterator.get_next()
self.img = tf.reshape(img, [-1, self.config.height, self.config.width, self.config.color_channels])
self.train_init = iterator.make_initializer(train_dataset)
self.test_init = iterator.make_initializer(test_dataset)
def model_init(self):
"""
Creates instances of each network component in DRAM.
Defines LSTM cell for use in RNN
"""
# Initiate Networks
self.gn = GlimpseNet(self.img, self.config)
self.ln = LocationNet(self.config)
self.class_net = ClassificationNet(self.config)
# self.context_net = ContextNet(self.config)
self.baseline_net = BaselineNet(self.config)
self.rnn = RecurrentNet(self.config, self.ln, self.gn, self.class_net)
self.LSTM_cell = tf.nn.rnn_cell.LSTMCell(self.config.cell_size, state_is_tuple=True, activation=tf.nn.tanh, forget_bias=1.)
def inference(self):
"""
Data flow process:
1) Extracts first glimpse through glimpse network.
2) First glimpse is passed through RNN network,
which integrates RNN components, location/emission
network, and glimpse network.
3) Final output is passed into classification network,
where prediction is obtained.
Also maintains location tensors for each glimpse to be
used by visualizer.
"""
# self.state_init_input = self.context_net(self.img)
# self.init_location, _ = self.ln(self.state_init_input) # gets initial location using context vector
self.init_location = tf.zeros([self.batch_size, 2], dtype=tf.float32)
self.loc_array = [self.init_location]
self.mean_loc_array = [self.init_location]
self.init_glimpse = [self.gn(self.init_location)]
self.init_glimpse.extend([0] * (self.config.num_glimpses - 1))
self.logits, self.outputs, locations, mean_locations = self.rnn(self.init_glimpse, self.LSTM_cell)
self.loc_array += locations
self.mean_loc_array += mean_locations
with tf.compat.v1.name_scope('make_location_tensors'):
self.sampled_locations = tf.concat(self.loc_array, axis=0)
self.mean_locations = tf.concat(self.mean_loc_array, axis=0)
self.sampled_locations = tf.reshape(self.sampled_locations, (self.config.num_glimpses, self.batch_size, 2))
self.sampled_locations = tf.transpose(self.sampled_locations, [1, 0, 2])
self.mean_locations = tf.reshape(self.mean_locations, (self.config.num_glimpses, self.batch_size, 2))
self.mean_locations = tf.transpose(self.mean_locations, [1, 0, 2])
self.baselines = []
with tf.compat.v1.name_scope('baseline'):
for t, output in enumerate(self.outputs):
baseline_t = self.baseline_net(output)
baseline_t = tf.squeeze(baseline_t)
self.baselines.append(baseline_t)
self.baselines = tf.stack(self.baselines)
self.baselines = tf.transpose(self.baselines) # shape is [batch_size, time_steps]
def loglikelihood(self):
with tf.name_scope("loglikelihood"):
stddev = self.config.stddev
mean = tf.stack(self.mean_loc_array)
sampled = tf.stack(self.loc_array)
gaussian = tf.compat.v1.distributions.Normal(mean, stddev)
logll = gaussian.log_prob(sampled)
logll = tf.reduce_sum(logll, 2)
logll = tf.transpose(logll)
return logll
def loss(self):
with tf.name_scope("loss"):
# Cross entropy
entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.label, logits=self.logits)
self.cross_ent = tf.reduce_mean(entropy, name='cross_ent')
# Baseline MSE
self.preds = tf.nn.softmax(self.logits)
correct_preds = tf.equal(tf.argmax(self.preds, 1), tf.argmax(self.label, 1))
self.rewards = tf.cast(correct_preds, tf.float32)
# Reshape to match baseline
self.rewards = tf.expand_dims(self.rewards, 1) # shape [batch_size, 1]
self.rewards = tf.tile(self.rewards, [1, self.config.num_glimpses])
self.baseline_mse = tf.reduce_mean(tf.square(self.rewards - self.baselines), name='baseline_mse')
# Loglikelihood
self.logll = self.loglikelihood()
self.baseline_term = self.rewards - tf.stop_gradient(self.baselines)
self.logllratio = tf.reduce_mean(self.logll * self.baseline_term, name='loglikelihood_ratio')
# Total Loss
self.hybrid_loss = -self.logllratio * self.config.reward_weight + self.cross_ent + self.baseline_mse
def optimize(self):
with tf.name_scope('optimize'):
optimizer = tf.compat.v1.train.AdamOptimizer(self.config.lr)
gradients, variables = zip(*optimizer.compute_gradients(self.hybrid_loss))
gradients, _ = tf.clip_by_global_norm(gradients, self.config.max_global_norm)
self.opt = optimizer.apply_gradients(zip(gradients, variables), global_step=self.gstep)
def build(self):
self.get_data()
print("\nDataset loaded.\n")
self.model_init()
self.inference()
self.loss()
self.optimize()
self.eval()
self.summaries()
print("\nModel built.\n")
def eval(self):
"""
Count number of correct predictions per batch.
"""
with tf.name_scope("predict"):
preds = tf.nn.softmax(self.logits)
correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(self.label, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_preds, tf.float32))
def summaries(self):
with tf.name_scope("summaries"):
tf.compat.v1.summary.scalar('cross_entropy', self.cross_ent)
tf.compat.v1.summary.scalar('baseline_mse', self.baseline_mse)
tf.compat.v1.summary.scalar('loglikelihood', self.logllratio)
tf.compat.v1.summary.scalar('hybrid_loss', self.hybrid_loss)
tf.compat.v1.summary.scalar('accuracy', self.accuracy)
self.summary_op = tf.compat.v1.summary.merge_all()
def train(self, num_epochs, isTraining, isVisualize):
"""
1) Make checkpoint folder (using config.checkpoint_path) and saver.
2) Alternate between training and testing for each epoch.
"""
self.num_epochs = num_epochs
self.isVisualize = isVisualize
utils.make_dir(self.config.checkpoint_path)
writer = tf.compat.v1.summary.FileWriter(self.config.graphs_path, tf.compat.v1.get_default_graph())
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
saver = tf.compat.v1.train.Saver()
ckpt = tf.train.get_checkpoint_state(os.path.dirname(self.config.checkpoint_path))
if ckpt and ckpt.model_checkpoint_path:
print("Checkpoint path found, restoring:")
saver.restore(sess, ckpt.model_checkpoint_path)
step = self.gstep.eval()
if isTraining:
for epoch in range(num_epochs):
step = self.train_one_epoch(sess, saver, self.train_init, writer, step, epoch)
self.eval_once(sess, self.test_init, writer, step, epoch)
else:
                self.eval_once(sess, self.test_init, writer, step, 0)  # no epoch counter on the eval-only path
writer.close()
def train_one_epoch(self, sess, saver, init, writer, step, epoch):
print("Training epoch {0}/{1}".format(epoch, self.num_epochs))
sess.run(init)
start = time.time()
num_batches = 0
total_loss = 0
try:
while True:
fetches = [self.cross_ent, self.hybrid_loss, self.logllratio, self.baseline_mse,
self.accuracy, self.opt, self.summary_op, self.mean_locations,
self.preds, self.label, self.img]
cross_ent, hybrid_loss, logllratio, base_mse, accuracy, _, summary, locations, preds, labels, imgs = sess.run(fetches)
writer.add_summary(summary, global_step=step)
# Report summary data
if (step + 1) % self.config.report_step == 0:
print("----------------LOSSES----------------")
print("Accuracy at step {0}: {1}".format(step, accuracy))
print("Cross entropy loss at step {0}: {1}".format(step, cross_ent))
print("Baseline MSE at step {0}: {1}".format(step, base_mse))
print("Loglikelihood ratio at step {0}: {1}".format(step, logllratio))
print("Total loss at step {0}: {1}".format(step, hybrid_loss))
print("--------------------------------------\n")
# Call to visualizer
if (step + 1) % self.config.visualize_step == 0 and self.isVisualize:
plot_glimpse(self.config, imgs, locations, preds, labels, step, self.isAnimate)
num_batches += 1
total_loss += hybrid_loss
step += 1
except tf.errors.OutOfRangeError:
pass
saver.save(sess, self.config.checkpoint_path + self.config.checkpoint_name, global_step=self.gstep)
print("Average loss per batch: {0}".format(total_loss / num_batches))
print("Time taken: {}".format(time.time() - start))
return step
def eval_once(self, sess, init, writer, step, epoch):
sess.run(init)
start = time.time()
num_batches = 0
total_acc = 0
try:
while True:
acc, summary = sess.run([self.accuracy, self.summary_op])
writer.add_summary(summary, global_step=step)
total_acc += acc
num_batches += 1
except tf.errors.OutOfRangeError:
pass
print("-----------------EVAL----------------")
print("Average accuracy: {}".format(total_acc / num_batches))
print("Time taken: {}".format(time.time() - start))
print('\n')
if __name__ == "__main__":
model = DRAM()
model.build()
model.train(model.num_epochs, model.isTraining, model.isVisualize)
|
from collections import OrderedDict
TARGETS = OrderedDict([('2.5', (2, 5)),
('2.6', (2, 6)),
('2.7', (2, 7)),
('3.0', (3, 0)),
('3.1', (3, 1)),
('3.2', (3, 2)),
('3.3', (3, 3)),
('3.4', (3, 4)),
('3.5', (3, 5)),
('3.6', (3, 6)),
('3.7', (3, 7)),
('3.8', (3, 8))])
SYNTAX_ERROR_OFFSET = 5
TARGET_ALL = next(reversed(TARGETS.values()))
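# Minimal usage sketch; resolve_target is a hypothetical helper, not part of
# this module: map a version string such as "3.6" to its tuple, falling back
# to the newest known target.
def resolve_target(name=None):
    if name is None:
        return TARGET_ALL
    try:
        return TARGETS[name]
    except KeyError:
        raise ValueError("unknown target %r; expected one of: %s"
                         % (name, ", ".join(TARGETS)))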
|
# Testoob, Python Testing Out Of (The) Box
# Copyright (C) 2005-2006 The Testoob Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Report results in XML format"
from base import BaseReporter
import time
class XMLReporter(BaseReporter):
"""Reports test results in XML, in a format resembling Ant's JUnit xml
formatting output."""
def __init__(self):
BaseReporter.__init__(self)
from cStringIO import StringIO
self._sio = StringIO()
try:
from elementtree.SimpleXMLWriter import XMLWriter
except ImportError:
from testoob.compatibility.SimpleXMLWriter import XMLWriter
self.writer = XMLWriter(self._sio, "utf-8")
self.test_starts = {}
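    # Document layout produced by this reporter:
    #   <results>
    #     <testsuites> <testcase .../>... <total_time .../> </testsuites>
    #     <coverage>...</coverage>        (only when cover_amount == "xml")
    #   </results>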
def start(self):
BaseReporter.start(self)
self.writer.start("results")
self.writer.start("testsuites")
def done(self):
BaseReporter.done(self)
self.writer.element("total_time", value="%.4f"%self.total_time)
self.writer.end("testsuites")
if self.cover_amount is not None and self.cover_amount == "xml":
self._write_coverage(self.coverage)
self.writer.end("results")
def get_xml(self):
return self._sio.getvalue()
def startTest(self, test_info):
BaseReporter.startTest(self, test_info)
self.test_starts[test_info] = time.time()
def addError(self, test_info, err_info):
BaseReporter.addError(self, test_info, err_info)
self._add_unsuccessful_testcase("error", test_info, err_info)
def addFailure(self, test_info, err_info):
BaseReporter.addFailure(self, test_info, err_info)
self._add_unsuccessful_testcase("failure", test_info, err_info)
def _add_testcase_element(self, test_info, result, add_elements = lambda:None):
self._start_testcase_tag(test_info)
self.writer.element("result", result)
add_elements()
self.writer.end("testcase")
def addSuccess(self, test_info):
BaseReporter.addSuccess(self, test_info)
self._add_testcase_element(test_info, "success")
def addSkip(self, test_info, err_info, isRegistered=True):
BaseReporter.addSkip(self, test_info, err_info, isRegistered)
def add_elements():
self.writer.element( "reason", err_info.exception_value() )
self._add_testcase_element(test_info, "skip")
def _add_unsuccessful_testcase(self, failure_type, test_info, err_info):
def add_elements():
"Additional elements specific for failures and errors"
self.writer.element(failure_type, str(err_info), type=err_info.exception_type(), message=err_info.exception_value())
self._add_testcase_element(test_info, failure_type, add_elements)
def _start_testcase_tag(self, test_info):
self.writer.start("testcase", name=str(test_info), time=self._test_time(test_info))
def _test_time(self, test_info):
result = time.time() - self.test_starts[test_info]
return "%.4f" % result
def _write_coverage(self, coverage):
self.writer.start("coverage")
for filename, stats in coverage.getstatistics().items():
# TODO: can probably extract loop body to a method
self.writer.start("sourcefile", name=filename,
statements=str(stats["lines"]),
executed=str(stats["covered"]),
percent=str(stats["percent"]))
            # NOTE: assumes this two-entry dict yields its values in a fixed
            # order, which plain dicts do not guarantee on older Pythons
            lines, covered = coverage.coverage[filename].values()
missing = [line for line in covered if line not in lines]
self.writer.data(str(missing)[1:-1].replace(" ", ""))
self.writer.end("sourcefile")
self.writer.end("coverage")
class XMLFileReporter(XMLReporter):
def __init__(self, filename):
XMLReporter.__init__(self)
self.filename = filename
def done(self):
XMLReporter.done(self)
        f = open(self.filename, "w")
try: f.write(self.get_xml())
finally: f.close()
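# Minimal usage sketch (test_info/err_info objects come from the testoob
# runner; shown only to illustrate the reporter's call sequence):
#
#   reporter = XMLFileReporter("results.xml")
#   reporter.start()
#   reporter.startTest(test_info)
#   reporter.addSuccess(test_info)
#   reporter.done()   # serializes the document and writes results.xml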
|
#! /usr/bin/env python3
from sim import Simulation
if __name__ == "__main__":
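    # Parameters go straight to sim.Simulation: 5 ants on a 100x100 grid
    # with 20 rivers.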
sim = Simulation(ants=5, x=100, y=100, num_rivers=20)
sim.run()
|
def Golden_eagle(thoughts, eyes, eye, tongue):
return f"""
{thoughts} ,:=+++++???++?+=+=
{thoughts} :+?????\$MUUUUUUUUUMO+??~
{thoughts} :+I??\$UUUUMUMMMMUUMUMUUMUUMM???I+:
{thoughts} ,+??+ZOUUMMUMMMUUUUUMUUUMUUUMUMUUMZI+?+:
{thoughts} ~I?+MMUMUUUMUUUOOUMMMMMMUUUUUMMMUUUUUUMMUM$??~
I?+7MMMMMUUO7?+?IUMMMMMMMMUUMUUMUUUUUUUUUUMMMUMO?I
~I?+MMMUUUO????+?IOUZ7?,.......,+\$\$OUMUUUUUUMUMUUUMUU+I:
=??\$UMUUU7++??????II???????=.....,?OUUMMMUUMUUUUUUUMMUUU+=
+??UUMMM7??????+??+?????+??=,...\$MUMUUUUMUUMUUUUUUUUMMUM7II??=
,+?IUMMMI???III?++??+?????+~....... ......MUUMUUUUUUUUUMMU7?~
IIUMMM+?+?IUUUUMUUM7I?????????????I?+=:......MUUMMUMUMMMMUUU+~
:?+UMMU+?+?7UMMUUUZ7\$\$7????+++????????????=.....+UMUUUUMMMMMMUZ?
?+UMUM???+MMMMMU?++???????????++????????++????....OMMMUMMMMUMMUI:
+\$MMUM?+?ZMMU:\$MM???OUUU+??+???+????????????????,...UMUMUUUUMMUMM?~
IUUUU?I?OUUU,..UU?IMMUUMUUI???+?????????????????I,..:UUMUUUUMMUMU?+
?UUUMUM\$UMUU~..UUUUU\$,IUUUMM7+?????????????????+?I~..UUUUUUUMMMUU+?
?OUMUUUUMMUI+.?UUUU=...~UMMUU\$?????+???????????????..MUMUUUUUMUMU??
:??IUUMMUMUMMOMUU7........OUUUMMU?I????????????????I..MUMUUUUMUUMU?+
+IIUMUO.IUUUUUUO..............?UMUMUM7??????????????+?..UUMUUUUMUUMU?=
,IZMMU,.:UU7:..........,UUUMUZ....MUUMMO+???????????????..UUUUUUUUUUU?:
IZUUU:..UUUI=....... IUUUUUMMUZ,.MMUMU$?+???????????????.MUUMUUUUMUMUI
,+IUUM..O=..........\$UUMMMUU?~....UMMUUI?????????????????=.UMMUUUUUMMU?+
+?UMU~............OUMMUU~..... .UUMUMM+?????????????????=.UMMUMUMUUMOI=
?\$MU~... ...:MUMU=~........,UUMMMUI+????????????????+IUMMMUUUUMUU?+
+OMU.... ...?UMU=..:~~,.....MMMUUU+?+????????????????~MMUUUUMUMMU?+~
?OMU~ .. ...?UMUUUMUMUMUMUMUUUMUUMUI???????????????+?+OUUMUUMMMMUIUUUMO,
??UMU~.....\$MUUUOM???UMMUUUMMMUUMM7?++????????????++OMMMUMUUMMUI??UMIU+~
:?7UUU\$...UMMM?I~, +?MMUUMMMMMMUU?????????????+??\$UMMMMUUUMU\$?: ,??I?:
?IMUMUUZMU+?, =?UMMMMMMMMO??????????????+UMUMMUMUMUU?I~
?+\$MUMUMU?? ?MMMMMMMMU??+???????????IUMMMUUUMUUZ?=
,+???ZUO?~ +ZUMMUMUMU???++??+???IUMMUMUMMUUO??~
,,:~= ,?UMUMUMU???+??+?+?7UMUMUMUMUI??:
?UMUMMMM?+??++?ZUUMUMUMUZ++?,
?UMMMMMO+???MMUMUMUMUMOII=,
?UUUMUUZOMUUMUMMUMM+??=
?UMMUMMUUUMUMM\$???~
,?UMUUMUUU\$?+?~:
:IUUUM?+?I=:
????~,
""" |
num_rows = int(input())
matrix = [input().split(", ") for _ in range(num_rows)]
first_diag = [matrix[i][i] for i in range(len(matrix))]
second_diag = [matrix[i][-(i + 1)] for i in range(len(matrix))]
print(f"First diagonal: {', '.join(first_diag)}. Sum: {sum(map(int, first_diag))}")
print(f"Second diagonal: {', '.join(second_diag)}. Sum: {sum(map(int, second_diag))}") |
import random
from time import sleep
items = ["Rock", "Paper", "Scissors"]
computer = random.choice(items)
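# The computer picks uniformly at random; the player answers with a menu
# number (1 = Rock, 2 = Paper, 3 = Scissors).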
print("-"*24)
print("Scissors, Paper and Rock")
print("-"*24)
print("Your Options")
print("[1] Rock\n[2] Paper\n[3] Scissors")
player = int(input("Choose one option: "))
print("-"*17)
print("Scissors")
sleep(0.5)
print("Paper")
sleep(0.5)
print("Rock")
sleep(0.5)
print("-"*16)
if computer == "Rock":
if player == 1:
print("No one won!\nThat's a draw")
elif player == 2:
print("And the Winner is: Player")
elif player == 3:
print("And the Winner is: Computer")
else:
print("Invalid movement")
elif computer == "Paper":
if player == 1:
print("And the Winner is: Computer")
elif player == 2:
print("No one won!\nThat's a draw")
elif player == 3:
print("And the Winner is: Player")
else:
print("Invalid movent")
elif computer == "Scissors":
if player == 1:
print("And the winner is: Player")
elif player == 2:
print("And the Winner is: Computer")
elif player == 3:
print("No one won!\nThat's a draw")
else:
print("Invalid movement")
if player == 1:
    print("Player chose: Rock")
elif player == 2:
    print("Player chose: Paper")
elif player == 3:
    print("Player chose: Scissors")
print("Computer chose: {}".format(computer))
|