blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 5-283 chars) | content_id (string, 40 chars) | detected_licenses (sequence, 0-41 items) | license_type (2 classes) | repo_name (string, 7-96 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (58 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, nullable) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (11 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (43 classes) | src_encoding (9 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 7-5.88M) | extension (30 classes) | content (string, 7-5.88M chars) | authors (sequence, 1 item) | author (string, 0-73 chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b8ed4dd75db4c11d74ead4f0f3d2b08577c4eb57 | 1cae869afd37b514ea1c85ccd1d3e9928b0e63d0 | /main.py | a9fd0378575d035e73bed780cb32bf4ccbcbd874 | [] | no_license | diegodocs/python-flask-container | fc346c4add4d68423803ea2e37c72c2ca6c47241 | 2e9449b74be2ee2dab9916c3cf71fb825a991ae3 | refs/heads/master | 2023-06-15T11:23:52.573811 | 2021-07-15T15:06:33 | 2021-07-15T15:06:33 | 386,331,309 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from flask import render_template
import connexion
# Create the application instance
app = connexion.App(__name__, specification_dir="./")
# Read the swagger.yml file to configure the endpoints
app.add_api("swagger.yml")
# create a URL route in our application for "/"
@app.route("/")
def home():
return render_template("home.html")
app.run(host='0.0.0.0') | [
"[email protected]"
] | |
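The main.py row above builds its routes from a swagger.yml specification via connexion rather than declaring them all in Flask. The spec maps each path to a dotted operationId that connexion resolves to a Python callable; the sketch below shows a hypothetical handler module such an operationId could point to (the module and function names are assumptions, not taken from this repository).

```python
# api.py - hypothetical handler module for a connexion operationId such as
# "api.get_status"; connexion imports the dotted path from swagger.yml and
# wires the function to the declared route and HTTP method.

def get_status():
    """Handle e.g. GET /status as declared in swagger.yml."""
    # connexion serialises a plain dict into the JSON response body.
    return {"status": "ok"}
```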
8fdad98f0744d1235757c322861bd452fb27013c | 69f3174a930c82cee384e4e254d83602287fe05c | /app/views.py | 62204c9ce18f8a366d79ba3b1a1c564b674985df | [
"BSD-3-Clause"
] | permissive | ruganda/ShoppinglistApp | f459fe38e6376530b1cb6ccc9c3dd04ca07822b5 | 30ae866dab2e220e0004e501870ca6402bb7ee2e | refs/heads/master | 2021-06-25T11:40:42.746943 | 2017-09-10T06:43:04 | 2017-09-10T06:43:04 | 103,006,893 | 0 | 0 | null | 2017-09-10T06:43:05 | 2017-09-10T05:46:01 | null | UTF-8 | Python | false | false | 4,635 | py | from flask import Flask, render_template, flash, redirect, url_for, session, request, logging
from wtforms import Form, StringField, TextAreaField, PasswordField, validators
from passlib.hash import sha256_crypt
from functools import wraps
import models
app = Flask(__name__)
# Register Form Class
class RegisterForm(Form):
name = StringField('Name', [validators.Length(min=1, max=50)])
username = StringField('Username', [validators.Length(min=4, max=25)])
email = StringField('Email', [validators.Length(min=6, max=50)])
password = PasswordField('Password', [
validators.DataRequired(),
validators.EqualTo('confirm', message='Passwords do not match')
])
confirm = PasswordField('Confirm Password')
# Shoppinglist Form Class
class ShoppinglistForm(Form):
title = StringField('Title', [validators.Length(min=1, max=200)])
# Index
@app.route('/')
def index():
return render_template('home.html')
# About
@app.route('/about')
def about():
return render_template('about.html')
# User Register
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
if request.method == 'POST' and form.validate():
name = form.name.data
email = form.email.data
username = form.username.data
password = sha256_crypt.encrypt(str(form.password.data))
flash('You are now registered and can log in', 'success')
return redirect(url_for('login'))
return render_template('register.html', form=form)
# User login
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
# Get Form Fields
username = request.form['username']
password_candidate = request.form['password']
# Compare Passwords
if sha256_crypt.verify(password_candidate, request.form['password']):
# Passed
session['logged_in'] = True
session['username'] = username
flash('You are now logged in', 'success')
return redirect(url_for('dashboard'))
else:
error = 'Invalid login'
return render_template('login.html', error=error)
else:
error = 'Username not found'
return render_template('login.html', error=error)
# Check if user logged in
def is_logged_in(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash('Unauthorized, Please login', 'danger')
return redirect(url_for('login'))
return wrap
# Dashboard
@app.route('/dashboard')
@is_logged_in
def dashboard():
# Create cursor
cur = {}
shoppinglists = cur.view_shoppinglist()
    if len(shoppinglists) > 0:
return render_template('dashboard.html', shoppinglists=shoppinglists)
else:
msg = 'No Shoppinglists Found'
return render_template('dashboard.html', msg=msg)
# Add shoppinglist
@app.route('/add_shoppinglist', methods=['GET', 'POST'])
@is_logged_in
def add_shoppinglist():
form = ShoppinglistForm(request.form)
if request.method == 'POST' and form.validate():
title = form.title.data
# Create Cursor
cur ={}
cur.add_shoppinglist()
flash('Shoppinglist Created', 'success')
return redirect(url_for('dashboard'))
return render_template('add_article.html', form=form)
@app.route('/edit_shoppinglist/<string:id>', methods=['GET', 'POST'])
@is_logged_in
def edit_shoppinglist(id):
# Create cursor
cur = {}
    shoppinglist = cur.view_shoppinglist(id)
# Get form
form = ShoppinglistForm(request.form)
# Populate shoppinglist form fields
form.title.data = shoppinglist['title']
if request.method == 'POST' and form.validate():
title = request.form['title']
# Create Cursor
cur.edit_shoppinglist(id)
flash('Shoppinglist Updated', 'success')
return redirect(url_for('dashboard'))
return render_template('edit_shoppinglist.html', form=form)
# Delete shoppinglist
@app.route('/delete_shoppinglist/<string:id>', methods=['POST'])
@is_logged_in
def delete_shoppinglist(id):
# Create cursor
cur = {}
# Execute
cur.delete_shoppinglist(id)
flash('Article Deleted', 'success')
return redirect(url_for('dashboard'))
# Logout
@app.route('/logout')
@is_logged_in
def logout():
session.clear()
flash('You are now logged out', 'success')
return redirect(url_for('login'))
if __name__ == '__main__':
app.secret_key='secret123'
app.run(debug=True) | [
"[email protected]"
] | |
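In the views.py row above, `cur = {}` is a stand-in wherever a real data layer would be used, so calls like `cur.view_shoppinglist()` only indicate the interface the views expect. A minimal in-memory sketch of that interface is shown below; the class name, storage scheme, and signatures are assumptions inferred from the call sites, not code from the repository.

```python
# Hypothetical in-memory stand-in for the data layer reached through `cur`
# in views.py; only the method names are taken from the call sites.
class ShoppinglistStore:
    def __init__(self):
        self._lists = {}   # id -> {'id': ..., 'title': ...}
        self._next_id = 1

    def view_shoppinglist(self, list_id=None):
        # Return every list, or a single list when an id is given.
        if list_id is None:
            return list(self._lists.values())
        return self._lists[int(list_id)]

    def add_shoppinglist(self, title=None):
        self._lists[self._next_id] = {'id': self._next_id, 'title': title}
        self._next_id += 1

    def edit_shoppinglist(self, list_id, title=None):
        self._lists[int(list_id)]['title'] = title

    def delete_shoppinglist(self, list_id):
        self._lists.pop(int(list_id), None)
```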
7447e71d41061af2d2b957e2cd0674a8d8f0083a | 1f154c0499c6302a4b4416485b6edb03de0df4bb | /index/migrations/0003_auto_20200626_1416.py | f0fc64e05f1f70cf851ee34381d1fc0ceb9662a5 | [] | no_license | sbswapnil/OnlineStore | d2aa19c5a81b70ebea85d58345a019f75f224058 | 78af51c3e079e011e6015acf4170c5175f15cc2b | refs/heads/master | 2022-11-10T00:31:39.147353 | 2020-07-05T13:34:49 | 2020-07-05T13:34:49 | 275,309,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | # Generated by Django 2.1.15 on 2020-06-26 08:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('index', '0002_product'),
]
operations = [
migrations.AlterModelOptions(
name='department',
options={'ordering': ('department',)},
),
migrations.AlterModelOptions(
name='product',
options={'ordering': ('name',)},
),
migrations.AlterField(
model_name='product',
name='name',
field=models.CharField(max_length=20, unique=True),
),
]
| [
"[email protected]"
] | |
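The migration row above only records changes (Meta ordering for both models and a unique `name` field on Product), so the full models are not part of this row. A hypothetical index/models.py consistent with those operations might look like the following; everything beyond `department`, `name`, and the orderings is an assumption.

```python
# index/models.py - hypothetical models consistent with migration 0003;
# only the ordering options and the unique `name` field are implied by it.
from django.db import models


class Department(models.Model):
    department = models.CharField(max_length=20)

    class Meta:
        ordering = ('department',)


class Product(models.Model):
    name = models.CharField(max_length=20, unique=True)
    department = models.ForeignKey(Department, on_delete=models.CASCADE)

    class Meta:
        ordering = ('name',)
```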
b56a8887499549bddcddf330ff397a1eb4b0c35c | 5dffd214621058efbc28a47968be2f728f78b98c | /Tensorflow/tensorflow-builtin-estimators/classification-sigmoid-perceptron1.py | cb96c89c3bae54816bf25f55b2fccb0673393f98 | [] | no_license | SatishEddhu/Deep-Analytics | 25446466a27aa70d02dff3efc824af0cf37dac0c | a33a0ad7df1175e66af02e9c0758413d1de172da | refs/heads/master | 2021-01-13T15:51:03.044058 | 2017-08-05T06:44:54 | 2017-08-05T06:44:54 | 76,864,213 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,306 | py | # Import required packages
from tensorflow.contrib import learn
from tensorflow.contrib import layers
import tensorflow as tf
import pandas as pd
import numpy as np
import os
import seaborn as sb # for graphics
import matplotlib.pyplot as plt
# Set logging level to info to see detailed log output
tf.logging.set_verbosity(tf.logging.INFO)
os.chdir("/home/algo/Algorithmica/tensorflow-builtin-estimators")
os.getcwd()
# reading directly using tensorflow's api
# train2.csv does not have headers. Instead, first row has #rows, #columns
sample = learn.datasets.base.load_csv_with_header(
filename="train2.csv",
target_dtype=np.int,
features_dtype=np.float32, target_column=-1) # '-1' means last column
type(sample)
sample.data
sample.data.shape
type(sample.data)
sample.target
sample.target.shape
type(sample.target)
#feature_columns argument expects list of tensorflow feature types
feature_cols = [layers.real_valued_column("", dimension=2)]
# If n_classes > 2, it is multi-class classification. Although we are trying to learn about a
# single perceptron, LinearClassifier internally uses a layer of perceptrons to classify.
classifier = learn.LinearClassifier(feature_columns=feature_cols,
n_classes=2, # binary classificationi
model_dir="/home/algo/Algorithmica/tmp",
enable_centered_bias=False)
# By default, enable_centered_bias is True
# If enable_centered_bias = False, linear classifier equation is:
# f(x,y) = w_1*x + w_2*y + bias = 0
# To predict, f(x,y) < 0 ==> [x,y] is Class 0, else [x,y] is Class 1
# If enable_centered_bias = True, linear classifier equation is:
# f(x,y) = w_1*x + w_2*y + bias + centered_bias_weight = 0
# To predict, f(x,y) < 0 ==> [x,y] is Class 0, else [x,y] is Class 1
# Note that enabling/disabling the centered bias can result in different predictions for "border-case" points
classifier.fit(x=sample.data, y=sample.target, steps=1000)
#access the learned model parameters
classifier.weights_
classifier.bias_
# valid only when enable_centered_bias=True in learn.LinearClassifier()
# centered_bias_weight = classifier.get_variable_value("centered_bias_weight")
for var in classifier.get_variable_names():
print var, " - ", classifier.get_variable_value(var)
# w1*x + w2*y + b = 0.
p1 = [0,-classifier.bias_[0]/classifier.weights_[1]] # (0, -b/w2)
p2 = [-classifier.bias_[0]/classifier.weights_[0],0] # (-b/w1, 0)
df = pd.DataFrame(data=np.c_[sample.data, sample.target.astype(int)], columns=['x1','x2','label'])
sb.swarmplot(x='x1', y='x2', data=df, hue='label', size=10)
plt.plot(p1, p2, 'b-', linewidth = 2)
# predict the outcome using model
test = np.array([[60.4,21.5],[200.1,26.1],[50,62],[50,63],[70,37],[70,38]])
predictions = classifier.predict(test)
predictions # [0,1,0,1,0,1]
test[0,0]
test[0,1]
# Understanding how the predictions were made
# Since enable_centered_bias = False, linear classifier equation is:
# f(x,y) = w_1*x + w_2*y + bias = 0
# To predict, f(x,y) < 0 ==> [x,y] is Class 0, else [x,y] is Class 1
test[0,0]*classifier.weights_[0] + test[0,1]*classifier.weights_[1] + classifier.bias_ # -0.817 ==> class 0
test[1,0]*classifier.weights_[0] + test[1,1]*classifier.weights_[1] + classifier.bias_ # 4.432 ==> class 1
test[2,0]*classifier.weights_[0] + test[2,1]*classifier.weights_[1] + classifier.bias_ # -0.021 ==> class 0
test[3,0]*classifier.weights_[0] + test[3,1]*classifier.weights_[1] + classifier.bias_ # 0.008 ==> class 1
test[4,0]*classifier.weights_[0] + test[4,1]*classifier.weights_[1] + classifier.bias_ # -0.015 ==> class 0
test[5,0]*classifier.weights_[0] + test[5,1]*classifier.weights_[1] + classifier.bias_ # 0.013 ==> class 1
# Predict the class of random points (when enable_centered_bias = False)
x = 70
y = 37
classifier.weights_[0]*x + classifier.weights_[1]*y + classifier.bias_
classifier.predict(np.array([[x,y]]))
# loop version for seeing the individual points and predictions in test data
for i in range(len(test)):
value = test[i,0]*classifier.weights_[0] + test[i,1]*classifier.weights_[1] + classifier.bias_
print "Score: ", value, " ==> ", predictions[i] | [
"[email protected]"
] | |
9523ac8b61860442304daa426b823b5a950dd867 | afc9d4d8f62abbfe319905d58c7517b70e6a7794 | /mortgage_insight/urls.py | 55ba57c126cfa145bf3b9bfaec17742e1318de10 | [] | no_license | chris-nlnz/mortgage_insight | c5b8608305d9d00e5c2f666a5456d4c44af265ae | 24b247c1a388487bc76ea095b5f03bb5dbbe45d4 | refs/heads/master | 2022-02-27T04:57:10.301940 | 2019-12-21T03:08:32 | 2019-12-21T03:08:32 | 229,021,508 | 0 | 0 | null | 2022-01-21T20:10:33 | 2019-12-19T09:35:15 | HTML | UTF-8 | Python | false | false | 805 | py | from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, re_path
from mortgages.views import IndexView, MortgageListView, MortgageDetailView
urlpatterns = [
path('admin/', admin.site.urls),
# See https://developer.mozilla.org/en-US/docs/Learn/Server-side/Django/Authentication
# for good examples of how to extend / customise the Django auth flow
path('accounts/login/', auth_views.LoginView.as_view(), name='login'),
path('accounts/logout/', auth_views.LogoutView.as_view(), name='logout'),
path('', IndexView.as_view(), name='index'),
path('mortgages/', MortgageListView.as_view(), name='mortgages-list'),
re_path('mortgages/(?P<pk>[0-9A-Fa-f-]+)/', MortgageDetailView.as_view(), name='mortgages-detail'),
]
| [
"[email protected]"
] | |
71267b8cf3be5526d75182327fab41e31206548d | 46cdce72fe4cf1f63ec3a8f4cd1890e316de4ee1 | /data/Datasets.py | fd61417d4bee327e8e956da7d5b2be1fa1056dc5 | [
"MIT"
] | permissive | Guim3/StackGAN | 520de10f189633a88f7e2c933f750a7e967082fa | f42ebfa7379fd8472b873c9dad0bf6bb9c03588e | refs/heads/master | 2021-01-12T07:00:21.969243 | 2016-12-24T17:35:06 | 2016-12-24T17:35:06 | 76,892,436 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | import numpy as np
"""Dataset: given dataset images and their labels, constructs a dataset and generates mini-batches. """
class Dataset:
def __init__(self, images, labels):
""" Input:
- images: numpy array N x height x width x color, where N is the number of samples
- labels: numpy array NxM, where M is the dimensionality of the labels.
"""
self._num_examples = len(labels)
self._index_in_epoch = 0
self._epochs_completed = 0
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._images = images[perm]
self._labels = labels[perm]
def set_data(self, images, labels):
self._images = images
self._labels = labels
def get_images(self):
return self._images
def get_labels(self):
return self._labels
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
| [
"[email protected]"
] | |
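The Dataset class above shuffles once on construction and reshuffles whenever an epoch is exhausted; a short usage sketch with random arrays standing in for real images and labels:

```python
import numpy as np

# Hypothetical usage of the Dataset class defined above: 100 RGB images of
# 32x32 pixels and one-hot labels over 10 classes.
images = np.random.rand(100, 32, 32, 3).astype(np.float32)
labels = np.eye(10)[np.random.randint(0, 10, size=100)]

dataset = Dataset(images, labels)

# Draw a few mini-batches; crossing the epoch boundary triggers a reshuffle.
for step in range(5):
    batch_images, batch_labels = dataset.next_batch(32)
    print(step, batch_images.shape, batch_labels.shape)
```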
e98d20d5572a244abbb3af09a27c99b20802283c | f680851b5704cb60c9a21589ee123b7adc474124 | /2015/Database/pyTK/view/__init__.py | 9fc19c9b2001f5ff79306e4b1cebc12ef31a857e | [] | no_license | joeyLewis/HighSchoolPortfolio | 999c3841c27d36f4988be424d808e89299d3e007 | c2e380506637d9f52ddbb956d4cf6b2c095cad84 | refs/heads/master | 2021-01-10T09:58:11.043094 | 2016-02-01T21:32:46 | 2016-02-01T21:32:46 | 50,872,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | __all__=["customs","portals","windows","view"]
| [
"[email protected]"
] | |
f164d4d35d6ba2cb3d5766e27840cb4977d6d9ad | 04054ede9aa76745c9680e3b1d0d68d25b2d16e6 | /POSProject/sale/admin.py | e887b0950545fd0d94f4abffb264aed0f227fcf4 | [] | no_license | Kittiwat17/POSProject | 486ffe8f433d4b206f6e7bb071becb4366f8ddc1 | 3ab59e95725641c22e7678512562da9e90e67f88 | refs/heads/master | 2021-03-04T11:20:34.369852 | 2020-03-09T12:30:51 | 2020-03-09T12:30:51 | 246,029,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | from django.contrib import admin
from sale.models import Order, Order_Products, Type, Product
# Register your models here.
admin.site.register(Order)
admin.site.register(Order_Products)
admin.site.register(Type)
admin.site.register(Product) | [
"[email protected]"
] | |
25dcc255986a15191b7d8adffac8fd5a28d49057 | 697ed34849347359628e8c8a36ba4d29715753ad | /minishift/minishift/wsgi.py | 6a8226f1068d2d6518c341d2e83f63b6401d7451 | [] | no_license | elrik/minishift | 5e9a61fdbc67d9064db2a21f8afaad24915a9de0 | 37aef58c1bbb41cb5e1c0a98dd127fc4ab7727ab | refs/heads/master | 2020-06-22T00:12:54.719051 | 2019-07-19T06:55:56 | 2019-07-19T06:55:56 | 197,586,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for minishift project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'minishift.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
4b74bef4bfc273b7967bbac170ea748de830187a | 3899dd3debab668ef0c4b91c12127e714bdf3d6d | /venv/Lib/site-packages/tensorflow/python/profiler/model_analyzer.py | 538717edb0c2f79205cc2784a9ba9a2865e71cc4 | [] | no_license | SphericalPotatoInVacuum/CNNDDDD | b2f79521581a15d522d8bb52f81b731a3c6a4db4 | 03c5c0e7cb922f53f31025b7dd78287a19392824 | refs/heads/master | 2020-04-21T16:10:25.909319 | 2019-02-08T06:04:42 | 2019-02-08T06:04:42 | 169,691,960 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 15,436 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model Analyzer.
Analyze model, including shape, params, time, memory, structure, etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from google.protobuf import message
from tensorflow.core.profiler import tfprof_options_pb2
from tensorflow.core.profiler import tfprof_output_pb2
from tensorflow.python import pywrap_tensorflow as print_mdl
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import tfprof_logger
from tensorflow.python.util.tf_export import tf_export
_DEFAULT_PROFILE_OPTIONS = 0
_DEFAULT_ADVISE_OPTIONS = 0
# The following options are for 'advise' cmd.
# Show all advice.
ALL_ADVICE = {
'ExpensiveOperationChecker': {},
'AcceleratorUtilizationChecker': {},
'JobChecker': {}, # Only available internally.
'OperationChecker': {},
}
def _graph_string(graph):
"""Helper to serialize a graph to string."""
if graph:
return graph.as_graph_def(add_shapes=True).SerializeToString()
else:
return b''
def _build_options(options):
"""Build tfprof.OptionsProto.
Args:
options: A dictionary of options.
Returns:
tfprof.OptionsProto.
"""
opts = tfprof_options_pb2.OptionsProto()
opts.max_depth = options.get('max_depth', 10)
opts.min_bytes = options.get('min_bytes', 0)
opts.min_peak_bytes = options.get('min_peak_bytes', 0)
opts.min_residual_bytes = options.get('min_residual_bytes', 0)
opts.min_output_bytes = options.get('min_output_bytes', 0)
opts.min_micros = options.get('min_micros', 0)
opts.min_accelerator_micros = options.get('min_accelerator_micros', 0)
opts.min_cpu_micros = options.get('min_cpu_micros', 0)
opts.min_params = options.get('min_params', 0)
opts.min_float_ops = options.get('min_float_ops', 0)
opts.min_occurrence = options.get('min_occurrence', 0)
opts.step = options.get('step', -1)
opts.order_by = options.get('order_by', 'name')
for p in options.get('account_type_regexes', []):
opts.account_type_regexes.append(p)
for p in options.get('start_name_regexes', []):
opts.start_name_regexes.append(p)
for p in options.get('trim_name_regexes', []):
opts.trim_name_regexes.append(p)
for p in options.get('show_name_regexes', []):
opts.show_name_regexes.append(p)
for p in options.get('hide_name_regexes', []):
opts.hide_name_regexes.append(p)
opts.account_displayed_op_only = options.get('account_displayed_op_only',
False)
for p in options.get('select', []):
opts.select.append(p)
opts.output = options.get('output', 'stdout')
opts.dump_to_file = options.get('dump_to_file', '')
return opts
def _build_advisor_options(options):
"""Build tfprof.AdvisorOptionsProto.
Args:
options: A dictionary of options. See ALL_ADVICE example.
Returns:
tfprof.AdvisorOptionsProto.
"""
opts = tfprof_options_pb2.AdvisorOptionsProto()
if options is None:
return opts
for checker, checker_opts in six.iteritems(options):
checker_ops_pb = tfprof_options_pb2.AdvisorOptionsProto.CheckerOption()
for k, v in six.iteritems(checker_opts):
checker_ops_pb[k] = v
opts.checkers[checker].MergeFrom(checker_ops_pb)
return opts
@tf_export('profiler.Profiler')
class Profiler(object):
"""TensorFlow multi-step profiler.
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
```python
Typical use case:
# Currently we are only allowed to create 1 profiler per process.
profiler = Profiler(sess.graph)
for i in xrange(total_steps):
if i % 10000 == 0:
run_meta = tf.RunMetadata()
_ = sess.run(...,
options=tf.RunOptions(
trace_level=tf.RunOptions.FULL_TRACE),
run_metadata=run_meta)
profiler.add_step(i, run_meta)
# Profile the parameters of your model.
profiler.profile_name_scope(options=(option_builder.ProfileOptionBuilder
.trainable_variables_parameter()))
# Or profile the timing of your model operations.
opts = option_builder.ProfileOptionBuilder.time_and_memory()
profiler.profile_operations(options=opts)
# Or you can generate a timeline:
opts = (option_builder.ProfileOptionBuilder(
option_builder.ProfileOptionBuilder.time_and_memory())
.with_step(i)
.with_timeline_output(filename).build())
profiler.profile_graph(options=opts)
else:
_ = sess.run(...)
# Auto detect problems and generate advice.
profiler.advise()
```
"""
def __init__(self, graph=None, op_log=None):
"""Constructor.
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define
extra op types.
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
self._coverage = 0.0
self._graph = graph
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
self._graph, op_log=op_log)
# pylint: enable=protected-access
print_mdl.NewProfiler(
_graph_string(self._graph), op_log.SerializeToString())
def __del__(self):
print_mdl.DeleteProfiler()
def add_step(self, step, run_meta):
"""Add statistics of a step.
Args:
step: int, An id used to group one or more different `run_meta` together.
When profiling with the profile_xxx APIs, user can use the `step`
id in the `options` to profile these `run_meta` together.
run_meta: RunMetadata proto that contains statistics of a session run.
"""
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
self._graph, run_meta=run_meta)
# pylint: enable=protected-access
# TODO(xpan): P1: Better to find the current graph.
self._coverage = print_mdl.AddStep(step, _graph_string(self._graph),
run_meta.SerializeToString(),
op_log.SerializeToString())
def profile_python(self, options):
"""Profile the statistics of the Python codes.
By default, it shows the call stack from root. To avoid
redundant output, you may use options to filter as below
options['show_name_regexes'] = ['.*my_code.py.*']
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a MultiGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('code'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_operations(self, options):
"""Profile the statistics of the Operation types (e.g. MatMul, Conv2D).
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a MultiGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('op'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_name_scope(self, options):
"""Profile the statistics of graph nodes, organized by name scope.
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a GraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.GraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('scope'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_graph(self, options):
"""Profile the statistics of graph nodes, organized by dataflow graph.
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a GraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.GraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('graph'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def advise(self, options):
"""Automatically detect problems and generate reports.
Args:
options: A dict of options. See ALL_ADVICE example above.
Returns:
      An AdviceProto that contains the reports from all checkers.
"""
advise_pb = tfprof_output_pb2.AdviceProto()
opts = _build_advisor_options(options)
advise_pb.ParseFromString(
print_mdl.Profile('advise'.encode('utf-8'), opts.SerializeToString()))
return advise_pb
def serialize_to_string(self):
"""Serialize the ProfileProto to a binary string.
Users can write it to file for offline analysis by tfprof commandline
or graphical interface.
Returns:
ProfileProto binary string.
"""
return print_mdl.SerializeToString()
def _write_profile(self, filename):
"""Writes the profile to a file."""
print_mdl.WriteProfile(filename)
@tf_export('profiler.profile')
def profile(graph=None,
run_meta=None,
op_log=None,
cmd='scope',
options=_DEFAULT_PROFILE_OPTIONS):
"""Profile model.
Tutorials and examples can be found in:
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
run_meta: optional tensorflow.RunMetadata proto. It is necessary to
to support run time information profiling, such as time and memory.
op_log: tensorflow.tfprof.OpLogProto proto. User can assign "types" to
graph nodes with op_log. "types" allow user to flexibly group and
account profiles using options['accounted_type_regexes'].
cmd: string. Either 'op', 'scope', 'graph' or 'code'.
'op' view organizes profile using operation type. (e.g. MatMul)
'scope' view organizes profile using graph node name scope.
'graph' view organizes profile using graph node inputs/outputs.
'code' view organizes profile using Python call stack.
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
If cmd is 'scope' or 'graph', returns GraphNodeProto proto.
If cmd is 'op' or 'code', returns MultiGraphNodeProto proto.
Side effect: stdout/file/timeline.json depending on options['output']
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
if options == _DEFAULT_PROFILE_OPTIONS:
options = (option_builder.ProfileOptionBuilder
.trainable_variables_parameter())
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
graph, op_log, run_meta, add_trace=cmd == 'code')
# pylint: enable=protected-access
opts = _build_options(options)
run_meta_str = run_meta.SerializeToString() if run_meta else b''
graph_str = _graph_string(graph)
if cmd == 'code' or cmd == 'op':
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
op_log.SerializeToString(),
cmd.encode('utf-8'),
opts.SerializeToString())
try:
tfprof_node.ParseFromString(ret)
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
elif cmd == 'graph' or cmd == 'scope':
tfprof_node = tfprof_output_pb2.GraphNodeProto()
ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
op_log.SerializeToString(),
cmd.encode('utf-8'),
opts.SerializeToString())
try:
tfprof_node.ParseFromString(ret)
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
else:
raise errors.InvalidArgumentError(
None, None, 'unknown cmd: %s\n' % cmd)
return tfprof_node
@tf_export('profiler.advise')
def advise(graph=None, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
"""Auto profile and advise.
Builds profiles and automatically check anomalies of various
aspects. For more details:
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
run_meta: optional tensorflow.RunMetadata proto. It is necessary to
to support run time information profiling, such as time and memory.
options: see ALL_ADVICE example above. Default checks everything.
Returns:
Returns AdviceProto proto
"""
  if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
if options == _DEFAULT_ADVISE_OPTIONS:
options = ALL_ADVICE.copy()
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
graph, None, run_meta, add_trace=True)
# pylint: enable=protected-access
run_meta_str = run_meta.SerializeToString() if run_meta else b''
opts = _build_advisor_options(options)
ret = tfprof_output_pb2.AdviceProto()
ret.ParseFromString(
print_mdl.PrintModelAnalysis(
_graph_string(graph), run_meta_str, op_log.SerializeToString(),
'advise'.encode('utf-8'), opts.SerializeToString()))
return ret
| [
"[email protected]"
] | |
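The model_analyzer row above exposes the multi-step Profiler as well as the one-shot profile() and advise() functions. Below is a minimal graph-mode sketch of driving profile() from a traced session run (TF 1.x style; the tiny matmul graph exists only so there is something to trace):

```python
import numpy as np
import tensorflow as tf

# Build a small graph purely so there is something to profile.
a = tf.constant(np.random.rand(256, 256), dtype=tf.float32)
b = tf.constant(np.random.rand(256, 256), dtype=tf.float32)
product = tf.matmul(a, b)

run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()

with tf.Session() as sess:
    sess.run(product, options=run_options, run_metadata=run_metadata)
    # Group the profile by operation type and report time and memory.
    opts = tf.profiler.ProfileOptionBuilder.time_and_memory()
    tf.profiler.profile(sess.graph,
                        run_meta=run_metadata,
                        cmd='op',
                        options=opts)
```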
13756ff278f9a4ac63da0589411824f4ae1acb25 | b0e0843d8410ea59ba01fb923fce78921d3b6cf6 | /TrainDataset.py | 3bdbe02d6abeb50bdb823f3b96d3b791f409ea62 | [] | no_license | mosfiqunnahid/Trainning-Dataset | 8306158dbd0c303d0dee07ff42770b61e107094e | 24c368d13aa6a095a9235add4b5a7cc38316b837 | refs/heads/main | 2023-03-12T16:06:20.831572 | 2021-03-05T15:10:54 | 2021-03-05T15:10:54 | 344,847,276 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | import cv2, os
import numpy as np
from PIL import Image
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml");
def getImagesAndLabels(path):
# get the path of all the files in the folder
imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
# create empth face list
faceSamples = []
# create empty ID list
Ids = []
# now looping through all the image paths and loading the Ids and the images
for imagePath in imagePaths:
# loading the image and converting it to gray scale
pilImage = Image.open(imagePath).convert('L')
# Now we are converting the PIL image into numpy array
imageNp = np.array(pilImage, 'uint8')
# getting the Id from the image
Id = int(os.path.split(imagePath)[-1].split(".")[1])
# extract the face from the training image sample
faces = detector.detectMultiScale(imageNp)
# If a face is there then append that in the list as well as Id of it
for (x, y, w, h) in faces:
faceSamples.append(imageNp[y:y + h, x:x + w])
Ids.append(Id)
return faceSamples, Ids
faces, Ids = getImagesAndLabels('dataSet')
recognizer.train(faces, np.array(Ids))
recognizer.save('trainner/trainner.yml')
| [
"[email protected]"
] | |
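The training row above fits an LBPH recognizer on images in dataSet/ whose file names carry the numeric id in the second dot-separated field, then saves the model to trainner/trainner.yml. A hedged sketch of the matching recognition step is shown below; the camera index, confidence threshold, and drawing details are assumptions rather than code from this repository.

```python
import cv2

# Hypothetical recognition loop that consumes the model written by the
# training script above.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainner/trainner.yml')
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

cam = cv2.VideoCapture(0)
while True:
    ok, frame = cam.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in detector.detectMultiScale(gray):
        face_id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
        # For LBPH, lower confidence values mean a closer match.
        label = str(face_id) if confidence < 70 else "unknown"
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, label, (x, y - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    cv2.imshow('recognition', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()
```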
4df84ef4b569e91799830f0ab4d861684e4beb33 | d8aead947769557bd9c231095f0e1e01568d2407 | /app/calibrate.py | f5ab75635865a879df68c4e92ae10a79ae081b8f | [
"MIT"
] | permissive | arvind-india/UCD-Research-Practicum-WiFi-Occupancy-Platform | d48542a672feb767c69b5ee9e75ae2a078096a24 | 5a1841ac961403f6b39ce6ca985ce2eee1d0bbdf | refs/heads/master | 2023-03-18T16:04:09.166206 | 2016-08-27T15:46:20 | 2016-08-27T15:46:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | from .mod_sensors import *
calibration.calibrate_rssi_baseline("app/mod_sensors/rssi/baseline_readings/test.csv", "B002")
| [
"[email protected]"
] | |
4ecca9d00e626da5c1cb01ab25691b1ad5dc13cb | c577f5380b4799b4db54722749cc33f9346eacc1 | /BugSwarm/scikit-learn-scikit-learn-405135881/buggy_files/sklearn/ensemble/bagging.py | 777e25edec064811e2752739a180b74dfd3e8754 | [] | no_license | tdurieux/BugSwarm-dissection | 55db683fd95f071ff818f9ca5c7e79013744b27b | ee6b57cfef2119523a083e82d902a6024e0d995a | refs/heads/master | 2020-04-30T17:11:52.050337 | 2019-05-09T13:42:03 | 2019-05-09T13:42:03 | 176,972,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,755 | py | """Bagging meta-estimator."""
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import itertools
import numbers
import numpy as np
from abc import ABCMeta, abstractmethod
from warnings import warn
from .base import BaseEnsemble, _partition_estimators
from ..base import ClassifierMixin, RegressorMixin
from ..utils import Parallel, delayed
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..metrics import r2_score, accuracy_score
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..utils import check_random_state, check_X_y, check_array, column_or_1d
from ..utils import indices_to_mask, check_consistent_length
from ..utils.metaestimators import if_delegate_has_method
from ..utils.multiclass import check_classification_targets
from ..utils.random import sample_without_replacement
from ..utils.validation import has_fit_parameter, check_is_fitted
__all__ = ["BaggingClassifier",
"BaggingRegressor"]
MAX_INT = np.iinfo(np.int32).max
def _generate_indices(random_state, bootstrap, n_population, n_samples):
"""Draw randomly sampled indices."""
# Draw sample indices
if bootstrap:
indices = random_state.randint(0, n_population, n_samples)
else:
indices = sample_without_replacement(n_population, n_samples,
random_state=random_state)
return indices
def _generate_bagging_indices(random_state, bootstrap_features,
bootstrap_samples, n_features, n_samples,
max_features, max_samples):
"""Randomly draw feature and sample indices."""
# Get valid random state
random_state = check_random_state(random_state)
# Draw indices
feature_indices = _generate_indices(random_state, bootstrap_features,
n_features, max_features)
sample_indices = _generate_indices(random_state, bootstrap_samples,
n_samples, max_samples)
return feature_indices, sample_indices
def _parallel_build_estimators(n_estimators, ensemble, X, y, sample_weight,
seeds, total_n_estimators, verbose):
"""Private function used to build a batch of estimators within a job."""
# Retrieve settings
n_samples, n_features = X.shape
max_features = ensemble._max_features
max_samples = ensemble._max_samples
bootstrap = ensemble.bootstrap
bootstrap_features = ensemble.bootstrap_features
support_sample_weight = has_fit_parameter(ensemble.base_estimator_,
"sample_weight")
if not support_sample_weight and sample_weight is not None:
raise ValueError("The base estimator doesn't support sample weight")
# Build estimators
estimators = []
estimators_features = []
for i in range(n_estimators):
if verbose > 1:
print("Building estimator %d of %d for this parallel run "
"(total %d)..." % (i + 1, n_estimators, total_n_estimators))
random_state = np.random.RandomState(seeds[i])
estimator = ensemble._make_estimator(append=False,
random_state=random_state)
# Draw random feature, sample indices
features, indices = _generate_bagging_indices(random_state,
bootstrap_features,
bootstrap, n_features,
n_samples, max_features,
max_samples)
# Draw samples, using sample weights, and then fit
if support_sample_weight:
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,))
else:
curr_sample_weight = sample_weight.copy()
if bootstrap:
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
else:
not_indices_mask = ~indices_to_mask(indices, n_samples)
curr_sample_weight[not_indices_mask] = 0
estimator.fit(X[:, features], y, sample_weight=curr_sample_weight)
# Draw samples, using a mask, and then fit
else:
estimator.fit((X[indices])[:, features], y[indices])
estimators.append(estimator)
estimators_features.append(features)
return estimators, estimators_features
def _parallel_predict_proba(estimators, estimators_features, X, n_classes):
"""Private function used to compute (proba-)predictions within a job."""
n_samples = X.shape[0]
proba = np.zeros((n_samples, n_classes))
for estimator, features in zip(estimators, estimators_features):
if hasattr(estimator, "predict_proba"):
proba_estimator = estimator.predict_proba(X[:, features])
if n_classes == len(estimator.classes_):
proba += proba_estimator
else:
proba[:, estimator.classes_] += \
proba_estimator[:, range(len(estimator.classes_))]
else:
# Resort to voting
predictions = estimator.predict(X[:, features])
for i in range(n_samples):
proba[i, predictions[i]] += 1
return proba
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
"""Private function used to compute log probabilities within a job."""
n_samples = X.shape[0]
log_proba = np.empty((n_samples, n_classes))
log_proba.fill(-np.inf)
all_classes = np.arange(n_classes, dtype=np.int)
for estimator, features in zip(estimators, estimators_features):
log_proba_estimator = estimator.predict_log_proba(X[:, features])
if n_classes == len(estimator.classes_):
log_proba = np.logaddexp(log_proba, log_proba_estimator)
else:
log_proba[:, estimator.classes_] = np.logaddexp(
log_proba[:, estimator.classes_],
log_proba_estimator[:, range(len(estimator.classes_))])
missing = np.setdiff1d(all_classes, estimator.classes_)
log_proba[:, missing] = np.logaddexp(log_proba[:, missing],
-np.inf)
return log_proba
def _parallel_decision_function(estimators, estimators_features, X):
"""Private function used to compute decisions within a job."""
return sum(estimator.decision_function(X[:, features])
for estimator, features in zip(estimators,
estimators_features))
def _parallel_predict_regression(estimators, estimators_features, X):
"""Private function used to compute predictions within a job."""
return sum(estimator.predict(X[:, features])
for estimator, features in zip(estimators,
estimators_features))
class BaseBagging(with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for Bagging meta-estimator.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=10,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
oob_score=False,
warm_start=False,
n_jobs=1,
random_state=None,
verbose=0):
super(BaseBagging, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators)
self.max_samples = max_samples
self.max_features = max_features
self.bootstrap = bootstrap
self.bootstrap_features = bootstrap_features
self.oob_score = oob_score
self.warm_start = warm_start
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
def fit(self, X, y, sample_weight=None):
"""Build a Bagging ensemble of estimators from the training
set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if the base estimator supports
sample weighting.
Returns
-------
self : object
"""
return self._fit(X, y, self.max_samples, sample_weight=sample_weight)
def _fit(self, X, y, max_samples=None, max_depth=None, sample_weight=None):
"""Build a Bagging ensemble of estimators from the training
set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
max_samples : int or float, optional (default=None)
Argument to use instead of self.max_samples.
max_depth : int, optional (default=None)
Override value used when constructing base estimator. Only
supported if the base estimator has a max_depth parameter.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if the base estimator supports
sample weighting.
Returns
-------
self : object
"""
random_state = check_random_state(self.random_state)
# Convert data (X is required to be 2d and indexable)
X, y = check_X_y(
X, y, ['csr', 'csc'], dtype=None, force_all_finite=False,
multi_output=True
)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
# Remap output
n_samples, self.n_features_ = X.shape
self._n_samples = n_samples
y = self._validate_y(y)
# Check parameters
self._validate_estimator()
if max_depth is not None:
self.base_estimator_.max_depth = max_depth
# Validate max_samples
if max_samples is None:
max_samples = self.max_samples
elif not isinstance(max_samples, (numbers.Integral, np.integer)):
max_samples = int(max_samples * X.shape[0])
if not (0 < max_samples <= X.shape[0]):
raise ValueError("max_samples must be in (0, n_samples]")
# Store validated integer row sampling value
self._max_samples = max_samples
# Validate max_features
if isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
max_features = int(self.max_features * self.n_features_)
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
# Store validated integer feature sampling value
self._max_features = max_features
# Other checks
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
if self.warm_start and self.oob_score:
raise ValueError("Out of bag estimate only available"
" if warm_start=False")
if hasattr(self, "oob_score_") and self.warm_start:
del self.oob_score_
if not self.warm_start or not hasattr(self, 'estimators_'):
# Free allocated memory, if any
self.estimators_ = []
self.estimators_features_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
return self
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(n_more_estimators,
self.n_jobs)
total_n_estimators = sum(n_estimators)
# Advance random state to state after training
# the first n_estimators
if self.warm_start and len(self.estimators_) > 0:
random_state.randint(MAX_INT, size=len(self.estimators_))
seeds = random_state.randint(MAX_INT, size=n_more_estimators)
self._seeds = seeds
all_results = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_build_estimators)(
n_estimators[i],
self,
X,
y,
sample_weight,
seeds[starts[i]:starts[i + 1]],
total_n_estimators,
verbose=self.verbose)
for i in range(n_jobs))
# Reduce
self.estimators_ += list(itertools.chain.from_iterable(
t[0] for t in all_results))
self.estimators_features_ += list(itertools.chain.from_iterable(
t[1] for t in all_results))
if self.oob_score:
self._set_oob_score(X, y)
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y(self, y):
if len(y.shape) == 1 or y.shape[1] == 1:
return column_or_1d(y, warn=True)
else:
return y
def _get_estimators_indices(self):
# Get drawn indices along both sample and feature axes
for seed in self._seeds:
# Operations accessing random_state must be performed identically
# to those in `_parallel_build_estimators()`
random_state = np.random.RandomState(seed)
feature_indices, sample_indices = _generate_bagging_indices(
random_state, self.bootstrap_features, self.bootstrap,
self.n_features_, self._n_samples, self._max_features,
self._max_samples)
yield feature_indices, sample_indices
@property
def estimators_samples_(self):
"""The subset of drawn samples for each base estimator.
Returns a dynamically generated list of boolean masks identifying
the samples used for fitting each member of the ensemble, i.e.,
the in-bag samples.
Note: the list is re-created at each call to the property in order
to reduce the object memory footprint by not storing the sampling
data. Thus fetching the property may be slower than expected.
"""
sample_masks = []
for _, sample_indices in self._get_estimators_indices():
mask = indices_to_mask(sample_indices, self._n_samples)
sample_masks.append(mask)
return sample_masks
class BaggingClassifier(BaseBagging, ClassifierMixin):
"""A Bagging classifier.
A Bagging classifier is an ensemble meta-estimator that fits base
classifiers each on random subsets of the original dataset and then
aggregate their individual predictions (either by voting or by averaging)
to form a final prediction. Such a meta-estimator can typically be used as
a way to reduce the variance of a black-box estimator (e.g., a decision
tree), by introducing randomization into its construction procedure and
then making an ensemble out of it.
This algorithm encompasses several works from the literature. When random
subsets of the dataset are drawn as random subsets of the samples, then
this algorithm is known as Pasting [1]_. If samples are drawn with
replacement, then the method is known as Bagging [2]_. When random subsets
of the dataset are drawn as random subsets of the features, then the method
is known as Random Subspaces [3]_. Finally, when base estimators are built
on subsets of both samples and features, then the method is known as
Random Patches [4]_.
Read more in the :ref:`User Guide <bagging>`.
Parameters
----------
base_estimator : object or None, optional (default=None)
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a decision tree.
n_estimators : int, optional (default=10)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default=1.0)
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
max_features : int or float, optional (default=1.0)
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : boolean, optional (default=True)
Whether samples are drawn with replacement.
bootstrap_features : boolean, optional (default=False)
Whether features are drawn with replacement.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
warm_start : bool, optional (default=False)
When set to True, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit
a whole new ensemble. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.17
*warm_start* constructor parameter.
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity when fitting and predicting.
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
estimators_ : list of estimators
The collection of fitted base estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator. Each subset is defined by a boolean mask.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int or list
The number of classes.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Pasting small votes for classification in large
databases and on-line", Machine Learning, 36(1), 85-103, 1999.
.. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1996.
.. [3] T. Ho, "The random subspace method for constructing decision
forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1998.
.. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
Learning and Knowledge Discovery in Databases, 346-361, 2012.
"""
def __init__(self,
base_estimator=None,
n_estimators=10,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
oob_score=False,
warm_start=False,
n_jobs=1,
random_state=None,
verbose=0):
super(BaggingClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
bootstrap=bootstrap,
bootstrap_features=bootstrap_features,
oob_score=oob_score,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(BaggingClassifier, self)._validate_estimator(
default=DecisionTreeClassifier())
def _set_oob_score(self, X, y):
n_samples = y.shape[0]
n_classes_ = self.n_classes_
classes_ = self.classes_
predictions = np.zeros((n_samples, n_classes_))
for estimator, samples, features in zip(self.estimators_,
self.estimators_samples_,
self.estimators_features_):
# Create mask for OOB samples
mask = ~samples
if hasattr(estimator, "predict_proba"):
predictions[mask, :] += estimator.predict_proba(
(X[mask, :])[:, features])
else:
p = estimator.predict((X[mask, :])[:, features])
j = 0
for i in range(n_samples):
if mask[i]:
predictions[i, p[j]] += 1
j += 1
if (predictions.sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few estimators were used "
"to compute any reliable oob estimates.")
oob_decision_function = (predictions /
predictions.sum(axis=1)[:, np.newaxis])
oob_score = accuracy_score(y, np.argmax(predictions, axis=1))
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score
def _validate_y(self, y):
y = column_or_1d(y, warn=True)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is computed as the class with
the highest mean predicted probability. If base estimators do not
implement a ``predict_proba`` method, then it resorts to voting.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
predicted_probabilitiy = self.predict_proba(X)
return self.classes_.take((np.argmax(predicted_probabilitiy, axis=1)),
axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the base estimators in the
ensemble. If base estimators do not implement a ``predict_proba``
method, then it resorts to voting and the predicted class probabilities
of an input sample represents the proportion of estimators predicting
each class.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, "classes_")
# Check data
X = check_array(
X, accept_sparse=['csr', 'csc'], dtype=None,
force_all_finite=False
)
if self.n_features_ != X.shape[1]:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is {0} and "
"input n_features is {1}."
"".format(self.n_features_, X.shape[1]))
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
self.n_jobs)
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_proba)(
self.estimators_[starts[i]:starts[i + 1]],
self.estimators_features_[starts[i]:starts[i + 1]],
X,
self.n_classes_)
for i in range(n_jobs))
# Reduce
proba = sum(all_proba) / self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the base
estimators in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, "classes_")
if hasattr(self.base_estimator_, "predict_log_proba"):
# Check data
X = check_array(
X, accept_sparse=['csr', 'csc'], dtype=None,
force_all_finite=False
)
if self.n_features_ != X.shape[1]:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is {0} "
"and input n_features is {1} "
"".format(self.n_features_, X.shape[1]))
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(
self.n_estimators, self.n_jobs)
all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_log_proba)(
self.estimators_[starts[i]:starts[i + 1]],
self.estimators_features_[starts[i]:starts[i + 1]],
X,
self.n_classes_)
for i in range(n_jobs))
# Reduce
log_proba = all_log_proba[0]
for j in range(1, len(all_log_proba)):
log_proba = np.logaddexp(log_proba, all_log_proba[j])
log_proba -= np.log(self.n_estimators)
return log_proba
else:
return np.log(self.predict_proba(X))
@if_delegate_has_method(delegate='base_estimator')
def decision_function(self, X):
"""Average of the decision functions of the base classifiers.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The columns correspond
to the classes in sorted order, as they appear in the attribute
``classes_``. Regression and binary classification are special
cases with ``k == 1``, otherwise ``k==n_classes``.
"""
check_is_fitted(self, "classes_")
# Check data
X = check_array(
X, accept_sparse=['csr', 'csc'], dtype=None,
force_all_finite=False
)
if self.n_features_ != X.shape[1]:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is {0} and "
"input n_features is {1} "
"".format(self.n_features_, X.shape[1]))
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
self.n_jobs)
all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_decision_function)(
self.estimators_[starts[i]:starts[i + 1]],
self.estimators_features_[starts[i]:starts[i + 1]],
X)
for i in range(n_jobs))
# Reduce
decisions = sum(all_decisions) / self.n_estimators
return decisions
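    # Illustrative sketch (not part of the library): decision_function is only
    # exposed when the base estimator itself provides one, via the
    # if_delegate_has_method decorator on this method.  A hedged usage sketch,
    # assuming an SVC base estimator and user-supplied X, y:
    #
    #     from sklearn.svm import SVC
    #     clf = BaggingClassifier(base_estimator=SVC(), n_estimators=5).fit(X, y)
    #     scores = clf.decision_function(X)   # mean of the five SVC decisions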
class BaggingRegressor(BaseBagging, RegressorMixin):
"""A Bagging regressor.
A Bagging regressor is an ensemble meta-estimator that fits base
    regressors each on random subsets of the original dataset and then
    aggregates their individual predictions by averaging to form a final
    prediction. Such a meta-estimator can typically be used as
a way to reduce the variance of a black-box estimator (e.g., a decision
tree), by introducing randomization into its construction procedure and
then making an ensemble out of it.
This algorithm encompasses several works from the literature. When random
subsets of the dataset are drawn as random subsets of the samples, then
this algorithm is known as Pasting [1]_. If samples are drawn with
replacement, then the method is known as Bagging [2]_. When random subsets
of the dataset are drawn as random subsets of the features, then the method
is known as Random Subspaces [3]_. Finally, when base estimators are built
on subsets of both samples and features, then the method is known as
Random Patches [4]_.
Read more in the :ref:`User Guide <bagging>`.
Parameters
----------
base_estimator : object or None, optional (default=None)
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a decision tree.
n_estimators : int, optional (default=10)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default=1.0)
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
max_features : int or float, optional (default=1.0)
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : boolean, optional (default=True)
Whether samples are drawn with replacement.
bootstrap_features : boolean, optional (default=False)
Whether features are drawn with replacement.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
warm_start : bool, optional (default=False)
When set to True, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit
a whole new ensemble. See :term:`the Glossary <warm_start>`.
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity when fitting and predicting.
Attributes
----------
estimators_ : list of estimators
The collection of fitted sub-estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator. Each subset is defined by a boolean mask.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_prediction_` might contain NaN.
References
----------
.. [1] L. Breiman, "Pasting small votes for classification in large
databases and on-line", Machine Learning, 36(1), 85-103, 1999.
.. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1996.
.. [3] T. Ho, "The random subspace method for constructing decision
forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1998.
.. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
Learning and Knowledge Discovery in Databases, 346-361, 2012.
"""
def __init__(self,
base_estimator=None,
n_estimators=10,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
oob_score=False,
warm_start=False,
n_jobs=1,
random_state=None,
verbose=0):
super(BaggingRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
bootstrap=bootstrap,
bootstrap_features=bootstrap_features,
oob_score=oob_score,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the estimators in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
check_is_fitted(self, "estimators_features_")
# Check data
X = check_array(
X, accept_sparse=['csr', 'csc'], dtype=None,
force_all_finite=False
)
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
self.n_jobs)
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_regression)(
self.estimators_[starts[i]:starts[i + 1]],
self.estimators_features_[starts[i]:starts[i + 1]],
X)
for i in range(n_jobs))
# Reduce
y_hat = sum(all_y_hat) / self.n_estimators
return y_hat
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(BaggingRegressor, self)._validate_estimator(
default=DecisionTreeRegressor())
def _set_oob_score(self, X, y):
n_samples = y.shape[0]
predictions = np.zeros((n_samples,))
n_predictions = np.zeros((n_samples,))
for estimator, samples, features in zip(self.estimators_,
self.estimators_samples_,
self.estimators_features_):
# Create mask for OOB samples
mask = ~samples
predictions[mask] += estimator.predict((X[mask, :])[:, features])
n_predictions[mask] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few estimators were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
self.oob_score_ = r2_score(y, predictions)
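# Minimal usage sketch (illustrative only, not part of the library); in user
# code the estimator would be imported from the public API:
#
#     from sklearn.ensemble import BaggingRegressor
#     import numpy as np
#
#     rng = np.random.RandomState(0)
#     X = rng.uniform(size=(100, 1))
#     y = np.sin(6 * X[:, 0]) + rng.normal(scale=0.1, size=100)
#     reg = BaggingRegressor(n_estimators=10, oob_score=True,
#                            random_state=0).fit(X, y)
#     print(reg.oob_score_, reg.predict(X[:3]))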
| [
"[email protected]"
] | |
9f9ad8ebb6fedbc7c53a79ce92698a7cc44d3b29 | d532b85841b459c61d88d380e88dd08d29836d43 | /solutions/1137_n_th_tribonacci_number.py | 213daa978cf7e1bbfd004aa81928729015dbfb37 | [
"MIT"
] | permissive | YiqunPeng/leetcode_pro | ad942468df5506de9dc48a4019933f658e2a3121 | 4a508a982b125a3a90ea893ae70863df7c99cc70 | refs/heads/master | 2022-05-15T09:32:02.699180 | 2022-05-14T16:32:17 | 2022-05-14T16:32:17 | 182,453,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | class Solution:
def tribonacci(self, n: int) -> int:
"""Array.
Running time: O(n).
"""
t = [0, 1, 1]
for i in range(3, n + 1):
t[i%3] = sum(t)
return t[n%3]
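# Worked example (illustrative): the sequence starts 0, 1, 1, 2, 4, 7, so
# Solution().tribonacci(4) == 4.  Only the last three values are kept,
# rotating through the fixed-size list via the i % 3 index.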
| [
"[email protected]"
] | |
c1b66e6ceeca994c6c42b9d66d43dc2c32636294 | d57c448fb5d711717912e002ac62bfe658045068 | /djsr/frontend/urls.py | ee73869d8c19a5c5fe66c2a3fdd630236ad73a65 | [] | no_license | Mysta3/django-react-sjwt-app | 5ba1f0d9e77c62c564d753eef9ac0b44e9712bdb | a7ef5bd3f3db8a33527071ede5fcbe916011ff0c | refs/heads/master | 2023-03-08T00:39:03.200767 | 2022-02-15T20:46:36 | 2022-02-15T20:46:36 | 249,324,609 | 0 | 0 | null | 2023-03-04T07:32:58 | 2020-03-23T03:13:29 | Python | UTF-8 | Python | false | false | 244 | py |
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from .views import index_view
urlpatterns = [
path('', index_view), # for the empty url
url(r'^.*/$', index_view) # for all other urls
]
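# Note (assumption about intent): the catch-all pattern hands every other path,
# e.g. /dashboard/, to index_view so the React frontend can do its own routing.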
| [
"[email protected]"
] | |
dec008f73c4a1a5746f1cef660b824097051ac6a | 571a89f94f3ebd9ec8e6b618cddb7d05811e0d62 | /abc178/f/main.py | e33571678bbcc68cd02255451e160931f0c863be | [] | no_license | ryu19-1/atcoder_python | 57de9e1db8ff13a107b5861f8f6a231e40366313 | cc24b3c2895aad71d40cefbb8e2893dc397b8f4f | refs/heads/master | 2023-05-10T05:32:16.507207 | 2021-05-19T17:48:10 | 2021-05-19T17:48:10 | 368,954,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python3
from collections import Counter
INF = 1e12
def main():
N = int(input())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
lA = Counter(A)
lB = Counter(B)
if max(Counter(A+B).values()) > N:
print('No')
exit()
C = [0] * (N + 1)
D = [0] * (N + 1)
for i in range(N):
C[i + 1] = lA[i + 1] + C[i]
D[i + 1] = lB[i + 1] + D[i]
shift = - INF
for i in range(1, N + 1):
shift = max(shift, C[i] - D[i - 1])
print('Yes')
ans = (B + B + B)[N-shift:2*N-shift]
print(' '.join([str(a) for a in ans]))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
9476e7cff83f6852d1dfbdb1d49609b98621d26c | 2867897534993b590c4fb4bd90b02f2c188f9a11 | /db/upload_design_docs.py | bf2a3dfdc179522fc8a9a649b3ac63dd6d916194 | [
"MIT"
] | permissive | nfvs/idavoll | 82ad7807e9bdb2772ec1bda24feea84b86714ea5 | 5423584d062705ffd60f088d31b8d6a301fe3742 | refs/heads/master | 2021-01-16T21:57:25.647674 | 2013-04-17T12:42:43 | 2013-04-17T12:42:43 | 873,655 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | #!/usr/bin/env python
import sys
from couchdbkit import Server
if len(sys.argv) <= 3:
print 'Usage: %s <couchdb-url> <couchdb-db> <path-to-couchdb-design-docs>' % sys.argv[0]
print
exit(-1)
url = sys.argv[1]
db = sys.argv[2]
p = sys.argv[3]
print
print 'Uploading design docs'
print 'from \t folder = "%s"' % p
print 'to \t url = "%s", db = "%s"' % (url, db)
print 'Ok? (y/n)'
a = raw_input()
if a != 'y' and a != 'Y':
exit(-1)
# upload design docs
from couchdbkit.loaders import FileSystemDocsLoader
loader = FileSystemDocsLoader(p)
server = Server(url)
cdb = server.get_or_create_db(db)
loader.sync(cdb)
| [
"[email protected]"
] | |
d4bff4cd9bbe8248787a6d57c2a536df58de18c8 | 45ac47507b05b8b08c95a94fea31765343ca278a | /tespost.py | a85e24f748338d8e6453b8d8bcd8aa056b16953c | [] | no_license | nsohit/Sriver | c0e179133a63581ad83c7104b4478a7aa04a1fb4 | c3ea1d0d8a0994546fbe4272441a28d0f397e7f7 | refs/heads/master | 2020-11-29T18:48:22.232464 | 2020-07-04T03:03:19 | 2020-07-04T03:03:19 | 230,193,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | import requests
import json
servername = 'http://inoprex.com/post-esp-data.php'
apiKeyValue = 'tPmAT5Ab3j7F9'
sensorName = 'tampa-nama'
sensorLocation = 'Universitas_Raharja'
#data = 'api_key=' + apiKeyValue + '&sensor=' + sensorName+ '&location=' + sensorLocation + '&value1=' + str(321)+ '&value2=' + str(987) + '&value3=' + str(123) + ""
url = 'http://inoprex.com/post-esp-data.php'
'''
ldrvalue = '50'
data='ldrvalue='+ldrvalue
'''
#data = { 'api_key' : apiKeyValue, 'sensor' : "12", 'location' : "12",'value1':"12", 'value2':"12",'value3':"12" }
#dataJson = json.dumps(data)
#payload = {"json_payload": dataJson}
#url ='http://www.inoprex.com/server.php'
#data = "{\"value1\":\"19\",\"value2\":\"67\",\"value3\":\"78\"}"
# Build a valid JSON payload (the previous hand-written string used '=' instead
# of ':' between keys and values, which is not valid JSON).
data = json.dumps({"api_key": apiKeyValue, "sensor": "abs", "location": "acv",
                   "value1": "19", "value2": "67", "value3": "78"})
headers = {'Content-Type': 'application/json'}
response = requests.post(url, headers=headers, data=data)  # send once, keep the response
print(response)
print(data)
#"{\"api_key\":\"tPmAT5Ab3j7F9\","\"value1\":\"19\",\"value2\":\"67\",\"value3\":\"78\"}"
| [
"[email protected]"
] | |
b37be2064d3accc9ab5dd6bdbb1bf3353fe77af3 | 912cb61eaa768716d30844990ebbdd80ab2c2f4e | /ex046.py | e55b7ccacaec6e147ab1c140c23a4c353c0a1c65 | [] | no_license | luizaacampos/exerciciosCursoEmVideoPython | 5fc9bed736300916e1c26d115eb2e703ba1dd4ca | 398bfa5243adae00fb58056d1672cc20ff4a31d6 | refs/heads/main | 2023-01-06T21:48:17.068478 | 2020-11-11T12:29:10 | 2020-11-11T12:29:10 | 311,964,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | from time import sleep
for i in range(10, -1, -1):
print(i)
sleep(1)
print('PA PA POW BUM!!') | [
"[email protected]"
] | |
ead6a48ab9b1dc29f28974572c698cf90040a882 | 6d9a754296b5ffbbe82c2594793532155f37a7e0 | /python/virtualEnvironments/myEnvironments/flaskEnv/lib/python2.7/site-packages/sqlalchemy/orm/relationships.py | 1a25fbc012c9cd155ff9e6fa08e13f3a9033e4d7 | [] | no_license | rnwokoye/Dojo-Assignments | 5ed941224df699b8cd988cc076c2ba8075803b2c | 47350f73b9dd270e88ba0877266d16d21a73f635 | refs/heads/master | 2021-09-22T10:10:19.407835 | 2018-09-07T17:56:54 | 2018-09-07T17:56:54 | 120,479,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118,222 | py | # orm/relationships.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Heuristics related to join conditions as used in
:func:`.relationship`.
Provides the :class:`.JoinCondition` object, which encapsulates
SQL annotation and aliasing behavior focused on the `primaryjoin`
and `secondaryjoin` aspects of :func:`.relationship`.
"""
from __future__ import absolute_import
from .. import sql, util, exc as sa_exc, schema, log
import weakref
from .util import CascadeOptions, _orm_annotate, _orm_deannotate
from . import dependency
from . import attributes
from ..sql.util import (
ClauseAdapter,
join_condition, _shallow_annotate, visit_binary_product,
_deep_deannotate, selectables_overlap, adapt_criterion_to_null
)
from ..sql import operators, expression, visitors
from .interfaces import (MANYTOMANY, MANYTOONE, ONETOMANY,
StrategizedProperty, PropComparator)
from ..inspection import inspect
from . import mapper as mapperlib
import collections
def remote(expr):
"""Annotate a portion of a primaryjoin expression
with a 'remote' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. versionadded:: 0.8
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.foreign`
"""
return _annotate_columns(expression._clause_element_as_expr(expr),
{"remote": True})
def foreign(expr):
"""Annotate a portion of a primaryjoin expression
with a 'foreign' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. versionadded:: 0.8
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.remote`
"""
return _annotate_columns(expression._clause_element_as_expr(expr),
{"foreign": True})
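# Illustrative sketch (not part of the library): the two annotations are
# typically combined inside an explicit primaryjoin; class and column names
# below are hypothetical:
#
#     user = relationship(
#         "User",
#         primaryjoin="foreign(Address.user_id) == remote(User.id)",
#         viewonly=True,
#     )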
@log.class_logger
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class RelationshipProperty(StrategizedProperty):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`.orm.relationship` function.
See also:
:ref:`relationship_config_toplevel`
"""
strategy_wildcard_key = 'relationship'
_dependency_processor = None
def __init__(self, argument,
secondary=None, primaryjoin=None,
secondaryjoin=None,
foreign_keys=None,
uselist=None,
order_by=False,
backref=None,
back_populates=None,
post_update=False,
cascade=False, extension=None,
viewonly=False, lazy=True,
collection_class=None, passive_deletes=False,
passive_updates=True, remote_side=None,
enable_typechecks=True, join_depth=None,
comparator_factory=None,
single_parent=False, innerjoin=False,
distinct_target_key=None,
doc=None,
active_history=False,
cascade_backrefs=True,
load_on_pending=False,
bake_queries=True,
_local_remote_pairs=None,
query_class=None,
info=None):
"""Provide a relationship between two mapped classes.
This corresponds to a parent-child or associative table relationship.
The constructed class is an instance of
:class:`.RelationshipProperty`.
A typical :func:`.relationship`, used in a classical mapping::
mapper(Parent, properties={
'children': relationship(Child)
})
Some arguments accepted by :func:`.relationship` optionally accept a
callable function, which when called produces the desired value.
The callable is invoked by the parent :class:`.Mapper` at "mapper
initialization" time, which happens only when mappers are first used,
and is assumed to be after all mappings have been constructed. This
can be used to resolve order-of-declaration and other dependency
issues, such as if ``Child`` is declared below ``Parent`` in the same
file::
mapper(Parent, properties={
"children":relationship(lambda: Child,
order_by=lambda: Child.id)
})
When using the :ref:`declarative_toplevel` extension, the Declarative
initializer allows string arguments to be passed to
:func:`.relationship`. These string arguments are converted into
callables that evaluate the string as Python code, using the
Declarative class-registry as a namespace. This allows the lookup of
related classes to be automatic via their string name, and removes the
need to import related classes at all into the local module space::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
children = relationship("Child", order_by="Child.id")
.. seealso::
:ref:`relationship_config_toplevel` - Full introductory and
reference documentation for :func:`.relationship`.
:ref:`orm_tutorial_relationship` - ORM tutorial introduction.
:param argument:
a mapped class, or actual :class:`.Mapper` instance, representing
the target of the relationship.
:paramref:`~.relationship.argument` may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a Python-evaluable string when using Declarative.
.. seealso::
:ref:`declarative_configuring_relationships` - further detail
on relationship configuration when using Declarative.
:param secondary:
for a many-to-many relationship, specifies the intermediary
table, and is typically an instance of :class:`.Table`.
In less common circumstances, the argument may also be specified
as an :class:`.Alias` construct, or even a :class:`.Join` construct.
:paramref:`~.relationship.secondary` may
also be passed as a callable function which is evaluated at
mapper initialization time. When using Declarative, it may also
be a string argument noting the name of a :class:`.Table` that is
present in the :class:`.MetaData` collection associated with the
parent-mapped :class:`.Table`.
The :paramref:`~.relationship.secondary` keyword argument is
typically applied in the case where the intermediary :class:`.Table`
is not otherwise expressed in any direct class mapping. If the
"secondary" table is also explicitly mapped elsewhere (e.g. as in
:ref:`association_pattern`), one should consider applying the
:paramref:`~.relationship.viewonly` flag so that this
:func:`.relationship` is not used for persistence operations which
may conflict with those of the association object pattern.
.. seealso::
:ref:`relationships_many_to_many` - Reference example of "many
to many".
:ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to
many-to-many relationships.
:ref:`self_referential_many_to_many` - Specifics on using
many-to-many in a self-referential case.
:ref:`declarative_many_to_many` - Additional options when using
Declarative.
:ref:`association_pattern` - an alternative to
:paramref:`~.relationship.secondary` when composing association
table relationships, allowing additional attributes to be
specified on the association table.
:ref:`composite_secondary_join` - a lesser-used pattern which
in some cases can enable complex :func:`.relationship` SQL
conditions to be used.
.. versionadded:: 0.9.2 :paramref:`~.relationship.secondary` works
more effectively when referring to a :class:`.Join` instance.
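          For example, a minimal many-to-many configuration (table, column
          and class names here are purely illustrative) might look like::

              association = Table(
                  'association', Base.metadata,
                  Column('parent_id', ForeignKey('parent.id'), primary_key=True),
                  Column('child_id', ForeignKey('child.id'), primary_key=True))

              children = relationship("Child", secondary=association)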
:param active_history=False:
When ``True``, indicates that the "previous" value for a
many-to-one reference should be loaded when replaced, if
not already loaded. Normally, history tracking logic for
simple many-to-ones only needs to be aware of the "new"
value in order to perform a flush. This flag is available
for applications that make use of
:func:`.attributes.get_history` which also need to know
the "previous" value of the attribute.
:param backref:
indicates the string name of a property to be placed on the related
mapper's class that will handle this relationship in the other
direction. The other property will be created automatically
when the mappers are configured. Can also be passed as a
:func:`.backref` object to control the configuration of the
new relationship.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`~.relationship.back_populates` - alternative form
of backref specification.
:func:`.backref` - allows control over :func:`.relationship`
configuration when using :paramref:`~.relationship.backref`.
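          For example (class names illustrative, and assuming ``Child``
          defines the usual foreign key column), the following creates both
          ``Parent.children`` and a complementing ``Child.parent``::

              class Parent(Base):
                  __tablename__ = 'parent'
                  id = Column(Integer, primary_key=True)
                  children = relationship("Child", backref="parent")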
:param back_populates:
Takes a string name and has the same meaning as
:paramref:`~.relationship.backref`, except the complementing
property is **not** created automatically, and instead must be
configured explicitly on the other mapper. The complementing
property should also indicate
:paramref:`~.relationship.back_populates` to this relationship to
ensure proper functioning.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`~.relationship.backref` - alternative form
of backref specification.
:param bake_queries=True:
Use the :class:`.BakedQuery` cache to cache the construction of SQL
used in lazy loads, when the :func:`.bake_lazy_loaders` function has
first been called. Defaults to True and is intended to provide an
"opt out" flag per-relationship when the baked query cache system is
in use.
.. warning::
This flag **only** has an effect when the application-wide
:func:`.bake_lazy_loaders` function has been called. It
defaults to True so is an "opt out" flag.
Setting this flag to False when baked queries are otherwise in
use might be done to reduce
ORM memory use for this :func:`.relationship`, or to work around
unresolved stability issues observed within the baked query
cache system.
.. versionadded:: 1.0.0
.. seealso::
:ref:`baked_toplevel`
:param cascade:
a comma-separated list of cascade rules which determines how
Session operations should be "cascaded" from parent to child.
This defaults to ``False``, which means the default cascade
should be used - this default cascade is ``"save-update, merge"``.
The available cascades are ``save-update``, ``merge``,
``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``.
An additional option, ``all`` indicates shorthand for
``"save-update, merge, refresh-expire,
expunge, delete"``, and is often used as in ``"all, delete-orphan"``
to indicate that related objects should follow along with the
parent object in all cases, and be deleted when de-associated.
.. seealso::
:ref:`unitofwork_cascades` - Full detail on each of the available
cascade options.
:ref:`tutorial_delete_cascade` - Tutorial example describing
a delete cascade.
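          For example, a collection whose members should be deleted together
          with their parent and never exist without one::

              children = relationship("Child", cascade="all, delete-orphan")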
:param cascade_backrefs=True:
a boolean value indicating if the ``save-update`` cascade should
operate along an assignment event intercepted by a backref.
When set to ``False``, the attribute managed by this relationship
will not cascade an incoming transient object into the session of a
persistent parent, if the event is received via backref.
.. seealso::
:ref:`backref_cascade` - Full discussion and examples on how
the :paramref:`~.relationship.cascade_backrefs` option is used.
:param collection_class:
a class or callable that returns a new list-holding object. will
be used in place of a plain list for storing elements.
.. seealso::
:ref:`custom_collections` - Introductory documentation and
examples.
:param comparator_factory:
a class which extends :class:`.RelationshipProperty.Comparator`
which provides custom SQL clause generation for comparison
operations.
.. seealso::
:class:`.PropComparator` - some detail on redefining comparators
at this level.
:ref:`custom_comparators` - Brief intro to this feature.
:param distinct_target_key=None:
Indicate if a "subquery" eager load should apply the DISTINCT
keyword to the innermost SELECT statement. When left as ``None``,
the DISTINCT keyword will be applied in those cases when the target
columns do not comprise the full primary key of the target table.
When set to ``True``, the DISTINCT keyword is applied to the
innermost SELECT unconditionally.
It may be desirable to set this flag to False when the DISTINCT is
reducing performance of the innermost subquery beyond that of what
duplicate innermost rows may be causing.
.. versionadded:: 0.8.3 -
:paramref:`~.relationship.distinct_target_key` allows the
subquery eager loader to apply a DISTINCT modifier to the
innermost SELECT.
.. versionchanged:: 0.9.0 -
:paramref:`~.relationship.distinct_target_key` now defaults to
``None``, so that the feature enables itself automatically for
those cases where the innermost query targets a non-unique
key.
.. seealso::
:ref:`loading_toplevel` - includes an introduction to subquery
eager loading.
:param doc:
docstring which will be applied to the resulting descriptor.
:param extension:
an :class:`.AttributeExtension` instance, or list of extensions,
which will be prepended to the list of attribute listeners for
the resulting descriptor placed on the class.
.. deprecated:: 0.7 Please see :class:`.AttributeEvents`.
:param foreign_keys:
a list of columns which are to be used as "foreign key"
columns, or columns which refer to the value in a remote
column, within the context of this :func:`.relationship`
object's :paramref:`~.relationship.primaryjoin` condition.
That is, if the :paramref:`~.relationship.primaryjoin`
condition of this :func:`.relationship` is ``a.id ==
b.a_id``, and the values in ``b.a_id`` are required to be
present in ``a.id``, then the "foreign key" column of this
:func:`.relationship` is ``b.a_id``.
In normal cases, the :paramref:`~.relationship.foreign_keys`
parameter is **not required.** :func:`.relationship` will
automatically determine which columns in the
:paramref:`~.relationship.primaryjoin` condition are to be
considered "foreign key" columns based on those
:class:`.Column` objects that specify :class:`.ForeignKey`,
or are otherwise listed as referencing columns in a
:class:`.ForeignKeyConstraint` construct.
:paramref:`~.relationship.foreign_keys` is only needed when:
1. There is more than one way to construct a join from the local
table to the remote table, as there are multiple foreign key
references present. Setting ``foreign_keys`` will limit the
:func:`.relationship` to consider just those columns specified
here as "foreign".
.. versionchanged:: 0.8
A multiple-foreign key join ambiguity can be resolved by
setting the :paramref:`~.relationship.foreign_keys`
parameter alone, without the need to explicitly set
:paramref:`~.relationship.primaryjoin` as well.
2. The :class:`.Table` being mapped does not actually have
:class:`.ForeignKey` or :class:`.ForeignKeyConstraint`
constructs present, often because the table
was reflected from a database that does not support foreign key
reflection (MySQL MyISAM).
3. The :paramref:`~.relationship.primaryjoin` argument is used to
construct a non-standard join condition, which makes use of
columns or expressions that do not normally refer to their
"parent" column, such as a join condition expressed by a
complex comparison using a SQL function.
The :func:`.relationship` construct will raise informative
error messages that suggest the use of the
:paramref:`~.relationship.foreign_keys` parameter when
presented with an ambiguous condition. In typical cases,
if :func:`.relationship` doesn't raise any exceptions, the
:paramref:`~.relationship.foreign_keys` parameter is usually
not needed.
:paramref:`~.relationship.foreign_keys` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
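          For example, disambiguating two foreign keys that both point to the
          same target table (names illustrative)::

              billing_address = relationship(
                  "Address", foreign_keys="[Customer.billing_address_id]")
              shipping_address = relationship(
                  "Address", foreign_keys="[Customer.shipping_address_id]")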
.. seealso::
:ref:`relationship_foreign_keys`
:ref:`relationship_custom_foreign`
:func:`.foreign` - allows direct annotation of the "foreign"
columns within a :paramref:`~.relationship.primaryjoin` condition.
.. versionadded:: 0.8
The :func:`.foreign` annotation can also be applied
directly to the :paramref:`~.relationship.primaryjoin`
expression, which is an alternate, more specific system of
describing which columns in a particular
:paramref:`~.relationship.primaryjoin` should be considered
"foreign".
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
.. versionadded:: 0.8
:param innerjoin=False:
when ``True``, joined eager loads will use an inner join to join
against related tables instead of an outer join. The purpose
of this option is generally one of performance, as inner joins
generally perform better than outer joins.
This flag can be set to ``True`` when the relationship references an
object via many-to-one using local foreign keys that are not
nullable, or when the reference is one-to-one or a collection that
is guaranteed to have one or at least one entry.
The option supports the same "nested" and "unnested" options as
that of :paramref:`.joinedload.innerjoin`. See that flag
for details on nested / unnested behaviors.
.. seealso::
:paramref:`.joinedload.innerjoin` - the option as specified by
loader option, including detail on nesting behavior.
:ref:`what_kind_of_loading` - Discussion of some details of
various loader options.
:param join_depth:
when non-``None``, an integer value indicating how many levels
deep "eager" loaders should join on a self-referring or cyclical
relationship. The number counts how many times the same Mapper
shall be present in the loading condition along a particular join
branch. When left at its default of ``None``, eager loaders
will stop chaining when they encounter the same target mapper
which is already higher up in the chain. This option applies
both to joined- and subquery- eager loaders.
.. seealso::
:ref:`self_referential_eager_loading` - Introductory documentation
and examples.
:param lazy='select': specifies
how the related items should be loaded. Default value is
``select``. Values include:
* ``select`` - items should be loaded lazily when the property is
first accessed, using a separate SELECT statement, or identity map
fetch for simple many-to-one references.
* ``immediate`` - items should be loaded as the parents are loaded,
using a separate SELECT statement, or identity map fetch for
simple many-to-one references.
* ``joined`` - items should be loaded "eagerly" in the same query as
that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
the join is "outer" or not is determined by the
:paramref:`~.relationship.innerjoin` parameter.
* ``subquery`` - items should be loaded "eagerly" as the parents are
loaded, using one additional SQL statement, which issues a JOIN to
a subquery of the original statement, for each collection
requested.
* ``noload`` - no loading should occur at any time. This is to
support "write-only" attributes, or attributes which are
populated in some manner specific to the application.
* ``raise`` - lazy loading is disallowed; accessing
the attribute, if its value were not already loaded via eager
loading, will raise an :exc:`~sqlalchemy.exc.InvalidRequestError`.
This strategy can be used when objects are to be detached from
their attached :class:`.Session` after they are loaded.
.. versionadded:: 1.1
* ``raise_on_sql`` - lazy loading that emits SQL is disallowed;
accessing the attribute, if its value were not already loaded via
eager loading, will raise an
:exc:`~sqlalchemy.exc.InvalidRequestError`, **if the lazy load
needs to emit SQL**. If the lazy load can pull the related value
from the identity map or determine that it should be None, the
value is loaded. This strategy can be used when objects will
remain associated with the attached :class:`.Session`, however
additional SELECT statements should be blocked.
.. versionadded:: 1.1
* ``dynamic`` - the attribute will return a pre-configured
:class:`.Query` object for all read
operations, onto which further filtering operations can be
applied before iterating the results. See
the section :ref:`dynamic_relationship` for more details.
* True - a synonym for 'select'
* False - a synonym for 'joined'
* None - a synonym for 'noload'
.. seealso::
:doc:`/orm/loading_relationships` - Full documentation on relationship loader
configuration.
:ref:`dynamic_relationship` - detail on the ``dynamic`` option.
:ref:`collections_noload_raiseload` - notes on "noload" and "raise"
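          For example, to always load a collection eagerly in the same query
          as its parent, using a LEFT OUTER JOIN::

              children = relationship("Child", lazy="joined")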
:param load_on_pending=False:
Indicates loading behavior for transient or pending parent objects.
When set to ``True``, causes the lazy-loader to
issue a query for a parent object that is not persistent, meaning it
has never been flushed. This may take effect for a pending object
when autoflush is disabled, or for a transient object that has been
"attached" to a :class:`.Session` but is not part of its pending
collection.
The :paramref:`~.relationship.load_on_pending` flag does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before a flush proceeds.
This flag is not intended for general use.
.. seealso::
:meth:`.Session.enable_relationship_loading` - this method
establishes "load on pending" behavior for the whole object, and
also allows loading on objects that remain transient or
detached.
:param order_by:
indicates the ordering that should be applied when loading these
items. :paramref:`~.relationship.order_by` is expected to refer to
one of the :class:`.Column` objects to which the target class is
mapped, or the attribute itself bound to the target class which
refers to the column.
:paramref:`~.relationship.order_by` may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a Python-evaluable string when using Declarative.
:param passive_deletes=False:
Indicates loading behavior during delete operations.
A value of True indicates that unloaded child items should not
be loaded during a delete operation on the parent. Normally,
when a parent item is deleted, all child items are loaded so
that they can either be marked as deleted, or have their
foreign key to the parent set to NULL. Marking this flag as
True usually implies an ON DELETE <CASCADE|SET NULL> rule is in
place which will handle updating/deleting child rows on the
database side.
Additionally, setting the flag to the string value 'all' will
disable the "nulling out" of the child foreign keys, when the parent
object is deleted and there is no delete or delete-orphan cascade
enabled. This is typically used when a triggering or error raise
scenario is in place on the database side. Note that the foreign
key attributes on in-session child objects will not be changed after
a flush occurs so this is a very special use-case setting.
Additionally, the "nulling out" will still occur if the child
object is de-associated with the parent.
.. seealso::
:ref:`passive_deletes` - Introductory documentation
and examples.
:param passive_updates=True:
Indicates the persistence behavior to take when a referenced
primary key value changes in place, indicating that the referencing
foreign key columns will also need their value changed.
When True, it is assumed that ``ON UPDATE CASCADE`` is configured on
the foreign key in the database, and that the database will
handle propagation of an UPDATE from a source column to
dependent rows. When False, the SQLAlchemy :func:`.relationship`
construct will attempt to emit its own UPDATE statements to
modify related targets. However note that SQLAlchemy **cannot**
emit an UPDATE for more than one level of cascade. Also,
setting this flag to False is not compatible in the case where
the database is in fact enforcing referential integrity, unless
those constraints are explicitly "deferred", if the target backend
supports it.
It is highly advised that an application which is employing
mutable primary keys keeps ``passive_updates`` set to True,
and instead uses the referential integrity features of the database
itself in order to handle the change efficiently and fully.
.. seealso::
:ref:`passive_updates` - Introductory documentation and
examples.
:paramref:`.mapper.passive_updates` - a similar flag which
takes effect for joined-table inheritance mappings.
:param post_update:
this indicates that the relationship should be handled by a
second UPDATE statement after an INSERT or before a
DELETE. Currently, it also will issue an UPDATE after the
instance was UPDATEd as well, although this technically should
be improved. This flag is used to handle saving bi-directional
dependencies between two individual rows (i.e. each row
references the other), where it would otherwise be impossible to
INSERT or DELETE both rows fully since one row exists before the
other. Use this flag when a particular mapping arrangement will
incur two rows that are dependent on each other, such as a table
that has a one-to-many relationship to a set of child rows, and
also has a column that references a single child row within that
list (i.e. both tables contain a foreign key to each other). If
a flush operation returns an error that a "cyclical
dependency" was detected, this is a cue that you might want to
use :paramref:`~.relationship.post_update` to "break" the cycle.
.. seealso::
:ref:`post_update` - Introductory documentation and examples.
:param primaryjoin:
a SQL expression that will be used as the primary
join of this child object against the parent object, or in a
many-to-many relationship the join of the primary object to the
association table. By default, this value is computed based on the
foreign key relationships of the parent and child tables (or
association table).
:paramref:`~.relationship.primaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. seealso::
:ref:`relationship_primaryjoin`
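          For example, restricting the collection to a subset of the related
          rows (class and column names illustrative)::

              boston_addresses = relationship(
                  "Address",
                  primaryjoin="and_(User.id == Address.user_id, "
                  "Address.city == 'Boston')")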
:param remote_side:
used for self-referential relationships, indicates the column or
list of columns that form the "remote side" of the relationship.
:paramref:`.relationship.remote_side` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. versionchanged:: 0.8
The :func:`.remote` annotation can also be applied
directly to the ``primaryjoin`` expression, which is an
alternate, more specific system of describing which columns in a
particular ``primaryjoin`` should be considered "remote".
.. seealso::
:ref:`self_referential` - in-depth explanation of how
:paramref:`~.relationship.remote_side`
is used to configure self-referential relationships.
:func:`.remote` - an annotation function that accomplishes the
same purpose as :paramref:`~.relationship.remote_side`, typically
when a custom :paramref:`~.relationship.primaryjoin` condition
is used.
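          For example, the classic adjacency list, where the "remote" side of
          the self-referential join is the parent row's primary key::

              class Node(Base):
                  __tablename__ = 'node'
                  id = Column(Integer, primary_key=True)
                  parent_id = Column(Integer, ForeignKey('node.id'))
                  parent = relationship("Node", remote_side=[id])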
:param query_class:
a :class:`.Query` subclass that will be used as the base of the
"appender query" returned by a "dynamic" relationship, that
is, a relationship that specifies ``lazy="dynamic"`` or was
otherwise constructed using the :func:`.orm.dynamic_loader`
function.
.. seealso::
:ref:`dynamic_relationship` - Introduction to "dynamic"
relationship loaders.
:param secondaryjoin:
a SQL expression that will be used as the join of
an association table to the child object. By default, this value is
computed based on the foreign key relationships of the association
and child tables.
:paramref:`~.relationship.secondaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. seealso::
:ref:`relationship_primaryjoin`
:param single_parent:
when True, installs a validator which will prevent objects
from being associated with more than one parent at a time.
This is used for many-to-one or many-to-many relationships that
should be treated either as one-to-one or one-to-many. Its usage
is optional, except for :func:`.relationship` constructs which
are many-to-one or many-to-many and also
specify the ``delete-orphan`` cascade option. The
:func:`.relationship` construct itself will raise an error
instructing when this option is required.
.. seealso::
:ref:`unitofwork_cascades` - includes detail on when the
:paramref:`~.relationship.single_parent` flag may be appropriate.
:param uselist:
a boolean that indicates if this property should be loaded as a
list or a scalar. In most cases, this value is determined
automatically by :func:`.relationship` at mapper configuration
time, based on the type and direction
of the relationship - one to many forms a list, many to one
forms a scalar, many to many is a list. If a scalar is desired
where normally a list would be present, such as a bi-directional
one-to-one relationship, set :paramref:`~.relationship.uselist` to
False.
The :paramref:`~.relationship.uselist` flag is also available on an
existing :func:`.relationship` construct as a read-only attribute,
which can be used to determine if this :func:`.relationship` deals
with collections or scalar attributes::
>>> User.addresses.property.uselist
True
.. seealso::
:ref:`relationships_one_to_one` - Introduction to the "one to
one" relationship pattern, which is typically when the
:paramref:`~.relationship.uselist` flag is needed.
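          For example, the scalar side of a bi-directional one-to-one
          (names illustrative)::

              child = relationship("Child", uselist=False,
                                   back_populates="parent")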
:param viewonly=False:
when set to True, the relationship is used only for loading objects,
and not for any persistence operation. A :func:`.relationship`
which specifies :paramref:`~.relationship.viewonly` can work
with a wider range of SQL operations within the
:paramref:`~.relationship.primaryjoin` condition, including
operations that feature the use of a variety of comparison operators
as well as SQL functions such as :func:`~.sql.expression.cast`. The
:paramref:`~.relationship.viewonly` flag is also of general use when
defining any kind of :func:`~.relationship` that doesn't represent
the full set of related objects, to prevent modifications of the
collection from resulting in persistence operations.
"""
super(RelationshipProperty, self).__init__()
self.uselist = uselist
self.argument = argument
self.secondary = secondary
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.post_update = post_update
self.direction = None
self.viewonly = viewonly
self.lazy = lazy
self.single_parent = single_parent
self._user_defined_foreign_keys = foreign_keys
self.collection_class = collection_class
self.passive_deletes = passive_deletes
self.cascade_backrefs = cascade_backrefs
self.passive_updates = passive_updates
self.remote_side = remote_side
self.enable_typechecks = enable_typechecks
self.query_class = query_class
self.innerjoin = innerjoin
self.distinct_target_key = distinct_target_key
self.doc = doc
self.active_history = active_history
self.join_depth = join_depth
self.local_remote_pairs = _local_remote_pairs
self.extension = extension
self.bake_queries = bake_queries
self.load_on_pending = load_on_pending
self.comparator_factory = comparator_factory or \
RelationshipProperty.Comparator
self.comparator = self.comparator_factory(self, None)
util.set_creation_order(self)
if info is not None:
self.info = info
self.strategy_key = (("lazy", self.lazy), )
self._reverse_property = set()
self.cascade = cascade if cascade is not False \
else "save-update, merge"
self.order_by = order_by
self.back_populates = back_populates
if self.back_populates:
if backref:
raise sa_exc.ArgumentError(
"backref and back_populates keyword arguments "
"are mutually exclusive")
self.backref = None
else:
self.backref = backref
def instrument_class(self, mapper):
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.RelationshipProperty` attributes.
See the documentation for :class:`.PropComparator` for a brief
overview of ORM level operator definition.
See also:
:class:`.PropComparator`
:class:`.ColumnProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
_of_type = None
def __init__(
self, prop, parentmapper, adapt_to_entity=None, of_type=None):
"""Construction of :class:`.RelationshipProperty.Comparator`
is internal to the ORM's attribute mechanics.
"""
self.prop = prop
self._parententity = parentmapper
self._adapt_to_entity = adapt_to_entity
if of_type:
self._of_type = of_type
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(self.property, self._parententity,
adapt_to_entity=adapt_to_entity,
of_type=self._of_type)
@util.memoized_property
def mapper(self):
"""The target :class:`.Mapper` referred to by this
:class:`.RelationshipProperty.Comparator`.
This is the "target" or "remote" side of the
:func:`.relationship`.
"""
return self.property.mapper
@util.memoized_property
def _parententity(self):
return self.property.parent
def _source_selectable(self):
if self._adapt_to_entity:
return self._adapt_to_entity.selectable
else:
return self.property.parent._with_polymorphic_selectable
def __clause_element__(self):
adapt_from = self._source_selectable()
if self._of_type:
of_type = inspect(self._of_type).mapper
else:
of_type = None
pj, sj, source, dest, \
secondary, target_adapter = self.property._create_joins(
source_selectable=adapt_from,
source_polymorphic=True,
of_type=of_type)
if sj is not None:
return pj & sj
else:
return pj
def of_type(self, cls):
"""Produce a construct that represents a particular 'subtype' of
attribute for the parent class.
Currently this is usable in conjunction with :meth:`.Query.join`
and :meth:`.Query.outerjoin`.
"""
return RelationshipProperty.Comparator(
self.property,
self._parententity,
adapt_to_entity=self._adapt_to_entity,
of_type=cls)
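        # Illustrative sketch (not part of the library), assuming a ``SubChild``
        # subclass mapped with inheritance:
        #
        #     session.query(Parent).\
        #         join(Parent.children.of_type(SubChild)).\
        #         filter(SubChild.flag == True)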
def in_(self, other):
"""Produce an IN clause - this is not implemented
for :func:`~.orm.relationship`-based attributes at this time.
"""
raise NotImplementedError('in_() not yet supported for '
'relationships. For a simple '
'many-to-one, use in_() against '
'the set of foreign key values.')
__hash__ = None
def __eq__(self, other):
"""Implement the ``==`` operator.
In a many-to-one context, such as::
MyClass.some_prop == <some object>
this will typically produce a
clause such as::
mytable.related_id == <some id>
Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use :meth:`~.RelationshipProperty.Comparator.contains`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce a NOT EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction in [ONETOMANY, MANYTOMANY]:
return ~self._criterion_exists()
else:
return _orm_annotate(self.property._optimized_compare(
None, adapt_source=self.adapter))
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection to an object or collection; "
"use contains() to test for membership.")
else:
return _orm_annotate(
self.property._optimized_compare(
other, adapt_source=self.adapter))
def _criterion_exists(self, criterion=None, **kwargs):
if getattr(self, '_of_type', None):
info = inspect(self._of_type)
target_mapper, to_selectable, is_aliased_class = \
info.mapper, info.selectable, info.is_aliased_class
if self.property._is_self_referential and not \
is_aliased_class:
to_selectable = to_selectable.alias()
single_crit = target_mapper._single_table_criterion
if single_crit is not None:
if criterion is not None:
criterion = single_crit & criterion
else:
criterion = single_crit
else:
is_aliased_class = False
to_selectable = None
if self.adapter:
source_selectable = self._source_selectable()
else:
source_selectable = None
pj, sj, source, dest, secondary, target_adapter = \
self.property._create_joins(
dest_polymorphic=True,
dest_selectable=to_selectable,
source_selectable=source_selectable)
for k in kwargs:
crit = getattr(self.property.mapper.class_, k) == kwargs[k]
if criterion is None:
criterion = crit
else:
criterion = criterion & crit
# annotate the *local* side of the join condition, in the case
# of pj + sj this is the full primaryjoin, in the case of just
# pj its the local side of the primaryjoin.
if sj is not None:
j = _orm_annotate(pj) & sj
else:
j = _orm_annotate(pj, exclude=self.property.remote_side)
if criterion is not None and target_adapter and not \
is_aliased_class:
# limit this adapter to annotated only?
criterion = target_adapter.traverse(criterion)
# only have the "joined left side" of what we
# return be subject to Query adaption. The right
# side of it is used for an exists() subquery and
# should not correlate or otherwise reach out
# to anything in the enclosing query.
if criterion is not None:
criterion = criterion._annotate(
{'no_replacement_traverse': True})
crit = j & sql.True_._ifnone(criterion)
ex = sql.exists([1], crit, from_obj=dest).correlate_except(dest)
if secondary is not None:
ex = ex.correlate_except(secondary)
return ex
def any(self, criterion=None, **kwargs):
"""Produce an expression that tests a collection against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.any(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.any` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.any` is particularly
useful for testing for empty collections::
session.query(MyClass).filter(
~MyClass.somereference.any()
)
will produce::
SELECT * FROM my_table WHERE
NOT EXISTS (SELECT 1 FROM related WHERE
related.my_id=my_table.id)
:meth:`~.RelationshipProperty.Comparator.any` is only
valid for collections, i.e. a :func:`.relationship`
that has ``uselist=True``. For scalar references,
use :meth:`~.RelationshipProperty.Comparator.has`.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'any()' not implemented for scalar "
"attributes. Use has()."
)
return self._criterion_exists(criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Produce an expression that tests a scalar reference against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.has(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE
related.id==my_table.related_id AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.has` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.has` is only
valid for scalar references, i.e. a :func:`.relationship`
that has ``uselist=False``. For collection references,
use :meth:`~.RelationshipProperty.Comparator.any`.
"""
if self.property.uselist:
raise sa_exc.InvalidRequestError(
"'has()' not implemented for collections. "
"Use any().")
return self._criterion_exists(criterion, **kwargs)
def contains(self, other, **kwargs):
"""Return a simple expression that tests a collection for
containment of a particular item.
:meth:`~.RelationshipProperty.Comparator.contains` is
only valid for a collection, i.e. a
:func:`~.orm.relationship` that implements
one-to-many or many-to-many with ``uselist=True``.
When used in a simple one-to-many context, an
expression like::
MyClass.contains(other)
Produces a clause like::
mytable.id == <some id>
Where ``<some id>`` is the value of the foreign key
attribute on ``other`` which refers to the primary
key of its parent object. From this it follows that
:meth:`~.RelationshipProperty.Comparator.contains` is
very useful when used with simple one-to-many
operations.
For many-to-many operations, the behavior of
:meth:`~.RelationshipProperty.Comparator.contains`
has more caveats. The association table will be
rendered in the statement, producing an "implicit"
join, that is, includes multiple tables in the FROM
clause which are equated in the WHERE clause::
query(MyClass).filter(MyClass.contains(other))
Produces a query like::
SELECT * FROM my_table, my_association_table AS
my_association_table_1 WHERE
my_table.id = my_association_table_1.parent_id
AND my_association_table_1.child_id = <some id>
Where ``<some id>`` would be the primary key of
``other``. From the above, it is clear that
:meth:`~.RelationshipProperty.Comparator.contains`
will **not** work with many-to-many collections when
used in queries that move beyond simple AND
conjunctions, such as multiple
:meth:`~.RelationshipProperty.Comparator.contains`
expressions joined by OR. In such cases subqueries or
explicit "outer joins" will need to be used instead.
See :meth:`~.RelationshipProperty.Comparator.any` for
a less-performant alternative using EXISTS, or refer
to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins`
for more details on constructing outer joins.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'contains' not implemented for scalar "
"attributes. Use ==")
clause = self.property._optimized_compare(
other, adapt_source=self.adapter)
if self.property.secondaryjoin is not None:
clause.negation_clause = \
self.__negated_contains_or_equals(other)
return clause
def __negated_contains_or_equals(self, other):
if self.property.direction == MANYTOONE:
state = attributes.instance_state(other)
def state_bindparam(x, state, col):
dict_ = state.dict
return sql.bindparam(
x, unique=True,
callable_=self.property._get_attr_w_warn_on_none(
col,
self.property.mapper._get_state_attr_by_column,
state, dict_, col, passive=attributes.PASSIVE_OFF
)
)
def adapt(col):
if self.adapter:
return self.adapter(col)
else:
return col
if self.property._use_get:
return sql.and_(*[
sql.or_(
adapt(x) != state_bindparam(adapt(x), state, y),
adapt(x) == None)
for (x, y) in self.property.local_remote_pairs])
criterion = sql.and_(*[
x == y for (x, y) in
zip(
self.property.mapper.primary_key,
self.property.mapper.primary_key_from_instance(other)
)
])
return ~self._criterion_exists(criterion)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a many-to-one context, such as::
MyClass.some_prop != <some object>
This will typically produce a clause such as::
mytable.related_id != <some id>
Where ``<some id>`` is the primary key of the
given object.
The ``!=`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use
:meth:`~.RelationshipProperty.Comparator.contains`
in conjunction with :func:`~.expression.not_`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` in
conjunction with :func:`~.expression.not_` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce an EXISTS clause.
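As an illustrative example (mapping names hypothetical), for a
many-to-one ``Address.user`` relationship, an expression like::
Address.user != some_user
will typically render as ``address.user_id != <some id>``, where
``<some id>`` is the primary key of ``some_user``.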
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction == MANYTOONE:
return _orm_annotate(~self.property._optimized_compare(
None, adapt_source=self.adapter))
else:
return self._criterion_exists()
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection"
" to an object or collection; use "
"contains() to test for membership.")
else:
return _orm_annotate(self.__negated_contains_or_equals(other))
@util.memoized_property
def property(self):
if mapperlib.Mapper._new_mappers:
mapperlib.Mapper._configure_all()
return self.prop
def _with_parent(self, instance, alias_secondary=True):
assert instance is not None
return self._optimized_compare(
instance, value_is_parent=True, alias_secondary=alias_secondary)
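# (Explanatory note, not part of the original source.)  For a comparison
# such as ``SomeClass.related == some_obj`` (names illustrative only),
# _optimized_compare() below reuses the lazy loader's precomputed join
# criterion and replaces its bound parameters with callables that read
# the corresponding attribute values from ``some_obj``'s InstanceState
# at statement execution time.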
def _optimized_compare(self, state, value_is_parent=False,
adapt_source=None,
alias_secondary=True):
if state is not None:
state = attributes.instance_state(state)
reverse_direction = not value_is_parent
if state is None:
return self._lazy_none_clause(
reverse_direction,
adapt_source=adapt_source)
if not reverse_direction:
criterion, bind_to_col = \
self._lazy_strategy._lazywhere, \
self._lazy_strategy._bind_to_col
else:
criterion, bind_to_col = \
self._lazy_strategy._rev_lazywhere, \
self._lazy_strategy._rev_bind_to_col
if reverse_direction:
mapper = self.mapper
else:
mapper = self.parent
dict_ = attributes.instance_dict(state.obj())
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
bindparam.callable = self._get_attr_w_warn_on_none(
bind_to_col[bindparam._identifying_key],
mapper._get_state_attr_by_column,
state, dict_,
bind_to_col[bindparam._identifying_key],
passive=attributes.PASSIVE_OFF)
if self.secondary is not None and alias_secondary:
criterion = ClauseAdapter(
self.secondary.alias()).\
traverse(criterion)
criterion = visitors.cloned_traverse(
criterion, {}, {'bindparam': visit_bindparam})
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def _get_attr_w_warn_on_none(self, column, fn, *arg, **kw):
def _go():
value = fn(*arg, **kw)
if value is None:
util.warn(
"Got None for value of column %s; this is unsupported "
"for a relationship comparison and will not "
"currently produce an IS comparison "
"(but may in a future release)" % column)
return value
return _go
def _lazy_none_clause(self, reverse_direction=False, adapt_source=None):
if not reverse_direction:
criterion, bind_to_col = \
self._lazy_strategy._lazywhere, \
self._lazy_strategy._bind_to_col
else:
criterion, bind_to_col = \
self._lazy_strategy._rev_lazywhere, \
self._lazy_strategy._rev_bind_to_col
criterion = adapt_criterion_to_null(criterion, bind_to_col)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
def merge(self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load, _recursive, _resolve_conflict_map):
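# (Explanatory note, not part of the original source.)  merge() copies the
# value of this relationship from a source instance to the destination
# instance during Session.merge(): collection-valued relationships are
# merged element by element (each child is itself merged into the session),
# while scalar references merge the single related object, or None.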
if load:
for r in self._reverse_property:
if (source_state, r) in _recursive:
return
if "merge" not in self._cascade:
return
if self.key not in source_dict:
return
if self.uselist:
instances = source_state.get_impl(self.key).\
get(source_state, source_dict)
if hasattr(instances, '_sa_adapter'):
# convert collections to adapters to get a true iterator
instances = instances._sa_adapter
if load:
# for a full merge, pre-load the destination collection,
# so that individual _merge of each item pulls from identity
# map for those already present.
# also assumes CollectionAttributeImpl behavior of loading
# "old" list in any case
dest_state.get_impl(self.key).get(dest_state, dest_dict)
dest_list = []
for current in instances:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state, current_dict,
load=load, _recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map)
if obj is not None:
dest_list.append(obj)
if not load:
coll = attributes.init_state_collection(dest_state,
dest_dict, self.key)
for c in dest_list:
coll.append_without_event(c)
else:
dest_state.get_impl(self.key).set(
dest_state, dest_dict, dest_list,
_adapt=False)
else:
current = source_dict[self.key]
if current is not None:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state, current_dict,
load=load, _recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map)
else:
obj = None
if not load:
dest_dict[self.key] = obj
else:
dest_state.get_impl(self.key).set(dest_state,
dest_dict, obj, None)
def _value_as_iterable(self, state, dict_, key,
passive=attributes.PASSIVE_OFF):
"""Return a list of tuples (state, obj) for the given
key.
returns an empty list if the value is None/empty/PASSIVE_NO_RESULT
"""
impl = state.manager[key].impl
x = impl.get(state, dict_, passive=passive)
if x is attributes.PASSIVE_NO_RESULT or x is None:
return []
elif hasattr(impl, 'get_collection'):
return [
(attributes.instance_state(o), o) for o in
impl.get_collection(state, dict_, x, passive=passive)
]
else:
return [(attributes.instance_state(x), x)]
def cascade_iterator(self, type_, state, dict_,
visited_states, halt_on=None):
# assert type_ in self._cascade
# only actively lazy load on the 'delete' cascade
if type_ != 'delete' or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
if type_ == 'save-update':
tuples = state.manager[self.key].impl.\
get_all_pending(state, dict_)
else:
tuples = self._value_as_iterable(state, dict_, self.key,
passive=passive)
skip_pending = type_ == 'refresh-expire' and 'delete-orphan' \
not in self._cascade
for instance_state, c in tuples:
if instance_state in visited_states:
continue
if c is None:
# would like to emit a warning here, but
# would not be consistent with collection.append(None)
# current behavior of silently skipping.
# see [ticket:2229]
continue
instance_dict = attributes.instance_dict(c)
if halt_on and halt_on(instance_state):
continue
if skip_pending and not instance_state.key:
continue
instance_mapper = instance_state.manager.mapper
if not instance_mapper.isa(self.mapper.class_manager.mapper):
raise AssertionError("Attribute '%s' on class '%s' "
"doesn't handle objects "
"of type '%s'" % (
self.key,
self.parent.class_,
c.__class__
))
visited_states.add(instance_state)
yield c, instance_mapper, instance_state, instance_dict
def _add_reverse_property(self, key):
other = self.mapper.get_property(key, _configure_mappers=False)
self._reverse_property.add(other)
other._reverse_property.add(self)
if not other.mapper.common_parent(self.parent):
raise sa_exc.ArgumentError(
'reverse_property %r on '
'relationship %s references relationship %s, which '
'does not reference mapper %s' %
(key, self, other, self.parent))
if self.direction in (ONETOMANY, MANYTOONE) and self.direction \
== other.direction:
raise sa_exc.ArgumentError(
'%s and back-reference %s are '
'both of the same direction %r. Did you mean to '
'set remote_side on the many-to-one side ?' %
(other, self, self.direction))
@util.memoized_property
def mapper(self):
"""Return the targeted :class:`.Mapper` for this
:class:`.RelationshipProperty`.
This is a lazy-initializing static attribute.
"""
if util.callable(self.argument) and \
not isinstance(self.argument, (type, mapperlib.Mapper)):
argument = self.argument()
else:
argument = self.argument
if isinstance(argument, type):
mapper_ = mapperlib.class_mapper(argument,
configure=False)
elif isinstance(self.argument, mapperlib.Mapper):
mapper_ = argument
else:
raise sa_exc.ArgumentError(
"relationship '%s' expects "
"a class or a mapper argument (received: %s)"
% (self.key, type(argument)))
return mapper_
@util.memoized_property
@util.deprecated("0.7", "Use .target")
def table(self):
"""Return the selectable linked to this
:class:`.RelationshipProperty` object's target
:class:`.Mapper`.
"""
return self.target
def do_init(self):
self._check_conflicts()
self._process_dependent_arguments()
self._setup_join_conditions()
self._check_cascade_settings(self._cascade)
self._post_init()
self._generate_backref()
self._join_condition._warn_for_conflicting_sync_targets()
super(RelationshipProperty, self).do_init()
self._lazy_strategy = self._get_strategy((("lazy", "select"),))
def _process_dependent_arguments(self):
"""Convert incoming configuration arguments to their
proper form.
Callables are resolved, ORM annotations removed.
"""
# accept callables for other attributes which may require
# deferred initialization. This technique is used
# by declarative "string configs" and some recipes.
for attr in (
'order_by', 'primaryjoin', 'secondaryjoin',
'secondary', '_user_defined_foreign_keys', 'remote_side',
):
attr_value = getattr(self, attr)
if util.callable(attr_value):
setattr(self, attr, attr_value())
# remove "annotations" which are present if mapped class
# descriptors are used to create the join expression.
for attr in 'primaryjoin', 'secondaryjoin':
val = getattr(self, attr)
if val is not None:
setattr(self, attr, _orm_deannotate(
expression._only_column_elements(val, attr))
)
# ensure expressions in self.order_by, foreign_keys,
# remote_side are all columns, not strings.
if self.order_by is not False and self.order_by is not None:
self.order_by = [
expression._only_column_elements(x, "order_by")
for x in
util.to_list(self.order_by)]
self._user_defined_foreign_keys = \
util.column_set(
expression._only_column_elements(x, "foreign_keys")
for x in util.to_column_set(
self._user_defined_foreign_keys
))
self.remote_side = \
util.column_set(
expression._only_column_elements(x, "remote_side")
for x in
util.to_column_set(self.remote_side))
self.target = self.mapper.mapped_table
def _setup_join_conditions(self):
self._join_condition = jc = JoinCondition(
parent_selectable=self.parent.mapped_table,
child_selectable=self.mapper.mapped_table,
parent_local_selectable=self.parent.local_table,
child_local_selectable=self.mapper.local_table,
primaryjoin=self.primaryjoin,
secondary=self.secondary,
secondaryjoin=self.secondaryjoin,
parent_equivalents=self.parent._equivalent_columns,
child_equivalents=self.mapper._equivalent_columns,
consider_as_foreign_keys=self._user_defined_foreign_keys,
local_remote_pairs=self.local_remote_pairs,
remote_side=self.remote_side,
self_referential=self._is_self_referential,
prop=self,
support_sync=not self.viewonly,
can_be_synced_fn=self._columns_are_mapped
)
self.primaryjoin = jc.deannotated_primaryjoin
self.secondaryjoin = jc.deannotated_secondaryjoin
self.direction = jc.direction
self.local_remote_pairs = jc.local_remote_pairs
self.remote_side = jc.remote_columns
self.local_columns = jc.local_columns
self.synchronize_pairs = jc.synchronize_pairs
self._calculated_foreign_keys = jc.foreign_key_columns
self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs
def _check_conflicts(self):
"""Test that this relationship is legal, warn about
inheritance conflicts."""
if self.parent.non_primary and not mapperlib.class_mapper(
self.parent.class_,
configure=False).has_property(self.key):
raise sa_exc.ArgumentError(
"Attempting to assign a new "
"relationship '%s' to a non-primary mapper on "
"class '%s'. New relationships can only be added "
"to the primary mapper, i.e. the very first mapper "
"created for class '%s' " %
(self.key, self.parent.class_.__name__,
self.parent.class_.__name__))
def _get_cascade(self):
"""Return the current cascade setting for this
:class:`.RelationshipProperty`.
"""
return self._cascade
def _set_cascade(self, cascade):
cascade = CascadeOptions(cascade)
if 'mapper' in self.__dict__:
self._check_cascade_settings(cascade)
self._cascade = cascade
if self._dependency_processor:
self._dependency_processor.cascade = cascade
cascade = property(_get_cascade, _set_cascade)
def _check_cascade_settings(self, cascade):
if cascade.delete_orphan and not self.single_parent \
and (self.direction is MANYTOMANY or self.direction
is MANYTOONE):
raise sa_exc.ArgumentError(
'On %s, delete-orphan cascade is not supported '
'on a many-to-many or many-to-one relationship '
'when single_parent is not set. Set '
'single_parent=True on the relationship().'
% self)
if self.direction is MANYTOONE and self.passive_deletes:
util.warn("On %s, 'passive_deletes' is normally configured "
"on one-to-many, one-to-one, many-to-many "
"relationships only."
% self)
if self.passive_deletes == 'all' and \
("delete" in cascade or
"delete-orphan" in cascade):
raise sa_exc.ArgumentError(
"On %s, can't set passive_deletes='all' in conjunction "
"with 'delete' or 'delete-orphan' cascade" % self)
if cascade.delete_orphan:
self.mapper.primary_mapper()._delete_orphans.append(
(self.key, self.parent.class_)
)
def _persists_for(self, mapper):
"""Return True if this property will persist values on behalf
of the given mapper.
"""
return self.key in mapper.relationships and \
mapper.relationships[self.key] is self
def _columns_are_mapped(self, *cols):
"""Return True if all columns in the given collection are
mapped by the tables referenced by this :class:`.RelationshipProperty`.
"""
for c in cols:
if self.secondary is not None \
and self.secondary.c.contains_column(c):
continue
if not self.parent.mapped_table.c.contains_column(c) and \
not self.target.c.contains_column(c):
return False
return True
def _generate_backref(self):
"""Interpret the 'backref' instruction to create a
:func:`.relationship` complementary to this one."""
if self.parent.non_primary:
return
if self.backref is not None and not self.back_populates:
if isinstance(self.backref, util.string_types):
backref_key, kwargs = self.backref, {}
else:
backref_key, kwargs = self.backref
mapper = self.mapper.primary_mapper()
if not mapper.concrete:
check = set(mapper.iterate_to_root()).\
union(mapper.self_and_descendants)
for m in check:
if m.has_property(backref_key) and not m.concrete:
raise sa_exc.ArgumentError(
"Error creating backref "
"'%s' on relationship '%s': property of that "
"name exists on mapper '%s'" %
(backref_key, self, m))
# determine primaryjoin/secondaryjoin for the
# backref. Use the one we had, so that
# a custom join doesn't have to be specified in
# both directions.
if self.secondary is not None:
# for many to many, just switch primaryjoin/
# secondaryjoin. use the annotated
# pj/sj on the _join_condition.
pj = kwargs.pop(
'primaryjoin',
self._join_condition.secondaryjoin_minus_local)
sj = kwargs.pop(
'secondaryjoin',
self._join_condition.primaryjoin_minus_local)
else:
pj = kwargs.pop(
'primaryjoin',
self._join_condition.primaryjoin_reverse_remote)
sj = kwargs.pop('secondaryjoin', None)
if sj:
raise sa_exc.InvalidRequestError(
"Can't assign 'secondaryjoin' on a backref "
"against a non-secondary relationship."
)
foreign_keys = kwargs.pop('foreign_keys',
self._user_defined_foreign_keys)
parent = self.parent.primary_mapper()
kwargs.setdefault('viewonly', self.viewonly)
kwargs.setdefault('post_update', self.post_update)
kwargs.setdefault('passive_updates', self.passive_updates)
self.back_populates = backref_key
relationship = RelationshipProperty(
parent, self.secondary,
pj, sj,
foreign_keys=foreign_keys,
back_populates=self.key,
**kwargs)
mapper._configure_property(backref_key, relationship)
if self.back_populates:
self._add_reverse_property(self.back_populates)
def _post_init(self):
if self.uselist is None:
self.uselist = self.direction is not MANYTOONE
if not self.viewonly:
self._dependency_processor = \
dependency.DependencyProcessor.from_relationship(self)
@util.memoized_property
def _use_get(self):
"""memoize the 'use_get' attribute of this RelationshipLoader's
lazyloader."""
strategy = self._lazy_strategy
return strategy.use_get
@util.memoized_property
def _is_self_referential(self):
return self.mapper.common_parent(self.parent)
def _create_joins(self, source_polymorphic=False,
source_selectable=None, dest_polymorphic=False,
dest_selectable=None, of_type=None):
if source_selectable is None:
if source_polymorphic and self.parent.with_polymorphic:
source_selectable = self.parent._with_polymorphic_selectable
aliased = False
if dest_selectable is None:
if dest_polymorphic and self.mapper.with_polymorphic:
dest_selectable = self.mapper._with_polymorphic_selectable
aliased = True
else:
dest_selectable = self.mapper.mapped_table
if self._is_self_referential and source_selectable is None:
dest_selectable = dest_selectable.alias()
aliased = True
else:
aliased = True
dest_mapper = of_type or self.mapper
single_crit = dest_mapper._single_table_criterion
aliased = aliased or (source_selectable is not None)
primaryjoin, secondaryjoin, secondary, target_adapter, dest_selectable = \
self._join_condition.join_targets(
source_selectable, dest_selectable, aliased, single_crit
)
if source_selectable is None:
source_selectable = self.parent.local_table
if dest_selectable is None:
dest_selectable = self.mapper.local_table
return (primaryjoin, secondaryjoin, source_selectable,
dest_selectable, secondary, target_adapter)
def _annotate_columns(element, annotations):
def clone(elem):
if isinstance(elem, expression.ColumnClause):
elem = elem._annotate(annotations.copy())
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
return element
class JoinCondition(object):
def __init__(self,
parent_selectable,
child_selectable,
parent_local_selectable,
child_local_selectable,
primaryjoin=None,
secondary=None,
secondaryjoin=None,
parent_equivalents=None,
child_equivalents=None,
consider_as_foreign_keys=None,
local_remote_pairs=None,
remote_side=None,
self_referential=False,
prop=None,
support_sync=True,
can_be_synced_fn=lambda *c: True
):
self.parent_selectable = parent_selectable
self.parent_local_selectable = parent_local_selectable
self.child_selectable = child_selectable
self.child_local_selectable = child_local_selectable
self.parent_equivalents = parent_equivalents
self.child_equivalents = child_equivalents
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.secondary = secondary
self.consider_as_foreign_keys = consider_as_foreign_keys
self._local_remote_pairs = local_remote_pairs
self._remote_side = remote_side
self.prop = prop
self.self_referential = self_referential
self.support_sync = support_sync
self.can_be_synced_fn = can_be_synced_fn
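# (Summary comment, not part of the original source.)  The calls below run
# the join analysis in order: resolve primaryjoin/secondaryjoin, annotate
# "foreign", "remote" and "local" columns, derive the local/remote and
# synchronize pairs, validate the foreign-key columns, determine the
# relationship direction, check the remote side, and log the results.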
self._determine_joins()
self._annotate_fks()
self._annotate_remote()
self._annotate_local()
self._setup_pairs()
self._check_foreign_cols(self.primaryjoin, True)
if self.secondaryjoin is not None:
self._check_foreign_cols(self.secondaryjoin, False)
self._determine_direction()
self._check_remote_side()
self._log_joins()
def _log_joins(self):
if self.prop is None:
return
log = self.prop.logger
log.info('%s setup primary join %s', self.prop,
self.primaryjoin)
log.info('%s setup secondary join %s', self.prop,
self.secondaryjoin)
log.info('%s synchronize pairs [%s]', self.prop,
','.join('(%s => %s)' % (l, r) for (l, r) in
self.synchronize_pairs))
log.info('%s secondary synchronize pairs [%s]', self.prop,
','.join('(%s => %s)' % (l, r) for (l, r) in
self.secondary_synchronize_pairs or []))
log.info('%s local/remote pairs [%s]', self.prop,
','.join('(%s / %s)' % (l, r) for (l, r) in
self.local_remote_pairs))
log.info('%s remote columns [%s]', self.prop,
','.join('%s' % col for col in self.remote_columns)
)
log.info('%s local columns [%s]', self.prop,
','.join('%s' % col for col in self.local_columns)
)
log.info('%s relationship direction %s', self.prop,
self.direction)
def _determine_joins(self):
"""Determine the 'primaryjoin' and 'secondaryjoin' attributes,
if not passed to the constructor already.
This is based on analysis of the foreign key relationships
between the parent and target mapped selectables.
"""
if self.secondaryjoin is not None and self.secondary is None:
raise sa_exc.ArgumentError(
"Property %s specified with secondary "
"join condition but "
"no secondary argument" % self.prop)
# find a join between the given mapper's mapped table and
# the given table. will try the mapper's local table first
# for more specificity, then if not found will try the more
# general mapped table, which in the case of inheritance is
# a join.
try:
consider_as_foreign_keys = self.consider_as_foreign_keys or None
if self.secondary is not None:
if self.secondaryjoin is None:
self.secondaryjoin = \
join_condition(
self.child_selectable,
self.secondary,
a_subset=self.child_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys
)
if self.primaryjoin is None:
self.primaryjoin = \
join_condition(
self.parent_selectable,
self.secondary,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys
)
else:
if self.primaryjoin is None:
self.primaryjoin = \
join_condition(
self.parent_selectable,
self.child_selectable,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys
)
except sa_exc.NoForeignKeysError:
if self.secondary is not None:
raise sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify 'primaryjoin' and 'secondaryjoin' "
"expressions." % (self.prop, self.secondary))
else:
raise sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify a 'primaryjoin' expression." % self.prop)
except sa_exc.AmbiguousForeignKeysError:
if self.secondary is not None:
raise sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables via secondary table '%s'. "
"Specify the 'foreign_keys' "
"argument, providing a list of those columns which "
"should be counted as containing a foreign key "
"reference from the secondary table to each of the "
"parent and child tables."
% (self.prop, self.secondary))
else:
raise sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables. Specify the "
"'foreign_keys' argument, providing a list of those "
"columns which should be counted as containing a "
"foreign key reference to the parent table."
% self.prop)
@property
def primaryjoin_minus_local(self):
return _deep_deannotate(self.primaryjoin, values=("local", "remote"))
@property
def secondaryjoin_minus_local(self):
return _deep_deannotate(self.secondaryjoin,
values=("local", "remote"))
@util.memoized_property
def primaryjoin_reverse_remote(self):
"""Return the primaryjoin condition suitable for the
"reverse" direction.
If the primaryjoin was delivered here with pre-existing
"remote" annotations, the local/remote annotations
are reversed. Otherwise, the local/remote annotations
are removed.
"""
if self._has_remote_annotations:
def replace(element):
if "remote" in element._annotations:
v = element._annotations.copy()
del v['remote']
v['local'] = True
return element._with_annotations(v)
elif "local" in element._annotations:
v = element._annotations.copy()
del v['local']
v['remote'] = True
return element._with_annotations(v)
return visitors.replacement_traverse(
self.primaryjoin, {}, replace)
else:
if self._has_foreign_annotations:
# TODO: coverage
return _deep_deannotate(self.primaryjoin,
values=("local", "remote"))
else:
return _deep_deannotate(self.primaryjoin)
def _has_annotation(self, clause, annotation):
for col in visitors.iterate(clause, {}):
if annotation in col._annotations:
return True
else:
return False
@util.memoized_property
def _has_foreign_annotations(self):
return self._has_annotation(self.primaryjoin, "foreign")
@util.memoized_property
def _has_remote_annotations(self):
return self._has_annotation(self.primaryjoin, "remote")
def _annotate_fks(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'foreign' annotations marking columns
considered as foreign.
"""
if self._has_foreign_annotations:
return
if self.consider_as_foreign_keys:
self._annotate_from_fk_list()
else:
self._annotate_present_fks()
def _annotate_from_fk_list(self):
def check_fk(col):
if col in self.consider_as_foreign_keys:
return col._annotate({"foreign": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin,
{},
check_fk
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin,
{},
check_fk
)
def _annotate_present_fks(self):
if self.secondary is not None:
secondarycols = util.column_set(self.secondary.c)
else:
secondarycols = set()
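# (Explanatory comment, not part of the original source.)  is_foreign()
# below is the heuristic used when no explicit foreign_keys were given:
# in a comparison of two Column objects, the one carrying a ForeignKey
# reference to the other is treated as "foreign"; when a secondary table
# is present, the column belonging to the secondary table is chosen.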
def is_foreign(a, b):
if isinstance(a, schema.Column) and \
isinstance(b, schema.Column):
if a.references(b):
return a
elif b.references(a):
return b
if secondarycols:
if a in secondarycols and b not in secondarycols:
return a
elif b in secondarycols and a not in secondarycols:
return b
def visit_binary(binary):
if not isinstance(binary.left, sql.ColumnElement) or \
not isinstance(binary.right, sql.ColumnElement):
return
if "foreign" not in binary.left._annotations and \
"foreign" not in binary.right._annotations:
col = is_foreign(binary.left, binary.right)
if col is not None:
if col.compare(binary.left):
binary.left = binary.left._annotate(
{"foreign": True})
elif col.compare(binary.right):
binary.right = binary.right._annotate(
{"foreign": True})
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin,
{},
{"binary": visit_binary}
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.cloned_traverse(
self.secondaryjoin,
{},
{"binary": visit_binary}
)
def _refers_to_parent_table(self):
"""Return True if the join condition contains column
comparisons where both columns are in both tables.
"""
pt = self.parent_selectable
mt = self.child_selectable
result = [False]
def visit_binary(binary):
c, f = binary.left, binary.right
if (
isinstance(c, expression.ColumnClause) and
isinstance(f, expression.ColumnClause) and
pt.is_derived_from(c.table) and
pt.is_derived_from(f.table) and
mt.is_derived_from(c.table) and
mt.is_derived_from(f.table)
):
result[0] = True
visitors.traverse(
self.primaryjoin,
{},
{"binary": visit_binary}
)
return result[0]
def _tables_overlap(self):
"""Return True if parent/child tables have some overlap."""
return selectables_overlap(
self.parent_selectable, self.child_selectable)
def _annotate_remote(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'remote' annotations marking columns
considered as part of the 'remote' side.
"""
if self._has_remote_annotations:
return
if self.secondary is not None:
self._annotate_remote_secondary()
elif self._local_remote_pairs or self._remote_side:
self._annotate_remote_from_args()
elif self._refers_to_parent_table():
self._annotate_selfref(lambda col: "foreign" in col._annotations, False)
elif self._tables_overlap():
self._annotate_remote_with_overlap()
else:
self._annotate_remote_distinct_selectables()
def _annotate_remote_secondary(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when 'secondary' is present.
"""
def repl(element):
if self.secondary.c.contains_column(element):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, repl)
def _annotate_selfref(self, fn, remote_side_given):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the relationship is detected as self-referential.
"""
def visit_binary(binary):
equated = binary.left.compare(binary.right)
if isinstance(binary.left, expression.ColumnClause) and \
isinstance(binary.right, expression.ColumnClause):
# assume one to many - FKs are "remote"
if fn(binary.left):
binary.left = binary.left._annotate({"remote": True})
if fn(binary.right) and not equated:
binary.right = binary.right._annotate(
{"remote": True})
elif not remote_side_given:
self._warn_non_column_elements()
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {},
{"binary": visit_binary})
def _annotate_remote_from_args(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the 'remote_side' or '_local_remote_pairs'
arguments are used.
"""
if self._local_remote_pairs:
if self._remote_side:
raise sa_exc.ArgumentError(
"remote_side argument is redundant "
"against more detailed _local_remote_side "
"argument.")
remote_side = [r for (l, r) in self._local_remote_pairs]
else:
remote_side = self._remote_side
if self._refers_to_parent_table():
self._annotate_selfref(lambda col: col in remote_side, True)
else:
def repl(element):
if element in remote_side:
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
def _annotate_remote_with_overlap(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables have some set of
tables in common, though is not a fully self-referential
relationship.
"""
def visit_binary(binary):
binary.left, binary.right = proc_left_right(binary.left,
binary.right)
binary.right, binary.left = proc_left_right(binary.right,
binary.left)
check_entities = self.prop is not None and \
self.prop.mapper is not self.prop.parent
def proc_left_right(left, right):
if isinstance(left, expression.ColumnClause) and \
isinstance(right, expression.ColumnClause):
if self.child_selectable.c.contains_column(right) and \
self.parent_selectable.c.contains_column(left):
right = right._annotate({"remote": True})
elif check_entities and \
right._annotations.get('parentmapper') is self.prop.mapper:
right = right._annotate({"remote": True})
elif check_entities and \
left._annotations.get('parentmapper') is self.prop.mapper:
left = left._annotate({"remote": True})
else:
self._warn_non_column_elements()
return left, right
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {},
{"binary": visit_binary})
def _annotate_remote_distinct_selectables(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables are entirely
separate.
"""
def repl(element):
if self.child_selectable.c.contains_column(element) and \
(not self.parent_local_selectable.c.
contains_column(element) or
self.child_local_selectable.c.
contains_column(element)):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
def _warn_non_column_elements(self):
util.warn(
"Non-simple column elements in primary "
"join condition for property %s - consider using "
"remote() annotations to mark the remote side."
% self.prop
)
def _annotate_local(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'local' annotations.
This annotates all column elements found
simultaneously in the parent table
and the join condition that don't have a
'remote' annotation set up from
_annotate_remote() or user-defined.
"""
if self._has_annotation(self.primaryjoin, "local"):
return
if self._local_remote_pairs:
local_side = util.column_set([l for (l, r)
in self._local_remote_pairs])
else:
local_side = util.column_set(self.parent_selectable.c)
def locals_(elem):
if "remote" not in elem._annotations and \
elem in local_side:
return elem._annotate({"local": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, locals_
)
def _check_remote_side(self):
if not self.local_remote_pairs:
raise sa_exc.ArgumentError(
'Relationship %s could '
'not determine any unambiguous local/remote column '
'pairs based on join condition and remote_side '
'arguments. '
'Consider using the remote() annotation to '
'accurately mark those elements of the join '
'condition that are on the remote side of '
'the relationship.' % (self.prop, ))
def _check_foreign_cols(self, join_condition, primary):
"""Check the foreign key columns collected and emit error
messages."""
can_sync = False
foreign_cols = self._gather_columns_with_annotation(
join_condition, "foreign")
has_foreign = bool(foreign_cols)
if primary:
can_sync = bool(self.synchronize_pairs)
else:
can_sync = bool(self.secondary_synchronize_pairs)
if self.support_sync and can_sync or \
(not self.support_sync and has_foreign):
return
# from here below is just determining the best error message
# to report. Check for a join condition using any operator
# (not just ==), perhaps they need to turn on "viewonly=True".
if self.support_sync and has_foreign and not can_sync:
err = "Could not locate any simple equality expressions "\
"involving locally mapped foreign key columns for "\
"%s join condition "\
"'%s' on relationship %s." % (
primary and 'primary' or 'secondary',
join_condition,
self.prop
)
err += \
" Ensure that referencing columns are associated "\
"with a ForeignKey or ForeignKeyConstraint, or are "\
"annotated in the join condition with the foreign() "\
"annotation. To allow comparison operators other than "\
"'==', the relationship can be marked as viewonly=True."
raise sa_exc.ArgumentError(err)
else:
err = "Could not locate any relevant foreign key columns "\
"for %s join condition '%s' on relationship %s." % (
primary and 'primary' or 'secondary',
join_condition,
self.prop
)
err += \
' Ensure that referencing columns are associated '\
'with a ForeignKey or ForeignKeyConstraint, or are '\
'annotated in the join condition with the foreign() '\
'annotation.'
raise sa_exc.ArgumentError(err)
def _determine_direction(self):
"""Determine if this relationship is one to many, many to one,
many to many.
"""
if self.secondaryjoin is not None:
self.direction = MANYTOMANY
else:
parentcols = util.column_set(self.parent_selectable.c)
targetcols = util.column_set(self.child_selectable.c)
# fk collection which suggests ONETOMANY.
onetomany_fk = targetcols.intersection(
self.foreign_key_columns)
# fk collection which suggests MANYTOONE.
manytoone_fk = parentcols.intersection(
self.foreign_key_columns)
if onetomany_fk and manytoone_fk:
# fks on both sides. test for overlap of local/remote
# with foreign key.
# we will gather columns directly from their annotations
# without deannotating, so that we can distinguish on a column
# that refers to itself.
# 1. columns that are both remote and FK suggest
# onetomany.
onetomany_local = self._gather_columns_with_annotation(
self.primaryjoin, "remote", "foreign")
# 2. columns that are FK but are not remote (e.g. local)
# suggest manytoone.
manytoone_local = set([c for c in
self._gather_columns_with_annotation(
self.primaryjoin,
"foreign")
if "remote" not in c._annotations])
# 3. if both collections are present, remove columns that
# refer to themselves. This is for the case of
# and_(Me.id == Me.remote_id, Me.version == Me.version)
if onetomany_local and manytoone_local:
self_equated = self.remote_columns.intersection(
self.local_columns
)
onetomany_local = onetomany_local.difference(self_equated)
manytoone_local = manytoone_local.difference(self_equated)
# at this point, if only one or the other collection is
# present, we know the direction, otherwise it's still
# ambiguous.
if onetomany_local and not manytoone_local:
self.direction = ONETOMANY
elif manytoone_local and not onetomany_local:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship"
" direction for relationship '%s' - foreign "
"key columns within the join condition are present "
"in both the parent and the child's mapped tables. "
"Ensure that only those columns referring "
"to a parent column are marked as foreign, "
"either via the foreign() annotation or "
"via the foreign_keys argument." % self.prop)
elif onetomany_fk:
self.direction = ONETOMANY
elif manytoone_fk:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship "
"direction for relationship '%s' - foreign "
"key columns are present in neither the parent "
"nor the child's mapped tables" % self.prop)
def _deannotate_pairs(self, collection):
"""provide deannotation for the various lists of
pairs, so that using them in hashes doesn't incur
high-overhead __eq__() comparisons against
original columns mapped.
"""
return [(x._deannotate(), y._deannotate())
for x, y in collection]
def _setup_pairs(self):
sync_pairs = []
lrp = util.OrderedSet([])
secondary_sync_pairs = []
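# (Explanatory comment, not part of the original source.)  visit_binary()
# below collects each equated (local, remote) column pair into ``lrp``,
# and, for plain "==" comparisons that may be synchronized, records a
# (parent, foreign) pair in the primary or secondary synchronize-pairs
# collection.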
def go(joincond, collection):
def visit_binary(binary, left, right):
if "remote" in right._annotations and \
"remote" not in left._annotations and \
self.can_be_synced_fn(left):
lrp.add((left, right))
elif "remote" in left._annotations and \
"remote" not in right._annotations and \
self.can_be_synced_fn(right):
lrp.add((right, left))
if binary.operator is operators.eq and \
self.can_be_synced_fn(left, right):
if "foreign" in right._annotations:
collection.append((left, right))
elif "foreign" in left._annotations:
collection.append((right, left))
visit_binary_product(visit_binary, joincond)
for joincond, collection in [
(self.primaryjoin, sync_pairs),
(self.secondaryjoin, secondary_sync_pairs)
]:
if joincond is None:
continue
go(joincond, collection)
self.local_remote_pairs = self._deannotate_pairs(lrp)
self.synchronize_pairs = self._deannotate_pairs(sync_pairs)
self.secondary_synchronize_pairs = \
self._deannotate_pairs(secondary_sync_pairs)
_track_overlapping_sync_targets = weakref.WeakKeyDictionary()
def _warn_for_conflicting_sync_targets(self):
if not self.support_sync:
return
# we would like to detect if we are synchronizing any column
# pairs in conflict with another relationship that wishes to sync
# an entirely different column to the same target. This is a
# very rare edge case so we will try to minimize the memory/overhead
# impact of this check
for from_, to_ in [
(from_, to_) for (from_, to_) in self.synchronize_pairs
] + [
(from_, to_) for (from_, to_) in self.secondary_synchronize_pairs
]:
# save ourselves a ton of memory and overhead by only
# considering columns that are subject to overlapping
# FK constraints at the core level. This condition can arise
# if multiple relationships overlap foreign() directly, but
# we're going to assume it's typically a ForeignKeyConstraint-
# level configuration that benefits from this warning.
if len(to_.foreign_keys) < 2:
continue
if to_ not in self._track_overlapping_sync_targets:
self._track_overlapping_sync_targets[to_] = \
weakref.WeakKeyDictionary({self.prop: from_})
else:
other_props = []
prop_to_from = self._track_overlapping_sync_targets[to_]
for pr, fr_ in prop_to_from.items():
if pr.mapper in mapperlib._mapper_registry and \
(
self.prop._persists_for(pr.parent) or
pr._persists_for(self.prop.parent)
) and \
fr_ is not from_ and \
pr not in self.prop._reverse_property:
other_props.append((pr, fr_))
if other_props:
util.warn(
"relationship '%s' will copy column %s to column %s, "
"which conflicts with relationship(s): %s. "
"Consider applying "
"viewonly=True to read-only relationships, or provide "
"a primaryjoin condition marking writable columns "
"with the foreign() annotation." % (
self.prop,
from_, to_,
", ".join(
"'%s' (copies %s to %s)" % (pr, fr_, to_)
for (pr, fr_) in other_props)
)
)
self._track_overlapping_sync_targets[to_][self.prop] = from_
@util.memoized_property
def remote_columns(self):
return self._gather_join_annotations("remote")
@util.memoized_property
def local_columns(self):
return self._gather_join_annotations("local")
@util.memoized_property
def foreign_key_columns(self):
return self._gather_join_annotations("foreign")
@util.memoized_property
def deannotated_primaryjoin(self):
return _deep_deannotate(self.primaryjoin)
@util.memoized_property
def deannotated_secondaryjoin(self):
if self.secondaryjoin is not None:
return _deep_deannotate(self.secondaryjoin)
else:
return None
def _gather_join_annotations(self, annotation):
s = set(
self._gather_columns_with_annotation(
self.primaryjoin, annotation)
)
if self.secondaryjoin is not None:
s.update(
self._gather_columns_with_annotation(
self.secondaryjoin, annotation)
)
return set([x._deannotate() for x in s])
def _gather_columns_with_annotation(self, clause, *annotation):
annotation = set(annotation)
return set([
col for col in visitors.iterate(clause, {})
if annotation.issubset(col._annotations)
])
def join_targets(self, source_selectable,
dest_selectable,
aliased,
single_crit=None):
"""Given a source and destination selectable, create a
join between them.
This takes into account aliasing the join clause
to reference the appropriate corresponding columns
in the target objects, as well as the extra child
criterion, equivalent column sets, etc.
"""
# place a barrier on the destination such that
# replacement traversals won't ever dig into it.
# its internal structure remains fixed
# regardless of context.
dest_selectable = _shallow_annotate(
dest_selectable,
{'no_replacement_traverse': True})
primaryjoin, secondaryjoin, secondary = self.primaryjoin, \
self.secondaryjoin, self.secondary
# adjust the join condition for single table inheritance,
# in the case that the join is to a subclass
# this is analogous to the
# "_adjust_for_single_table_inheritance()" method in Query.
if single_crit is not None:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & single_crit
else:
primaryjoin = primaryjoin & single_crit
if aliased:
if secondary is not None:
secondary = secondary.alias(flat=True)
primary_aliasizer = ClauseAdapter(secondary)
secondary_aliasizer = \
ClauseAdapter(dest_selectable,
equivalents=self.child_equivalents).\
chain(primary_aliasizer)
if source_selectable is not None:
primary_aliasizer = \
ClauseAdapter(secondary).\
chain(ClauseAdapter(
source_selectable,
equivalents=self.parent_equivalents))
secondaryjoin = \
secondary_aliasizer.traverse(secondaryjoin)
else:
primary_aliasizer = ClauseAdapter(
dest_selectable,
exclude_fn=_ColInAnnotations("local"),
equivalents=self.child_equivalents)
if source_selectable is not None:
primary_aliasizer.chain(
ClauseAdapter(source_selectable,
exclude_fn=_ColInAnnotations("remote"),
equivalents=self.parent_equivalents))
secondary_aliasizer = None
primaryjoin = primary_aliasizer.traverse(primaryjoin)
target_adapter = secondary_aliasizer or primary_aliasizer
target_adapter.exclude_fn = None
else:
target_adapter = None
return primaryjoin, secondaryjoin, secondary, \
target_adapter, dest_selectable
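# (Explanatory note, not part of the original source.)  create_lazy_clause()
# builds the criterion used by the lazy loader: the primaryjoin (and the
# secondaryjoin, when present) with columns on the loading side replaced by
# anonymous bind parameters.  It returns (lazywhere, bind_to_col,
# equated_columns), where bind_to_col maps each bind parameter key back to
# its originating column.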
def create_lazy_clause(self, reverse_direction=False):
binds = util.column_dict()
equated_columns = util.column_dict()
has_secondary = self.secondaryjoin is not None
if has_secondary:
lookup = collections.defaultdict(list)
for l, r in self.local_remote_pairs:
lookup[l].append((l, r))
equated_columns[r] = l
elif not reverse_direction:
for l, r in self.local_remote_pairs:
equated_columns[r] = l
else:
for l, r in self.local_remote_pairs:
equated_columns[l] = r
def col_to_bind(col):
if (
(not reverse_direction and 'local' in col._annotations) or
reverse_direction and (
(has_secondary and col in lookup) or
(not has_secondary and 'remote' in col._annotations)
)
):
if col not in binds:
binds[col] = sql.bindparam(
None, None, type_=col.type, unique=True)
return binds[col]
return None
lazywhere = self.primaryjoin
if self.secondaryjoin is None or not reverse_direction:
lazywhere = visitors.replacement_traverse(
lazywhere, {}, col_to_bind)
if self.secondaryjoin is not None:
secondaryjoin = self.secondaryjoin
if reverse_direction:
secondaryjoin = visitors.replacement_traverse(
secondaryjoin, {}, col_to_bind)
lazywhere = sql.and_(lazywhere, secondaryjoin)
bind_to_col = dict((binds[col].key, col) for col in binds)
# this is probably not necessary
lazywhere = _deep_deannotate(lazywhere)
return lazywhere, bind_to_col, equated_columns
class _ColInAnnotations(object):
"""Seralizable equivalent to:
lambda c: "name" in c._annotations
"""
def __init__(self, name):
self.name = name
def __call__(self, c):
return self.name in c._annotations
| [
"[email protected]"
] | |
4868a3b1aa639cd755689a4481b1b90f1b278bc3 | 138bf6981ade36858e94074597daf6483083de63 | /MethodsRanker.py | b0b19811d7cdbe23f5334167ef74fbd2729c792e | [] | no_license | muralikrishnasn/BGSKdash | 7c11921989957ff4ee85791bc3ba705d8b5c8439 | a7774be2c77f467e55409d4e9b00fd9d9c74a49b | refs/heads/master | 2021-04-15T17:36:20.317213 | 2020-12-12T09:06:37 | 2020-12-12T09:06:37 | 126,199,598 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,744 | py | #!/usr/bin/env python
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from Result import Result, createResultsFromMethod, createResultsFromFolder
from resultsCalculator import videoResults2categoryResults
__author__ = "Nil Goyette"
__copyright__ = "Copyright 2012, Universite de Sherbrooke"
def main():
# Get the results from gmm*.txt file in the gmm folder
rawResults = createResultsFromMethod('C:\\Users\\Nil\\Desktop\\Results', 'gmm')
# Get the results from all txt files in the gmm folder
# rawResults = createResultsFromFolder('C:\\Users\\Nil\\Desktop\\Results\\gmm')
# Convert the results to Category Results, which I can sort
stats = videoResults2categoryResults(rawResults)
l = []
for method, result in stats['Overall'].items():
l.append( (result.avgOverallRanking, method) )
l.sort()
for t in l: # If you have a lot of results, replace with l[:15] or any low number
print(t)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
d60b8848647073fb6debacd86e0751ebbc55838b | 0a21d5e72b4afbabcbf4ec0d65ea84cd8d6159c7 | /Python/899_orderly-queue.py | 0ed36e03d5f37b04e0c9fa9b063527040a5e06b2 | [] | no_license | LuoJiaji/LeetCode-Demo | 193f27ba36c93f9030435874a145c63a81d3c0f8 | 78e6e87c01848a1dc71b7dc0716029ece5f35863 | refs/heads/master | 2020-06-24T03:03:33.366537 | 2020-04-05T02:09:41 | 2020-04-05T02:09:41 | 198,830,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | class Solution(object):
def orderlyQueue(self, S, K):
"""
:type S: str
:type K: int
:rtype: str
"""
if K >= 2:
s = sorted(S)
s = ''.join(s)
# print(s)
return s
else:
s_min = S
for _ in range(len(S)):
S = S[1:] + S[0]
s_min = min(s_min, S)
return s_min
S = "cba"; K = 1
res = Solution().orderlyQueue(S, K)
print(res)
S = "baaca"; K = 3
res = Solution().orderlyQueue(S, K)
print(res)
| [
"[email protected]"
] | |
67a0640150e639d50308878ea58580cff5f9bc62 | eeaf464197e3fab5599334bfaf4b31cd3883b2d0 | /ejercicio12.py | 3a8b9d9d5f7927ae1ee61b57c10efbe99bb23167 | [] | no_license | henry2023/ejercicios | d96231a4d06c2878d92e75ce1398505ed56c79f6 | 279c67a004233d4b47cb87873c3ed9b190cd3dd7 | refs/heads/main | 2023-08-14T06:15:02.715395 | 2021-09-27T22:18:46 | 2021-09-27T22:18:46 | 406,185,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | c = float(input("costo unitario: "))
d = int(input("numero de docenas: "))
print("Monto a pagar: ",d*12*c) | [
"[email protected]"
] | |
18af37301aaae47db6b2794060444127633d9f54 | 42dedcc81d5dc9a61a79dbcea9bdd7363cad97be | /figures/fig_04/save_A1_cwas+global_pysurfer_easythresh.py | b50fac031ecfaf56259988e8e0361a0addb8352a | [] | no_license | vishalmeeni/cwas-paper | 31f4bf36919bba6caf287eca2abd7b57f03d2c99 | 7d8fe59e68bc7c242f9b3cfcd1ebe6fe6918225c | refs/heads/master | 2020-04-05T18:32:46.641314 | 2015-09-02T18:45:10 | 2015-09-02T18:45:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,165 | py | #!/usr/bin/env python
import sys
sys.path.append("/home2/data/Projects/CWAS/share/lib/surfwrap")
import os
from os import path
from surfwrap import SurfWrap
import numpy as np
import nibabel as nib
from newsurf import *
###
# Setup
strategy = "compcor"
scans = ["short", "medium"]
hemis = ["lh", "rh"]
cbarfile = "/home2/data/Projects/CWAS/share/lib/surfwrap/colorbars/red-yellow.txt"
study = "iq"
print "strategy: %s; scans: %s" % (strategy, ",".join(scans))
basedir = "/home2/data/Projects/CWAS/nki/cwas"
# Distance Directory
kstr = "kvoxs_fwhm08_to_kvoxs_fwhm08"
dirname = "%s_%s" % (strategy, kstr)
distdirs = [ path.join(basedir, scan, dirname) for scan in scans ]
# MDMR Directories
mname = "iq_age+sex+meanFD+meanGcor.mdmr"
cname = "cluster_correct_v05_c05"
factor = "FSIQ"
# Input pfile
mdmrdirs = [ path.join(distdir, mname) for distdir in distdirs ]
pfiles = [ path.join(mdmrdir, cname, "easythresh", "thresh_zstat_%s.nii.gz" % factor) for mdmrdir in mdmrdirs ]
# Intermediate surface files
easydirs = [ path.join(mdmrdir, cname, "easythresh") for mdmrdir in mdmrdirs ]
for easydir in easydirs:
surfdir = path.join(easydir, "surfs")
if not path.exists(surfdir):
os.mkdir(surfdir)
cmd = "./x_vol2surf.py %s/zstat_%s.nii.gz %s/thresh_zstat_%s.nii.gz %s/surf_thresh_zstat_%s" % (easydir, factor, easydir, factor, surfdir, factor)
print cmd
#os.system(cmd)
sfiles = [ path.join(easydir, "surfs/surf_thresh_zstat_%s" % factor) for easydir in easydirs ]
all_sfiles = []
for sfile in sfiles:
for hemi in hemis:
all_sfiles.append("%s_%s.nii.gz" % (sfile, hemi))
# Output prefixes
obase = "/home2/data/Projects/CWAS/figures"
odir = path.join(obase, "fig_04")
if not path.exists(odir): os.mkdir(odir)
###
###
# Get minimum and maximum values across the two scans
def get_range(fname):
img = nib.load(fname)
data = img.get_data()
data_max = data.max()
if data_max == 0:
data_min = data_max
else:
data_min = data[data.nonzero()].min()
return [data_min, data_max]
print 'getting range'
ranges = np.array([ get_range(pfile) for pfile in pfiles ])
dmin = ranges.min()
dmax = ranges.max()
print 'min=%.4f; max=%.4f' % (dmin,dmax)
###
###
# Surface Viz
# Color bar
cbar = load_colorbar(cbarfile)
for i,sfile in enumerate(sfiles):
print sfile
oprefix = path.join(odir, "A_IQ+global_easythresh_pysurfer_scan%i" % (i+1))
for hemi in hemis:
surf_data = io.read_scalar_data("%s_%s.nii.gz" % (sfile, hemi))
brain = fsaverage(hemi)
brain = add_overlay(study, brain, surf_data, cbar,
dmin, dmax, "pos")
save_imageset(brain, oprefix, hemi)
montage(oprefix, compilation='box')
montage(oprefix, compilation='horiz')
###
for i,pfile in enumerate(pfiles):
print pfile
oprefix = path.join(odir, "B_global_easythresh_surface_scan%i" % (i+1))
sw = SurfWrap(name=factor, infile=pfile, cbar="red-yellow",
outprefix=oprefix)
sw.min = dmin; sw.max = dmax
sw.run(compilation="box")
sw.montage(compilation="stick")
| [
"[email protected]"
] | |
1c4ed1090065f2ee2276387bc10be0ac903da2f4 | 7a19312d8b509baa9015432ab4004ffadbcdfd69 | /app.py | a5bfbabaf024206fd13b45f62f216aebb51de7fb | [] | no_license | eytorinn/lokaverk | 5d70f9bcaa830671518e9c5e915c3f03710e0813 | 9eb67f455d1bcb326732e48efde01d7023567588 | refs/heads/master | 2020-09-30T05:21:11.245084 | 2019-12-10T20:59:59 | 2019-12-10T20:59:59 | 227,214,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,381 | py |
import os
from flask import Flask, flash, redirect, render_template, request, url_for, make_response, escape, session, abort
import pymysql
app = Flask(__name__)
app.secret_key = os.urandom(12)
print(os.urandom(12))
conn = pymysql.connect(host='localhost', port=3306, user='root', password='root', database='lokaverkefni')
@app.route('/', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
ui = request.form.get('userID')
psw = request.form.get('user_password')
conn = pymysql.connect(host='localhost', port=3306, user='root', password='root', database='lokaverkefni')
cur = conn.cursor()
cur.execute("SELECT * FROM lokaverkefni.users where userID=%s AND user_password=%s",(ui,psw))
result = cur.fetchone()  # fetch a single row as a tuple
session['user'] = ui
# do the user and password exist in the db?
if ui == 'admin' and result[3] == psw:
cur.close()
conn.close()
flash('Innskráning tókst ')
session['logged_in'] = True
return redirect(url_for('homeAD',ui=ui))
elif result:
cur.close()
conn.close()
flash('Innskráning tókst ')
session['logged_in'] = True
return redirect(url_for('home',ui=ui))
else:
error = 'Innskráning mistókst - reyndu aftur'
return render_template('innskraning.tpl', error=error)
@app.route("/logout")
def logout():
session['logged_in'] = False
return render_template('logout.tpl')
@app.route('/nyskra', methods=['GET', 'POST'])
def nyr():
error = None
if request.method == 'POST':
userDetails = request.form
user = userDetails['userID']
name = userDetails['user_name']
email = userDetails['user_email']
password = userDetails['user_password']
try:
cur = conn.cursor()
cur.execute("INSERT INTO lokaverkefni.users(userID, user_name, user_email, user_password) VALUES(%s, %s, %s, %s)",(user, name, email, password))
conn.commit()
print(cur)
cur.close()
flash('Nýskráning tókst! Skráðu þig inn ')
return render_template('innskraning.tpl',name=name)
except pymysql.IntegrityError:
error = 'Notandi er þegar skráður með þessu nafni og/eða lykilorði'
return render_template('nyskraning.tpl', error=error)
@app.route('/homePage')
def home():
if 'logged_in' in session:
conn = pymysql.connect(host='localhost', port=3306, user='root', password='root', database='lokaverkefni')
cur = conn.cursor()
resultValue = cur.execute("SELECT * FROM lokaverkefni.posts;")
if resultValue > 0:
userDetails = cur.fetchall()
flash('Velkomin')
return render_template('homePage.tpl',userDetails=userDetails)
@app.route('/home_ad')
def homeAD():
if 'logged_in' in session:
conn = pymysql.connect(host='localhost', port=3306, user='root', password='root', database='lokaverkefni')
cur = conn.cursor()
users = cur.execute("SELECT * FROM lokaverkefni.users;")
if users:
users = cur.fetchall()
flash('Velkomin')
return render_template('home_ad.tpl',users=users,ui=session['user'])
@app.route('/new_post', methods=['GET', 'POST'])
def blog():
if 'logged_in' in session:
msg = ''
if request.method == 'POST' and 'postur' in request.form and 'userID' in request.form:
postID = request.form.get('postID')
postur = request.form.get('postur')
userID = request.form.get('userID')
cur = conn.cursor()
cur.execute("SELECT * FROM lokaverkefni.posts where userID = %s", (userID))
blogs = cur.fetchone()
if blogs:
cur.execute("INSERT INTO lokaverkefni.posts VALUES(%s,%s,%s)", (postID, postur, userID))
conn.commit()
cur.close()
msg = 'You wrote the blog!'
else:
msg = 'The blog already exist'
cur = conn.cursor()
cur.execute("SELECT * FROM lokaverkefni.posts")
blogs = cur.fetchall()
return render_template('blog.tpl', msg=msg, ui=session['user'])
return redirect(url_for('home'))
@app.route('/change', methods=['GET', 'POST'])
def edit():
if not session.get('logged_in'):
return render_template('innskraning.tpl')
else:
session['logged_in'] = True
error = None
if request.method == 'POST':
ui = request.form.get('userID')
try:
cur = conn.cursor()
userPosts = cur.execute("SELECT * FROM lokaverkefni.posts WHERE userID=%s", (ui))
if userPosts > 0:
userPosts = cur.fetchall()
flash('Veldu póstnúmer ')
print(ui)
return render_template('change.tpl', userPosts=userPosts, ui=ui)
except pymysql.IntegrityError:
error = 'Þú hefur ekki aðgang að þessari síðu'
#return render_template('logout.tpl')
@app.route('/changePost/<int:id>', methods=['GET', 'POST'])
def editpost(id):
if not session.get('logged_in'):
return render_template('innskraning.tpl')
else:
session['logged_in'] = True
try:
conn = pymysql.connect(host='localhost', port=3306, user='root', password='root', database='lokaverkefni')
cur = conn.cursor()
cur.execute("SELECT * FROM lokaverkefni.posts WHERE postID=%s", id)
conn.commit()
postur = cur.fetchall()  # fetch the rows as tuples
print(id)
if postur:
return render_template('changePost.tpl', postur=postur)
else:
return 'Villa! Póstur #{id} er ekki til'
finally:
cur.close()
conn.close()
@app.route('/update/', methods=['GET', 'POST'])
def post():
if not session.get('logged_in'):
return render_template('index.tpl')
else:
session['logged_in'] = True
pi = request.form.get('postID')
po = request.form.get('postur')
ui = request.form.get('userID')
button = request.form.get('breyta')
# input VALUE is 'Breyta' (edit) when the edit button was pressed, otherwise 'Eyða' (delete)
if button == 'Breyta':
conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='root', db='lokaverkefni')
cur = conn.cursor()
cur.execute("UPDATE lokaverkefni.posts SET postur=%s WHERE postID=%s AND userID=%s", (po, pi, ui))
conn.commit()
print(cur)
cur.close()
conn.close()
flash('Póstinum hefur verið breytt ')
session['logged_in'] = True
return render_template('blog.tpl', ui=ui)
# return redirect(url_for('user',ui=ui))
else:
conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='root', db='lokaverkefni')
cur = conn.cursor()
cur.execute("Delete FROM lokaverkefni.posts WHERE postID=%s", (pi))
conn.commit()
cur.close()
conn.close()
flash('Póstinum hefur verið eytt úr gagnagrunninum ')
# return redirect(url_for('user',ui=ui))
return render_template('blog.tpl', ui=ui)
if __name__ == '__main__':
app.run(debug=True)
# app.run(host='0.0.0.0')
| [
"[email protected]"
] | |
77428b4cae22fb06075246b610647ee94a08229b | e6abffb7afc1a7dc264974b21d759d53c53e3704 | /Proyecto/Util/FlaskServer.py | 230e83dbe886c9085b14705dddf13b9faff3c3fe | [] | no_license | AlexP97/Microservicios | bf9aab449dfa20afadc0f59722e25ac4f9619b05 | 662be2177248b725ee9493e5107261787c5e5348 | refs/heads/master | 2020-03-23T13:31:07.955241 | 2018-07-19T19:35:46 | 2018-07-19T19:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | from flask import request
def shutdown_server():
"""
Function that stops the web server
:raise RuntimeError:
"""
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
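# Usage sketch (hedged): shutdown_server() only works inside a request handled by the
# Werkzeug development server. The Flask app object and route name below are assumptions
# for illustration, not part of this module:
# @app.route('/shutdown', methods=['POST'])
# def shutdown():
#     shutdown_server()
#     return 'Server shutting down...'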
| [
"[email protected]"
] | |
eef50aee18acf71d992d3a49bb87f06c6a487227 | 811c674d3b77a7bb161c98d365fa5fc1645d7eca | /Ejercicios/Week3/imports/b/c/z.py | a4292d52523f169bd69c2a7c44b4b745b4423dff | [] | no_license | Adriagallardo/adria_gallardo_thebridge_ds | 8925f08c50ffd4f2990035e7033262a8510a4f06 | 7ba34f12a53ea9b41425d270a026b19090a20633 | refs/heads/main | 2023-06-25T11:13:57.436048 | 2021-07-29T10:46:37 | 2021-07-29T10:46:37 | 361,670,370 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | import sys, os
ruta= __file__
print(ruta)
# walk three directory levels up from this file (imports/b/c -> the folder that holds packages a and b)
for i in range(3):
ruta= os.path.dirname(ruta)
print(ruta)
sys.path # bare expression, has no effect on its own
# append that folder to sys.path so the imports of a.x and b.y below can be resolved
sys.path.append(ruta)
import a.x as x
import b.y as y
def f1z():
print("f1z")
y.f1z
def f2z():
print("f2z")
x.f2x()
z = 3
z_z = 33
| [
"[email protected]"
] | |
fd67bb3eb27cb61ca670ea0a0bef7798d885a136 | d34391683da30dc5a58af43462774a9e90871bab | /particle_reading.py | 992a95732c98e3fbe881a2398754d575d4759aa5 | [] | no_license | davidrball/reconnection_plotting | d3dcb648002ab66df5406a00f684ce0cae2c25e2 | 8eb03eb056301236b65f7a92185cbed3891c2147 | refs/heads/master | 2020-09-12T18:52:48.297571 | 2019-11-18T18:28:51 | 2019-11-18T18:28:51 | 222,516,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,027 | py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from tristan_funcs import get_val_filename, load_particle
import h5py
import matplotlib.patches as patches
plt.rcParams['mathtext.fontset'] = 'stix'
plt.rcParams['font.family'] = 'STIXGeneral'
plt.rcParams.update({'font.size': 10})
partnumstart = 6000
partnumstop = 6002
mypath = 'particle_txt/sig.3_delgam0005/bguide.3_earlytime/'
c_omp = 3
istep = 12
sigma=.3
va = np.sqrt(sigma/(1+sigma))
t_start = 1
t_stop = 25
fld_base = "../../tristan-mp_reconnection/guide_fields/sig.3/delgam0005/bguide.3/output/flds.tot."
for partnum in range(partnumstart, partnumstop):
d = load_particle(mypath, partnum)
t_list = d['time']
#print(t_list)
x_list = np.array(d['x'])
y_list = np.array(d['y'])
u_list = np.array(d['u'])
v_list = np.array(d['v'])
w_list = np.array(d['w'])
bx_list = np.array(d['bx'])
by_list = np.array(d['by'])
bz_list = np.array(d['bz'])
ex_list = np.array(d['ex'])
ey_list = np.array(d['ey'])
ez_list = np.array(d['ez'])
gamma_list = np.array(d['gamma'])
#print(gamma_list)
maxgam = max(gamma_list)
mingam = min(gamma_list)
#print(t_list)
for t in range(t_start, t_stop):
tstr = str(t)
print(tstr)
if len(tstr) == 2:
myfld = fld_base + "0"
elif len(tstr) == 1:
myfld = fld_base + "00"
myfld += tstr
print(myfld)
f_fld = h5py.File(myfld)
#pick out prtl values at the correct time:
for index in range(len(t_list)):
if t_list[index] == t:
i = index
break
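# Equivalent lookup with numpy (a sketch; assumes each output time t appears exactly once in t_list):
# i = int(np.where(np.array(t_list) == t)[0][0])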
#to extract particle values at this time, use [index]
x = d['x'][i]
y = d['y'][i]
u = d['u'][i]
v = d['v'][i]
w = d['w'][i]
bx = d['bx'][i]
by = d['by'][i]
bz = d['bz'][i]
ex = d['ex'][i]
ey = d['ey'][i]
ez = d['ez'][i]
gamma = d['gamma'][i]
gamcol = np.log10(gamma/maxgam) / np.log10(mingam/maxgam) # maps gamma onto [0,1] on a log scale (0 at this particle's max gamma, 1 at its min)
print(gamcol)
intx = int(x)
inty = int(y)
intx /= istep
inty /= istep
dens = np.array(get_val_filename("dens",f_fld))[0]
fig = plt.figure()
ax1 = plt.subplot2grid((1,2),(0,0),rowspan=1,colspan=1)
ax2 = plt.subplot2grid((1,2),(0,1),colspan=1)
xhlf = np.shape(dens)[1]/2
xscan = 100
xlow = int(xhlf - xscan)
xup = int(xhlf + xscan)
xext = istep*2*xscan / c_omp # in electron skin depths
yext = istep * np.shape(dens)[0] /c_omp
ax1.imshow(dens[:,xlow:xup],origin='lower',vmax=20, extent=[0,xext,0,yext])
ax1.set_xlabel('$c/\omega_{p}$',size=14)
ax1.set_ylabel('$c/\omega_{p}$',size=14)
ax2.plot(t_list[:i],gamma_list[:i],color="Red",label="$\gamma$")
#ax2.scatter(t,gamma,color="Red")
plt.legend(loc='lower left',prop={'size':12},frameon=False)
ax2.set_ylim(1e0,1e4)
ax2.set_yscale('log')
ax2.set_xlim(0,70)
ax2.set_xlabel('Time $(300\omega_{p}^{-1})$')
ax2.set_ylabel('$\gamma$')
ax3 = ax2.twinx()
ax3.plot(t_list[:i], ez_list[:i]/(va*np.sqrt(bx_list[:i]**2+by_list[:i]**2)), color = "Blue",label="$E_{z}/B_{xy}$")
ax3.set_ylim(-3,3)
#ax3.set_ylim(0,4)
ax3.set_ylabel('$E_{z}/(V_{a} B_{xy})$')
ax3.set_xlim(t_start-1, t_stop+1)
intx -= xlow
intx *= istep/c_omp
inty *= istep/c_omp
circle = patches.Circle((intx,inty),radius=20,fill=False,color=(gamcol, 1-gamcol,1))
plt.tight_layout()
ax1.add_patch(circle)
plt.legend(loc='lower right',prop={'size':12},frameon=False)
plt.savefig('plots/particle_plots/sig.3_delgam0005/bguide.3_earlytime/'+'E'+str(partnum)+'_'+str(t)+'.png',bbox_inches='tight',dpi=300)
f_fld.close()
plt.close()
| [
"[email protected]"
] | |
41d9f41f0768ef28ba15605b72de18bac5527208 | ad28505bf34ecb6b64c4f7cbcda431b35f3b4de4 | /SVC1_binary_fist.py | a9b545f596dbd83d76607d377d0ed98e36018520 | [] | no_license | zuchermann/skywalkerSVM | 88c29f4e4755100a606ea484e4b17f3f7cf7844e | 2b7d9b8ebe6656034c0a07c88f85c0ca5ad49739 | refs/heads/master | 2021-07-13T14:25:58.511694 | 2017-10-15T18:25:34 | 2017-10-15T18:25:34 | 104,824,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | import random
from util import *
from sklearn import svm, metrics
from PIL import Image
##importing images
images, data = get_image_h5("./../data/with_brace/sonostar/binary_fist_small") # get images
images, shape = downsample_images(images)
##convert label data to two classes
fist_to_binary(data)
##shuffle and split into training/test sets
indexes = [i for i in range(len(images))]
random.shuffle(indexes)
train_prop = 0.25
train_images, test_images, train_data, test_data = split_sets(images, data, indexes, train_prop)
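# Note: split_sets is this repo's own helper (imported from util); a roughly equivalent split could be
# done with scikit-learn directly. The call below is a sketch and assumes train_prop is the training fraction:
# from sklearn.model_selection import train_test_split
# train_images, test_images, train_data, test_data = train_test_split(images, data, train_size=train_prop)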
##test display image
#image = Image.fromarray(train_images[0].reshape(shape))
#image.show()
## Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.000001)
classifier.fit(train_images, train_data)
## Predict fist open vs closed based on remaining images
expected = test_data
predicted = classifier.predict(test_images)
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted)) | [
"[email protected]"
] | |
1190c7b70e65c07abc408fbb78766afb0e31a44b | 958033b337e44d9a862b216f739eca51422bf6ef | /deep-learning-nanodegree/first-neural-network/Your_first_neural_network.py | 47ed3eee2426f488c937cee7425d1d56686961ca | [] | no_license | zhongdesen/Udacity | c7756532ab790522d39d97e32deab142bf72f123 | 842e2bad0351bfb185cd9053c5be2a86ee44b46f | refs/heads/master | 2022-10-18T22:14:33.447859 | 2018-08-22T02:29:59 | 2018-08-22T02:29:59 | 145,640,473 | 0 | 1 | null | 2022-10-10T12:00:02 | 2018-08-22T01:42:22 | Jupyter Notebook | UTF-8 | Python | false | false | 16,640 | py |
# coding: utf-8
# # Your first neural network
#
# In this project, you will build your first neural network and use it to predict daily bike rental ridership. We provide some of the code, but the implementation of the neural network is (mostly) left to you. After submitting this project, feel free to explore the data and the model further.
# In[1]:
get_ipython().magic('matplotlib inline')
get_ipython().magic("config InlineBackend.figure_format = 'retina'")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## Load and prepare the data
#
# A critical step in building a neural network is preparing the data correctly. Variables on different scales make it hard for the network to learn the right weights efficiently. The code to load and prepare the data is provided below. You will learn more about it soon!
# In[2]:
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
# In[3]:
rides.head()
# ## A brief look at the data
#
# This dataset contains the number of riders for each hour of each day from January 1, 2011 to December 31, 2012. Riders are split into casual and registered users, and the cnt column is the total count of riders. You can see the first few rows of the data above.
#
# The plot below shows the number of riders over roughly the first 10 days of the dataset (some days do not have exactly 24 entries, so it is not precisely 10 days). You can see the hourly rentals here. The data is complicated! Ridership is lower on weekends and peaks during commuting hours on weekdays. The data above also includes temperature, humidity and wind speed, all of which affect the number of riders. Your model will need to capture all of this.
# In[4]:
rides[:24*10].plot(x='dteday', y='cnt')
# ### Dummy variables
#
# Below are some categorical variables such as season, weather and month. To include them in the model we need to create binary dummy variables. This is easy to do with Pandas' `get_dummies()`.
# In[5]:
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
# ### Scaling the target variables
#
# To make training the network easier, we standardize each continuous variable, i.e. shift and scale it so that it has zero mean and a standard deviation of 1.
#
# We keep the scaling factors so that we can convert the data back when we use the network for predictions.
# In[6]:
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# ### Splitting the data into training, testing and validation sets
#
# We save roughly the last 21 days of data as a test set, to be used after the network has been trained. We will use it to make predictions and compare them with the actual ridership counts.
# In[7]:
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# We split the remaining data into two sets: one for training and one for validating the network once it has been trained. Since the data has a time-series character, we train on historical data and then try to predict future data (the validation set).
# In[8]:
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
# ## Time to build the network
#
# Below you will build your own network. We have built the structure and the backward pass for you. You will implement the forward pass of the network. You also need to set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
#
# <img src="assets/neural_network.png" width=300px>
#
# The network has two layers: a hidden layer and an output layer. The hidden layer uses the sigmoid function as its activation function. The output layer has a single node and is used for regression, so the node's output is the same as its input, i.e. the activation function is $f(x)=x$. A function that takes the input signal and produces an output signal while taking a threshold into account is called an activation function. We work through each layer of the network and compute the output of every neuron. All of the outputs of one layer become the inputs of the neurons in the next layer. This process is called forward propagation.
#
# We use weights to propagate signals from the input layer to the output layer in the neural network. We also use weights to propagate the error back from the output layer into the network so the weights can be updated. This is called backpropagation.
#
# > **Hint**: For the backpropagation you need the derivative of the output activation function ($f(x) = x$). If you are not familiar with calculus, this function is simply the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
#
#
# You need to complete the following tasks:
#
# 1. Implement the sigmoid activation function. Set `self.activation_function` in `__init__` to your sigmoid function.
# 2. Implement the forward pass in the `train` method.
# 3. Implement the backpropagation algorithm in the `train` method, including computing the output error.
# 4. Implement the forward pass in the `run` method.
#
#
# In[9]:
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
def sigmoid(x):
return 1 / (1 + np.exp(-x)) # Replace 0 with your sigmoid calculation here
self.activation_function = sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
error = y - final_outputs # Output layer error is the difference between desired target and actual output.
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = np.dot(error, self.weights_hidden_to_output.T)
# TODO: Backpropagated error terms - Replace these values with your calculations.
output_error_term = error
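# Note (added for clarity): the output activation is the identity f(x) = x, whose derivative is 1,
# so the output error term is simply the error itself.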
hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
# Weight step (input to hidden)
delta_weights_i_h += np.dot(X[:, None], hidden_error_term[:, None].T)
# Weight step (hidden to output)
delta_weights_h_o += np.dot(hidden_outputs[:, None], output_error_term[:, None])
# TODO: Update the weights - Replace these values with your calculations.
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records# update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records# update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
# In[10]:
def MSE(y, Y):
return np.mean((y-Y)**2)
# ## Unit tests
#
# Run these unit tests to check that your network was implemented correctly. This helps you make sure the network is right before you start training it. These tests must pass in order to pass the project.
# In[11]:
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
# ## Training the network
#
# Now you will set the hyperparameters for the network. The strategy is to choose hyperparameters that make the error on the training set small without overfitting the data. If you train the network for too long, or with too many hidden nodes, it can become too specific to the training set and fail to generalize to the validation set, i.e. the validation loss starts to rise while the training loss keeps falling.
#
# You will also train the network with stochastic gradient descent (SGD): for every training pass you take a random sample of the data instead of the whole dataset. Compared with ordinary gradient descent you need more training passes, but each one is faster. This makes training the network more efficient. You will learn more about SGD later.
#
#
# ### Choosing the number of iterations
#
# This is the number of batches sampled from the training data while training the network. The more iterations, the better the model fits the data. However, with too many iterations the model stops generalizing well to other data; this is called overfitting. Pick a number that keeps the training loss low while the validation loss stays moderate. Once you start to overfit, you will see the training loss keep dropping while the validation loss starts to rise.
#
# ### Choosing the learning rate
#
# The learning rate scales the size of the weight updates. If it is too large, the weights blow up and the network fails to fit the data. A good starting point is 0.1. If the network has trouble fitting the data, try lowering the learning rate. Note that the lower the learning rate, the smaller the weight update steps, and the longer the network takes to converge.
#
#
# ### Choosing the number of hidden nodes
#
# The more hidden nodes, the more accurate the model's predictions. Try different numbers of hidden nodes and see how it affects performance. You can look at the losses dictionary as a metric of the network's performance. If the number of hidden units is too small, the model does not have enough room to learn; if it is too large, there are too many directions the learning can take. The trick to choosing the number of hidden units is finding the right balance.
# In[15]:
import sys
### Set the hyperparameters here ###
iterations = 20000
learning_rate = 0.09
hidden_nodes = 30
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt'] # .ix is from older pandas; on current pandas use .loc here
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) + "% ... Training loss: " + str(train_loss)[:5] + " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
# In[16]:
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
# ## Check out your predictions
#
# Use the test data to see how well the network models the data. If it is completely wrong, make sure every step of the network is implemented correctly.
# In[17]:
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
# ## Optional: Thinking about your results (we will not grade this answer)
#
#
# Answer the following questions about your results. How well does the model predict the data? Where does it fail? Why does it fail there?
#
# > **Note**: You can edit the text of this cell by double-clicking it. To preview the text, press Control + Enter.
#
# #### Write your answer below
#
| [
"[email protected]"
] | |
1a6975cd55131e7bd09d9bc4e42dd3e044eb92c9 | 3ef70fe63acaa665e2b163f30f1abd0a592231c1 | /stackoverflow/venv/lib/python3.6/site-packages/twisted/internet/pyuisupport.py | 1e7def5911854cb1cd5a6c3f5be81cc1e5bd2184 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | wistbean/learn_python3_spider | 14914b63691ac032955ba1adc29ad64976d80e15 | 40861791ec4ed3bbd14b07875af25cc740f76920 | refs/heads/master | 2023-08-16T05:42:27.208302 | 2023-03-30T17:03:58 | 2023-03-30T17:03:58 | 179,152,420 | 14,403 | 3,556 | MIT | 2022-05-20T14:08:34 | 2019-04-02T20:19:54 | Python | UTF-8 | Python | false | false | 817 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module integrates PyUI with twisted.internet's mainloop.
Maintainer: Jp Calderone
See doc/examples/pyuidemo.py for example usage.
"""
# System imports
import pyui
def _guiUpdate(reactor, delay):
pyui.draw()
if pyui.update() == 0:
pyui.quit()
reactor.stop()
else:
reactor.callLater(delay, _guiUpdate, reactor, delay)
def install(ms=10, reactor=None, args=(), kw={}):
"""
Schedule PyUI's display to be updated approximately every C{ms}
milliseconds, and initialize PyUI with the specified arguments.
"""
d = pyui.init(*args, **kw)
if reactor is None:
from twisted.internet import reactor
_guiUpdate(reactor, ms / 1000.0)
return d
__all__ = ["install"]
| [
"[email protected]"
] | |
94f40202f3e35fe04abdfa187f1ab1fe9b3f457b | 2ec67709f1308a4437a1015967682c587039e1af | /venv/bin/chardetect | 2ce458dcfdb2f9d322a1ac47f86a15563c38e23e | [] | no_license | AndreyNoMercy/social_media | f8448bad64ba0248a79270dae8ee1a9a1f52e02c | cd6ece29a8c96da0d3862cabf5ea75b7e501c7ec | refs/heads/main | 2023-03-26T21:28:23.370016 | 2021-03-30T12:28:08 | 2021-03-30T12:28:08 | 352,992,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | #!/Users/mac/data_mining/social/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
32d06dc49e46306b27d555cc5eb7bd40056a2c00 | ce2bc16ac803434be57c7813732c97ca0b6bd6c7 | /lab02/google-python3-exercises/babynames/solution/babynames.py | 69a94da7f52d989972e22642888455adb6cc5a73 | [] | no_license | mathana96/dev-ops | 0600b22b39d7b619d7f6e303d6d7366b068fb98e | c5eb00294bdcd4965e409b17f62e904ffd17b239 | refs/heads/master | 2021-07-15T22:01:27.180601 | 2017-10-19T17:59:32 | 2017-10-19T17:59:32 | 104,484,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,855 | py | #!/usr/bin/python3
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
"""
Given a file name for baby.html, returns a list starting with the year string
followed by the name-rank strings in alphabetical order.
['2006', 'Aaliyah 91', Aaron 57', 'Abagail 895', ' ...]
"""
# +++your code here+++
# LAB(begin solution)
# The list [year, name_and_rank, name_and_rank, ...] we'll eventually return.
names = []
# Open and read the file.
f = open(filename, 'rU')
text = f.read()
# Could process the file line-by-line, but regex on the whole text
# at once is even easier.
# Get the year.
year_match = re.search(r'Popularity\sin\s(\d\d\d\d)', text)
if not year_match:
# We didn't find a year, so we'll exit with an error message.
sys.stderr.write('Couldn\'t find the year!\n')
sys.exit(1)
year = year_match.group(1)
names.append(year)
# Extract all the data tuples with a findall()
# each tuple is: (rank, boy-name, girl-name)
tuples = re.findall(r'<td>(\d+)</td><td>(\w+)</td>\<td>(\w+)</td>', text)
#print tuples
# Store data into a dict using each name as a key and that
# name's rank number as the value.
# (if the name is already in there, don't add it, since
# this new rank will be bigger than the previous rank).
names_to_rank = {}
for rank_tuple in tuples:
(rank, boyname, girlname) = rank_tuple # unpack the tuple into 3 vars
if boyname not in names_to_rank:
names_to_rank[boyname] = rank
if girlname not in names_to_rank:
names_to_rank[girlname] = rank
# You can also write:
# for rank, boyname, girlname in tuples:
# ...
# To unpack the tuples inside a for-loop.
# Get the names, sorted in the right order
sorted_names = sorted(names_to_rank.keys())
# Build up result list, one element per line
for name in sorted_names:
names.append(name + " " + names_to_rank[name])
return names
# LAB(replace solution)
# return
# LAB(end solution)
def main():
# This command-line parsing code is provided.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print('usage: [--summaryfile] file [file ...]')
sys.exit(1)
# Notice the summary flag and remove it from args if it is present.
summary = False
if args[0] == '--summaryfile':
summary = True
del args[0]
# +++your code here+++
# For each filename, get the names, then either print the text output
# or write it to a summary file
# LAB(begin solution)
for filename in args:
names = extract_names(filename)
# Make text out of the whole list
text = '\n'.join(names)
if summary:
outf = open(filename + '.summary', 'w')
outf.write(text + '\n')
outf.close()
else:
print(text)
# LAB(end solution)
if __name__ == '__main__':
main()
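# Example invocation (assumes the exercise's baby*.html files are in the current directory):
#   ./babynames.py baby1990.html
#   ./babynames.py --summaryfile baby1990.html baby1992.html   # writes baby1990.html.summary etc.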
| [
"[email protected]"
] | |
37b2f9d6b52094089172c38cd1e6751779dfb331 | ec47e801c93d0e13de72884a1e4b8ad61b102b2a | /dbsession.py | 989b358dcb8c4c37bb275c8b3cebc5df02958f95 | [] | no_license | Sekitakovich/AIS5G | 37e07cd246b94f5980986a8807ec63a765831a6b | 9d531d63856331aa31bf7b6b01bcc3d195051812 | refs/heads/master | 2022-11-20T05:44:42.131046 | 2020-07-17T10:03:38 | 2020-07-17T10:03:38 | 268,015,057 | 0 | 0 | null | 2020-05-30T05:44:21 | 2020-05-30T05:24:12 | JavaScript | UTF-8 | Python | false | false | 3,313 | py | from datetime import datetime as dt
from typing import List
from contextlib import closing
import pathlib
import sqlite3
from multiprocessing import Process, Queue as MPQueue, Lock
from queue import Empty
from dataclasses import dataclass
from loguru import logger
@dataclass()
class Record(object):
sentence: bytes # NMEA asis
# passed: float # delta secs from prev
at: dt # time the sentence was received
class DBSession(Process):
def __init__(self, *, path: str, qp: MPQueue, timeout: int = 5, buffersize: int = 32):
super().__init__()
self.daemon = True
self.name = 'SQLite'
self.qp = qp
self.path = pathlib.Path(path) # path for *.db
self.nameformat: str = '%04d-%02d-%02d.db'
self.dateformat: str = '%Y-%m-%d %H:%M:%S.%f'
self.locker = Lock()
self.counter: int = 0
self.lastat: dt = dt.now()
self.timeout = timeout
self.buffer: List[Record] = []
self.buffersize = buffersize
self.schema = 'CREATE TABLE "sentence" ( \
"id" INTEGER NOT NULL DEFAULT 0 PRIMARY KEY AUTOINCREMENT, \
"at" TEXT NOT NULL DEFAULT \'\', \
"ds" REAL NOT NULL DEFAULT 0.0, \
"nmea" TEXT NOT NULL DEFAULT \'\' \
)'
def create(self, *, cursor: sqlite3.Cursor):
cursor.execute(self.schema)
def append(self, *, at: dt):
with self.locker: # just in case, guard against concurrent access to the buffer
rows = self.buffer.copy()
self.buffer.clear()
passed = (at-self.lastat).total_seconds()
name = self.nameformat % (at.year, at.month, at.day)
file = self.path / name # pathlib
exists = file.exists()
with closing(sqlite3.connect(str(file))) as db:
cursor = db.cursor()
if exists is False:
self.create(cursor=cursor)
for ooo in rows:
query = 'insert into sentence(at,ds,nmea) values(?,?,?)'
cursor.execute(query, [ooo.at.strftime(self.dateformat), passed, ooo.sentence])
logger.debug('+++ %d records were saved to %s' % (len(rows), file))
cursor.close()
db.commit() # never forget
def run(self) -> None:
while True:
try:
raw: bytes = self.qp.get(timeout=self.timeout)
self.counter += 1
except Empty as e:
if len(self.buffer):
logger.debug('!!! saved %d cause timeout' % len(self.buffer))
self.append(at=self.lastat)
self.lastat = dt.now()
except KeyboardInterrupt as e:
self.append(at=self.lastat)
logger.error(e)
break
else:
now = dt.now()
if now.day != self.lastat.day:
self.append(at=self.lastat)
logger.debug('just in today')
record = Record(sentence=raw, at=now) # the 'passed' field is commented out of Record above; append() computes the delta itself
self.buffer.append(record)
if len(self.buffer) == self.buffersize:
self.append(at=now)
self.lastat = now
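# Usage sketch (illustrative values; assumes the target directory for the *.db files already exists
# and that another process feeds raw NMEA sentences into the queue):
if __name__ == '__main__':
    qp = MPQueue()
    session = DBSession(path='db', qp=qp, timeout=5, buffersize=32)
    session.start()
    # example NMEA sentence (illustrative only)
    qp.put(b'$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47')
    session.join()  # block until interrupted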
| [
"[email protected]"
] | |
573c13844654553a89dfe7f3a802ab62bfb20452 | efdc190e08c5ae51cbc3fe7345df252e17454890 | /sql_injection_escape.py | 69f05a2312ec1b7d11d48eea702d05e0009e95d8 | [
"BSD-3-Clause"
] | permissive | dafna972/WAF | bc15aef5ded69e029a8f690a6eb2d83d5b01bee2 | 8339f493dc202fcbf3660ba936621090934b1443 | refs/heads/master | 2020-04-25T12:46:27.829706 | 2019-02-26T20:53:47 | 2019-02-26T20:53:47 | 172,788,778 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | def sql_escape(req_body):
# escape all suspicious chars (regarding SQLi)
return req_body.replace("%27", "\\%27")\
.replace("%00", "\\%00")\
.replace("%08", "\\%08")\
.replace("%09", "\\%09")\
.replace("%0a", "\\%0a")\
.replace("%0d", "\\%0d")\
.replace("%1a", "\\%1a")\
.replace("%22", "\\%22")\
.replace("%25", "\\%25")\
.replace("%5c", "\\%5c")\
.replace("%5f", "\\%5f")
| [
"[email protected]"
] | |
644a87cc993ec601656954004027ab4fbed81f22 | 6e141361fc9268240d6a1d0a4ae7fb880adffb07 | /tests/unit/streamalert_cli/athena/test_helpers.py | 61294b4f8a3f40a029c028094621d89610a0ca9d | [
"Apache-2.0"
] | permissive | bellyfat/streamalert | 3e5ed4ab39f17eb7bda11a18f8c2d657146d4d4c | a0a284c6bfdb9de40e76fc7581627b5863445cc0 | refs/heads/master | 2022-12-01T05:46:26.929911 | 2020-04-10T23:10:48 | 2020-04-10T23:10:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,476 | py | """
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mock import patch
from nose.tools import assert_equal, assert_true
from streamalert_cli.athena import helpers
from streamalert_cli.config import CLIConfig
from streamalert.classifier.clients import FirehoseClient
CONFIG = CLIConfig(config_path='tests/unit/conf')
def test_generate_athena_schema_simple():
"""CLI - Generate Athena schema: simple"""
log_schema = CONFIG['logs']['unit_test_simple_log']['schema']
athena_schema = helpers.logs_schema_to_athena_schema(log_schema)
expected_athena_schema = {
'`unit_key_01`': 'bigint',
'`unit_key_02`': 'string'
}
assert_equal(athena_schema, expected_athena_schema)
def test_generate_athena_schema_special_key():
"""CLI - Generate Athena schema: special key"""
log_schema = CONFIG['logs']['test_log_type_json']['schema']
athena_schema = helpers.logs_schema_to_athena_schema(log_schema)
expected_athena_schema = {
'`key1`': 'array<string>',
'`key2`': 'string',
'`key3`': 'bigint',
'`key9`': 'boolean',
'`key10`': 'map<string,string>',
'`key11`': 'decimal(10,3)'
}
assert_equal(athena_schema, expected_athena_schema)
def test_generate_athena_schema_nested():
"""CLI - Generate Athena schema: nested"""
log_schema = CONFIG['logs']['test_log_type_json_nested_with_data']['schema']
athena_schema = helpers.logs_schema_to_athena_schema(log_schema)
expected_athena_schema = {
'`date`': 'string',
'`unixtime`': 'bigint',
'`host`': 'string',
'`application`': 'string',
'`environment`': 'string',
'`data`': {
'`category`': 'string',
'`type`': 'bigint',
'`source`': 'string'
}
}
assert_equal(athena_schema, expected_athena_schema)
def test_add_partition_statements():
"""CLI - Athena Add Partition Statement"""
partitions = {
'dt=2017-12-01-01',
'dt=2016-12-01-02',
'dt=2018-12-01-05',
'dt=2013-12-01-04',
}
expected_result = ("ALTER TABLE test ADD IF NOT EXISTS "
"PARTITION (dt = '2013-12-01-04') "
"LOCATION 's3://bucket/test/2013/12/01/04' "
"PARTITION (dt = '2016-12-01-02') "
"LOCATION 's3://bucket/test/2016/12/01/02' "
"PARTITION (dt = '2017-12-01-01') "
"LOCATION 's3://bucket/test/2017/12/01/01' "
"PARTITION (dt = '2018-12-01-05') "
"LOCATION 's3://bucket/test/2018/12/01/05'")
results = helpers.add_partition_statements(partitions, 'bucket', 'test')
results_copy = list(results)
assert_equal(len(results_copy), 1)
assert_equal(results_copy[0], expected_result)
@patch.object(helpers, 'MAX_QUERY_LENGTH', 256)
def test_add_partition_statements_exceed_length():
"""CLI - Athena Add Partition Statement when statement exceed max query length"""
partitions = {
'dt=2017-12-01-01',
'dt=2016-12-01-02',
'dt=2018-12-01-05',
'dt=2013-12-01-04',
}
results = helpers.add_partition_statements(partitions, 'bucket', 'test')
results_copy = list(results)
assert_equal(len(results_copy), 2)
expected_result_0 = ("ALTER TABLE test ADD IF NOT EXISTS "
"PARTITION (dt = '2013-12-01-04') "
"LOCATION 's3://bucket/test/2013/12/01/04' "
"PARTITION (dt = '2016-12-01-02') "
"LOCATION 's3://bucket/test/2016/12/01/02'")
expected_result_1 = ("ALTER TABLE test ADD IF NOT EXISTS "
"PARTITION (dt = '2017-12-01-01') "
"LOCATION 's3://bucket/test/2017/12/01/01' "
"PARTITION (dt = '2018-12-01-05') "
"LOCATION 's3://bucket/test/2018/12/01/05'")
assert_equal(results_copy[0], expected_result_0)
assert_equal(results_copy[1], expected_result_1)
# pylint: disable=protected-access
def test_generate_data_table_schema():
"""CLI - Athena generate_data_table_schema helper"""
config = CLIConfig(config_path='tests/unit/conf')
config['global']['infrastructure']['firehose']['enabled_logs'] = {
'test:log.name.with.dots': {}
}
assert_true(helpers.generate_data_table_schema(config, 'test:log.name.with.dots'))
FirehoseClient._ENABLED_LOGS.clear()
# pylint: disable=protected-access
def test_generate_data_table_schema_2():
"""CLI - Athena generate_data_table_schema helper"""
config = CLIConfig(config_path='tests/unit/conf')
config['global']['infrastructure']['firehose']['enabled_logs'] = {
'cloudwatch:test_match_types': {}
}
assert_true(helpers.generate_data_table_schema(config, 'cloudwatch:test_match_types'))
FirehoseClient._ENABLED_LOGS.clear()
| [
"[email protected]"
] | |
ebb7cc1a92ce4d62d536460981e70b4154a3e415 | 44e08242888922dcd425f457b89fb8f576f9ac28 | /jss_api.py | b742970545a7a84f53e3eb9ceba2d6a737ef9853 | [] | no_license | dSalazar10/Jamf | de80a7a04370ef806653bc0a34cf7bae5b5ab334 | bd44ea6f5a9a06b355fa5eb157b26ce040a6c7fd | refs/heads/master | 2021-09-20T06:22:24.668203 | 2018-08-05T20:01:50 | 2018-08-05T20:01:50 | 141,527,743 | 0 | 1 | null | 2018-07-24T18:46:02 | 2018-07-19T05:17:11 | Python | UTF-8 | Python | false | false | 1,693 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 19 17:19:59 2018
@author: vu7972
Inspired by sreyemnayr JSSAPI:
https://github.com/sreyemnayr/jamf_pro_api/blob/master/jssapi/jssapi.py
CoProducer jpsthecelt:
https://github.com/jpsthecelt/configNquery/blob/master/rdConfigNqueryBF.py
"""
import requests
import sys
import json
from requests.packages.urllib3.exceptions import InsecureRequestWarning
class JSSAPI:
def __init__(self, url='', head={"Accept": "application/json"}, user='', pwd=''):
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
self.url = url + '/JSSResource/'
self.head = head
self.auth = requests.auth.HTTPBasicAuth(user, pwd)
self.r = requests.Response
self.e = sys.exc_info()[0]
def get(self, method='mobiledevices'):
try:
self.r = requests.get(url=(self.url + method), headers=self.head, auth=self.auth)
if self.r.status_code != 200:
self.r.raise_for_status()
# Convert to JSON
json = self.r.json()
return next(iter(json.values()))
except:
self.e = sys.exc_info()[0] # was sys.exec_info[0], which is itself an error
return []
def set_auth(self, cfg_file='../credentials.json'):
try:
with open(cfg_file) as data_file:
data = json.load(data_file)
self.auth = requests.auth.HTTPBasicAuth(data["credentials"]["username"], data["credentials"]["password"])
except:
self.e = sys.exc_info()[0]
def queryBFviaRelevance(self, rVance):
self.set_auth()
self.get('/api/login')
self.get('/api/query?relevance=' + rVance)
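# Usage sketch (URL and credentials are placeholders, not real values):
# api = JSSAPI(url='https://jss.example.com:8443', user='admin', pwd='secret')
# devices = api.get('mobiledevices')   # list of device records, or [] if the request failed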
| [
"[email protected]"
] | |
3446191b847ea26f7fe1e44c40529ad84b9f76e8 | e24002c17b1a406edf15f5ed681a94f26be0e400 | /3.VariablesAndDataTypes.py | 57dadd81f92f7dc992cc387329d39b9469609eee | [] | no_license | Jason30102212/python_basics_refresher | ac6e65c0472db516a9ef81e9c3e31c3f72f67a57 | b8e6560c311139cb45c13c0947a67a64c99db547 | refs/heads/master | 2020-04-22T10:33:31.544806 | 2019-02-14T04:45:50 | 2019-02-14T04:45:50 | 170,309,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | #3.VariablesAndDataTypes
#Variables:
number = 2
name = "Jim"
print("The following number will be based on a variable: "+str(number) )
print("Name based on variable: "+name)
number = 5
name = "Jack"
print("The following number will be based on a variable: "+str(number) )
print("Name based on variable: "+name)
#DataTypes:
stringType = "This is a string"
numberType = 2.342
booleanType = True
| [
"[email protected]"
] | |
7c866ecffb76cea14f2093856b5a25d302269a79 | 999ed80db247794159be1d752bc6f0fc272bd117 | /tests/container_hardening/test_container_hardening.py | 6150f77f72f470bc5676f6750e08a825ff00ae58 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | ramakristipati/sonic-mgmt | 7fee876412f0121da96d751f7d199690c73496f3 | a86f0e5b1742d01b8d8a28a537f79bf608955695 | refs/heads/master | 2023-08-31T07:55:38.446663 | 2023-08-31T06:34:53 | 2023-08-31T06:34:53 | 315,448,103 | 2 | 0 | NOASSERTION | 2020-11-23T21:44:07 | 2020-11-23T21:44:07 | null | UTF-8 | Python | false | false | 970 | py | import pytest
import logging
from tests.common.helpers.assertions import pytest_assert
pytestmark = [
pytest.mark.topology('any'),
]
logger = logging.getLogger(__name__)
NO_PRIVILEGED_CONTAINERS = [
'bgp',
]
def test_container_privileged(duthost):
"""
Test container without --privileged flag has no access to /dev/vda* or /dev/sda*
"""
for container_name in NO_PRIVILEGED_CONTAINERS:
docker_exec_cmd = 'docker exec {} bash -c '.format(container_name)
cmd = duthost.shell(docker_exec_cmd + "'df -h | grep /etc/hosts' | awk '{print $1}'")
rc, device = cmd['rc'], cmd['stdout']
pytest_assert(rc == 0, 'Failed to get the device name.')
pytest_assert(device.startswith('/dev/'), 'Invalid device {}.'.format(device))
output = duthost.shell(docker_exec_cmd + "'ls {}'".format(device), module_ignore_errors=True)['stdout']
pytest_assert(not output, 'The partition {} exists.'.format(device))
| [
"[email protected]"
] | |
037ef6e7c30b36905415c7b5b3bc8f1ee0f50dc0 | b13271e8ca691771e15fa8a40166bad951f8d51d | /depreciated/FireModel.py | 468064e8319f7e9f4c112984e691ea2ec653e6bf | [] | no_license | nkuo/suds-fire-commercial | 4a7a90fba66e9b5fc84681af54602bae51c562ad | 8a7c0f73161a08c35231e35cbe14ab8ba8d87588 | refs/heads/master | 2021-09-04T18:36:18.604130 | 2018-01-21T03:37:49 | 2018-01-21T03:37:49 | 110,370,520 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 28,571 | py | #importing relevant libraries
import pandas as pd
import numpy as np
import sqlalchemy as sa
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
#%matplotlib inline
# Reading plidata - permits, licenses and inspection
plidata = pd.read_csv('data/pli.csv',encoding = 'utf-8',dtype={'STREET_NUM':'str','STREET_NAME':'str'})
#Reading city of Pittsburgh dataset - building info
pittdata = pd.read_csv('data/pittdata.csv',dtype={'PROPERTYADDRESS':'str','PROPERTYHOUSENUM':'str','STATEDESC':'str'})
#Reading 311 file - request for violation
calldata = pd.read_csv('data/311request.csv')
#Cleaning function for address column in calldata
def removingZeros(x):
x = x.replace(' ',' ')
if x[0] == '0' and x[1] == '0': # compare against the character '0'; the address fields are strings
x= x.replace('0','',2)
elif x[0] == '0':
x= x.replace('0','',1)
return x
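# e.g. removingZeros('0012 MAIN ST') -> '12 MAIN ST' (illustrative address, not taken from the data)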
#cleaning 311 data
calldata = calldata.dropna(subset = ['ADDRESS'])
calldata['ADDRESS'] = calldata['ADDRESS'].map(lambda x : removingZeros(x))
calldata['street_num'], calldata ['street_name'] = calldata['ADDRESS'].str.split(' ', 1).str
calldata['street_name'] = calldata ['street_name'].str.upper()
calldata['street_name'] = calldata ['street_name'].str.replace('av','ave')
#converting to datetime
calldata.Date = pd.to_datetime(calldata.Date)
calldata['call_year'] = calldata['Date'].map(lambda x: x.year)
#N- calldata not used after this..
#removing extra whitespaces
plidata['STREET_NAME'] = plidata['STREET_NAME'].str.strip()
plidata['STREET_NUM'] = plidata['STREET_NUM'].str.strip()
#removing residential data
pittdata = pittdata[pittdata.STATEDESC!='RESIDENTIAL']
pittdata = pittdata[pittdata.PROPERTYHOUSENUM!= '0']
pittdata = pittdata[pittdata.PROPERTYADDRESS!= '']
#dropping columns with less than 15% data
pittdata = pittdata.dropna(thresh=4000, axis=1)
pittdata = pittdata.rename(columns={pittdata.columns[0]:'PARID'})
pittdata = pittdata.drop_duplicates()
#merging pli with city of pitt
plipca = pd.merge(pittdata, plidata[['PARCEL','INSPECTION_DATE','INSPECTION_RESULT','VIOLATION']], how = 'left', left_on =['PARID'], right_on = ['PARCEL'] )
plipca = plipca.drop_duplicates()
#dropping nas
newpli = plipca.dropna(subset =['PARCEL','INSPECTION_DATE','INSPECTION_RESULT','VIOLATION'] ) #drop lines with NA
newpli = newpli.reset_index()
newpli = newpli.drop(['index','PARID','index',u'PROPERTYOWNER', #remove unneeded columns
u'PROPERTYCITY', u'PROPERTYSTATE', u'PROPERTYUNIT', u'PROPERTYZIP',
u'MUNICODE', u'MUNIDESC', u'SCHOOLCODE', u'SCHOOLDESC', u'NEIGHCODE',
u'TAXCODE', u'TAXDESC', u'OWNERCODE', u'OWNERDESC', u'STATECODE',
u'STATEDESC', u'USECODE', u'USEDESC', u'LOTAREA', u'SALEDATE',
u'SALEPRICE', u'SALECODE', u'SALEDESC', u'DEEDBOOK', u'DEEDPAGE',
u'AGENT', u'TAXFULLADDRESS1', u'TAXFULLADDRESS2', u'TAXFULLADDRESS3',
u'TAXFULLADDRESS4', u'CHANGENOTICEADDRESS1', u'CHANGENOTICEADDRESS2',
u'CHANGENOTICEADDRESS3', u'CHANGENOTICEADDRESS4', u'COUNTYBUILDING',
u'COUNTYLAND', u'COUNTYTOTAL', u'COUNTYEXEMPTBLDG', u'LOCALBUILDING',
u'LOCALLAND', u'LOCALTOTAL', u'FAIRMARKETBUILDING', u'FAIRMARKETLAND',
u'FAIRMARKETTOTAL', u'PARCEL'], axis=1)
newpli = newpli.drop_duplicates()
#converting to datetime
newpli.INSPECTION_DATE = pd.to_datetime(newpli.INSPECTION_DATE)
newpli['violation_year'] = newpli['INSPECTION_DATE'].map(lambda x: x.year)
plipca.SALEPRICE = plipca.SALEPRICE.replace('NaN',0)
#Groups by address and replaces 'LOTAREA','SALEPRICE','FAIRMARKETLAND','FAIRMARKETBUILDING' by their mean
numerical = plipca.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] , as_index=False)[['LOTAREA','SALEPRICE',
'FAIRMARKETLAND',
'FAIRMARKETBUILDING']].mean()
# Following blocks of code group by address and get the category with maximum count for each given categorical columns
temp = pd.DataFrame({'count' : plipca.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] ).STATEDESC.value_counts()}).reset_index()
idx = temp.groupby([ "PROPERTYHOUSENUM", "PROPERTYADDRESS"])['count'].transform(max) == temp['count']
result1 = temp[idx]
result1 = result1.drop_duplicates(subset=[ "PROPERTYHOUSENUM", "PROPERTYADDRESS"], keep = 'last')
del result1['count']
temp = pd.DataFrame({'count' : plipca.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] ).STATEDESC.value_counts()}).reset_index()
temp.groupby([ "PROPERTYHOUSENUM", "PROPERTYADDRESS"])['count'].transform(max)
idx = temp.groupby([ "PROPERTYHOUSENUM", "PROPERTYADDRESS"])['count'].transform(max) == temp['count']
result1 = temp[idx]
result1 = result1.drop_duplicates(subset=[ "PROPERTYHOUSENUM", "PROPERTYADDRESS"], keep = 'last')
del result1['count']
temp = pd.DataFrame({'count' : plipca.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] ).SCHOOLDESC.value_counts()}).reset_index()
idx = temp.groupby([ "PROPERTYHOUSENUM", "PROPERTYADDRESS"])['count'].transform(max) == temp['count']
result2 = temp[idx]
result2 = result2.drop_duplicates(subset=[ "PROPERTYHOUSENUM", "PROPERTYADDRESS"], keep = 'last')
del result2['count']
temp = pd.DataFrame({'count' : plipca.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] ).OWNERDESC.value_counts()}).reset_index()
idx = temp.groupby([ "PROPERTYHOUSENUM", "PROPERTYADDRESS"])['count'].transform(max) == temp['count']
result3 = temp[idx]
result3 = result3.drop_duplicates(subset=[ "PROPERTYHOUSENUM", "PROPERTYADDRESS"], keep = 'last')
del result3['count']
temp = pd.DataFrame({'count' : plipca.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] ).MUNIDESC.value_counts()}).reset_index()
idx = temp.groupby([ "PROPERTYHOUSENUM", "PROPERTYADDRESS"])['count'].transform(max) == temp['count']
result4 = temp[idx]
result4 = result4.drop_duplicates(subset=[ "PROPERTYHOUSENUM", "PROPERTYADDRESS"], keep = 'last')
del result4['count']
temp = pd.DataFrame({'count' : plipca.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] ).INSPECTION_RESULT.value_counts()}).reset_index()
idx = temp.groupby([ "PROPERTYHOUSENUM", "PROPERTYADDRESS"])['count'].transform(max) == temp['count']
result5 = temp[idx]
result5 = result5.drop_duplicates(subset=[ "PROPERTYHOUSENUM", "PROPERTYADDRESS"], keep = 'last')
del result5['count']
temp = pd.DataFrame({'count' : plipca.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] ).NEIGHCODE.value_counts()}).reset_index()
idx = temp.groupby([ "PROPERTYHOUSENUM", "PROPERTYADDRESS"])['count'].transform(max) == temp['count']
result6 = temp[idx]
result6 = result6.drop_duplicates(subset=[ "PROPERTYHOUSENUM", "PROPERTYADDRESS"], keep = 'last')
del result6['count']
temp = pd.DataFrame({'count' : plipca.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] ).TAXDESC.value_counts()}).reset_index()
idx = temp.groupby([ "PROPERTYHOUSENUM", "PROPERTYADDRESS"])['count'].transform(max) == temp['count']
result7 = temp[idx]
result7 = result7.drop_duplicates(subset=[ "PROPERTYHOUSENUM", "PROPERTYADDRESS"], keep = 'last')
del result7['count']
temp = pd.DataFrame({'count' : plipca.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] ).USEDESC.value_counts()}).reset_index()
idx = temp.groupby([ "PROPERTYHOUSENUM", "PROPERTYADDRESS"])['count'].transform(max) == temp['count']
result8 = temp[idx]
result8 = result8.drop_duplicates(subset=[ "PROPERTYHOUSENUM", "PROPERTYADDRESS"], keep = 'last')
del result8['count']
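# The eight blocks above repeat the same "most frequent category per address" pattern (note that the second
# block recomputes STATEDESC into result1, and result5 from INSPECTION_RESULT is never merged into dfs below).
# A helper like this sketch expresses the pattern once; it is not wired into the pipeline here:
def most_common_by_address(df, col):
    counts = pd.DataFrame({'count': df.groupby(["PROPERTYHOUSENUM", "PROPERTYADDRESS"])[col].value_counts()}).reset_index()
    top = counts[counts.groupby(["PROPERTYHOUSENUM", "PROPERTYADDRESS"])['count'].transform(max) == counts['count']]
    top = top.drop_duplicates(subset=["PROPERTYHOUSENUM", "PROPERTYADDRESS"], keep='last')
    return top.drop('count', axis=1)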
dfs = [result1,result2,result3,result4,result6,result7,result8,numerical]
from functools import reduce #reduce is a builtin on Python 2 but must be imported on Python 3
pcafinal = reduce(lambda left,right: pd.merge(left,right,on= [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] ), dfs)
plipca1 = pd.merge(pcafinal, newpli, how = 'left', left_on =[ "PROPERTYHOUSENUM", "PROPERTYADDRESS"], right_on = [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] )
#N- features cleaning done (plipca1 has all features)
#loading fire incidents csvs
fire_pre14 = pd.read_csv('data/Fire_Incidents_Pre14.csv',encoding = 'latin-1',dtype={'street':'str','number':'str'})
#cleaning columns of fire_pre14
fire_pre14['full.code'] = fire_pre14['full.code'].str.replace(' -',' -')
fire_pre14['st_type'] = fire_pre14['st_type'].str.strip()
fire_pre14['street'] = fire_pre14['street'].str.strip()
fire_pre14['number'] = fire_pre14['number'].str.strip()
fire_pre14['st_type'] = fire_pre14['st_type'].str.replace('AV','AVE')
fire_pre14['street'] = fire_pre14['street'].str.strip() +' ' +fire_pre14['st_type'].str.strip()
#reading the fire_historicalfile
fire_historical = pd.read_csv('data/Fire_Incidents_Historical.csv',encoding = 'utf-8',dtype={'street':'str','number':'str'})
#deleting columns not required - N- manual removal of features
del fire_historical['inci_id']
del fire_historical['alm_dttm']
del fire_historical['arv_dttm']
del fire_historical['pbf_narcan']
del fire_historical['meds_glucose']
del fire_historical['meds_epi']
del fire_historical['meds_nitro']
del fire_historical['pbf_albut']
del fire_historical['cpr']
del fire_historical['car_arr']
del fire_historical['aed']
del fire_historical['none']
del fire_historical['pbf_lift_ass']
del fire_historical['Med_Assist']
del fire_historical['XCOORD']
del fire_historical['YCOORD']
del fire_historical['LOCATION']
del fire_historical['REP_DIST']
del fire_historical['alarms']
del fire_historical['inci_type']
del fire_historical['Lift_Ref']
del fire_historical['Card_CPR']
del fire_historical['AGENCY']
del fire_historical['PRIMARY_UNIT']
del fire_historical['MAP_PAGE']
del fire_historical['CURR_DGROUP']
del fire_historical['CALL_NO']
del fire_pre14['PRIMARY_UNIT']
del fire_pre14['MAP_PAGE']
del fire_pre14['alm_dttm']
del fire_pre14['arv_dttm']
del fire_pre14['XCOORD']
del fire_pre14['YCOORD']
del fire_pre14['inci_id']
del fire_pre14['inci_type']
del fire_pre14['alarms']
del fire_pre14['st_prefix']
del fire_pre14['st_suffix']
del fire_pre14['st_type']
del fire_pre14['CALL_NO']
cols = [0,4]
fire_pre14.drop(fire_pre14.columns[cols],axis=1,inplace=True)
#joining both the fire incidents file together
fire_historical = fire_historical.append(fire_pre14, ignore_index=True)
#more cleaning and removing descriptions which are not fire related
fire_historical['descript'] = fire_historical['descript'].str.strip()
fire_historical = fire_historical[fire_historical.descript != 'System malfunction, Other']
# fire_historical = fire_historical[fire_historical.descript != 'Smoke detector activation, no fire - unintentional']
# fire_historical = fire_historical[fire_historical.descript != 'Alarm system activation, no fire - unintentional']
fire_historical = fire_historical[fire_historical.descript != 'Detector activation, no fire - unintentional']
fire_historical = fire_historical[fire_historical.descript != 'Smoke detector activation due to malfunction']
fire_historical = fire_historical[fire_historical.descript != 'Dispatched & cancelled en route']
fire_historical = fire_historical[fire_historical.descript != 'Dispatched & cancelled on arrival']
fire_historical = fire_historical[fire_historical.descript != 'EMS call, excluding vehicle accident with injury']
fire_historical = fire_historical[fire_historical.descript != 'Medical assist, assist EMS crew']
fire_historical = fire_historical[fire_historical.descript != 'Emergency medical service, other']
fire_historical = fire_historical[fire_historical.descript != 'Good intent call, Other']
fire_historical = fire_historical[fire_historical.descript != 'Rescue, EMS incident, other']
fire_historical = fire_historical[fire_historical.descript != 'Medical Alarm Activation (No Medical Service Req)']
fire_historical = fire_historical[fire_historical.descript != 'Motor Vehicle Accident with no injuries']
fire_historical = fire_historical[fire_historical.descript != 'No Incident found on arrival at dispatch address']
fire_historical = fire_historical[fire_historical.descript != 'Unintentional transmission of alarm, Other']
fire_historical = fire_historical[fire_historical.descript != 'Motor vehicle accident with injuries']
fire_historical = fire_historical[fire_historical.descript != 'Vehicle accident, general cleanup']
fire_historical = fire_historical[fire_historical.descript != 'Power line down']
fire_historical = fire_historical[fire_historical.descript != 'Person in distress, Other']
fire_historical = fire_historical[fire_historical.descript != 'Cable/Telco Wires Down']
fire_historical = fire_historical[fire_historical.descript != 'Service Call, other']
fire_historical = fire_historical[fire_historical.descript != 'Vehicle Accident canceled en route']
fire_historical = fire_historical[fire_historical.descript != 'Lock-out']
fire_historical = fire_historical[fire_historical.descript != 'False alarm or false call, Other']
fire_historical = fire_historical[fire_historical.descript != 'Assist police or other governmental agency']
fire_historical = fire_historical[fire_historical.descript != 'Special type of incident, Other']
fire_historical = fire_historical[fire_historical.descript != 'Alarm system sounded due to malfunction']
fire_historical = fire_historical[fire_historical.descript != 'Motor vehicle/pedestrian accident (MV Ped)']
fire_historical = fire_historical[fire_historical.descript != 'Assist invalid ']
fire_historical = fire_historical[fire_historical.descript != 'Malicious, mischievous false call, Other']
fire_historical = fire_historical[fire_historical.descript != 'Accident, potential accident, Other']
fire_historical = fire_historical[fire_historical.descript != 'Assist invalid']
fire_historical = fire_historical[fire_historical.descript != 'EMS call, party transported by non-fire agency']
fire_historical = fire_historical[fire_historical.descript != 'Rescue or EMS standby']
fire_historical = fire_historical[fire_historical.descript != 'Public service assistance, Other']
fire_historical = fire_historical[fire_historical.descript != 'Police matter']
fire_historical = fire_historical[fire_historical.descript != 'Lock-in (if lock out , use 511 )']
fire_historical = fire_historical[fire_historical.descript != 'Sprinkler activation, no fire - unintentional']
fire_historical = fire_historical[fire_historical.descript != 'Wrong location']
fire_historical = fire_historical[fire_historical.descript != 'Local alarm system, malicious false alarm']
fire_historical = fire_historical[fire_historical.descript != 'Authorized controlled burning']
fire_historical = fire_historical[fire_historical.descript != 'Water problem, Other']
# fire_historical = fire_historical[fire_historical.descript != 'Smoke or odor removal']
fire_historical = fire_historical[fire_historical.descript != 'Passenger vehicle fire']
fire_historical = fire_historical[fire_historical.descript != 'CO detector activation due to malfunction']
fire_historical = fire_historical[fire_historical.descript != 'Authorized controlled burning']
fire_historical = fire_historical[fire_historical.descript != 'Steam, vapor, fog or dust thought to be smoke']
fire_historical = fire_historical[fire_historical.descript != 'Overheated motor']
fire_historical = fire_historical[fire_historical.descript != 'Local alarm system, malicious false alarm']
fire_historical = fire_historical[fire_historical.descript != 'Central station, malicious false alarm']
fire_historical = fire_historical[fire_historical.descript != 'Public service']
# fire_historical = fire_historical[fire_historical.descript != 'Building or structure weakened or collapsed']
fire_historical = fire_historical[fire_historical.descript != 'Heat detector activation due to malfunction']
fire_historical = fire_historical[fire_historical.descript != 'Citizen complaint']
fire_historical = fire_historical[fire_historical.descript != 'Municipal alarm system, malicious false alarm']
fire_historical = fire_historical[fire_historical.descript != 'Sprinkler activation due to malfunction']
fire_historical = fire_historical[fire_historical.descript != 'Severe weather or natural disaster, Other']
fire_historical = fire_historical[fire_historical.descript != 'Water evacuation']
fire_historical = fire_historical[fire_historical.descript != 'Breakdown of light ballast']
fire_historical = fire_historical[fire_historical.descript != 'Extrication of victim(s) from vehicle']
fire_historical = fire_historical[fire_historical.descript != 'Flood assessment']
fire_historical = fire_historical[fire_historical.descript != 'Telephone, malicious false alarm']
fire_historical = fire_historical[fire_historical.descript != 'Cover assignment, standby, moveup']
fire_historical = fire_historical[fire_historical.descript != 'Road freight or transport vehicle fire']
fire_historical = fire_historical[fire_historical['full.code'].str.strip() != '540 - Animal problem, Other']
fire_historical = fire_historical[fire_historical['full.code'].str.strip() != '5532 - Public Education (Station Visit)']
fire_historical = fire_historical[fire_historical['full.code'].str.strip() != '353 - Removal of victim(s) from stalled elevator']
#correcting problems with the street column
fire_historical['street'] = fire_historical['street'].replace(to_replace=', PGH', value='', regex=True)
fire_historical['street'] = fire_historical['street'].replace(to_replace=', P', value='', regex=True)
fire_historical['street'] = fire_historical['street'].replace(to_replace=',', value='', regex=True)
fire_historical['street'] = fire_historical['street'].replace(to_replace='#.*', value='', regex=True)
fire_historical['street'] = fire_historical['street'].str.strip()
fire_historical['number'] = fire_historical['number'].str.strip()
#converting to date time and extracting year
fireDate, fireTime = fire_historical['CALL_CREATED_DATE'].str.split(' ', 1).str
fire_historical['CALL_CREATED_DATE']= fireDate
fire_historical['CALL_CREATED_DATE'] = pd.to_datetime(fire_historical['CALL_CREATED_DATE'])
fire_historical['fire_year'] = fire_historical['CALL_CREATED_DATE'].map(lambda x: x.year)
#removing all codes with fewer than 20 occurrences
for col,val in fire_historical['full.code'].value_counts().iteritems():
if val <20 and col[0]!= '1':
fire_historical = fire_historical[fire_historical['full.code'] != col]
fire_historical = fire_historical.drop_duplicates()
#joining plipca with fireincidents -N final data!
pcafire = pd.merge(plipca1, fire_historical, how = 'left', left_on =['PROPERTYADDRESS','PROPERTYHOUSENUM'],
right_on = ['street','number'])
# making the fire column with all type 100s as fires -N: important!
pcafire['fire'] = pcafire['full.code'].astype(str).str[0]
pcafire.loc[pcafire.fire == '1', 'fire'] = 'fire'
pcafire.loc[pcafire.fire != 'fire', 'fire'] = 'No fire'
pcafire.loc[pcafire['fire'] == 'fire', 'full.code'] = None
#Fire occured after inspection
pcafire1 = pcafire[(pcafire.CALL_CREATED_DATE >= pcafire.INSPECTION_DATE )]
pcafire1 = pcafire1[pd.notnull(pcafire1.INSPECTION_DATE)]
#checking if violation is in the same year as the fire and keeping only those -N: the "time dependent" feature generation?"
pcafire2 = pcafire1[(pcafire1.violation_year == pcafire1.fire_year)]
#joining all rows with no pli violations
fire_nopli = pd.concat([fire_historical, pcafire2[['number','street','CALL_CREATED_DATE','full.code','response_time','fire_year']], pcafire2[['number','street','CALL_CREATED_DATE','full.code','response_time','fire_year']]]).drop_duplicates(keep=False)
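#Note: concatenating fire_historical with the pcafire2 subset twice and then calling
#drop_duplicates(keep=False) is intended as an anti-join -- rows that also appear in the
#subset occur three times and are all dropped, leaving only incidents without a matching
#PLI violation (this relies on both frames sharing the same columns).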
pcafire_nopli = pd.merge(pcafinal, fire_nopli, how = 'left', left_on =['PROPERTYADDRESS','PROPERTYHOUSENUM'],
right_on = ['street','number'])
pcafire_nopli['fire'] = pcafire_nopli['full.code'].astype(str).str[0]
pcafire_nopli.loc[pcafire_nopli.fire == '1', 'fire'] = 'fire'
pcafire_nopli.loc[pcafire_nopli.fire != 'fire', 'fire'] = 'No fire'
pcafire_nopli.loc[pcafire_nopli['fire'] == 'fire', 'full.code'] = None
#combined_df is the final file
combined_df = pcafire_nopli.append(pcafire2, ignore_index=True)
combined_df.to_csv('data/Final_Combined_Df.csv')
#N- =================== DATA CLEANING DONE, START MODEL =======================
#importing the necessary libraries
from sklearn import datasets, linear_model, cross_validation, grid_search
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import KFold, StratifiedKFold, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.grid_search import GridSearchCV
from sklearn_pandas import DataFrameMapper
from sklearn.preprocessing import OneHotEncoder
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from xgboost import XGBClassifier
from sklearn.ensemble import ExtraTreesClassifier
import matplotlib.pyplot as plt  # used for the ROC curve and feature-importance plots below
#Reading the cleaned dataset
# combined_df = pd.read_csv('Final_Combined_Df.csv')
#Removing vacant commercial land
combined_df = combined_df[combined_df.USEDESC!= 'VACANT COMMERCIAL LAND']
#converting back to 1 and 0
combined_df['fire'] = combined_df['fire'].map({'fire': 1, 'No fire': 0})
#one hot encoding the features -N: make categorical values into numeric values
ohe9 = pd.get_dummies(combined_df['VIOLATION'])
ohe8 = pd.get_dummies(combined_df['full.code'])
ohe10 = pd.get_dummies(combined_df['INSPECTION_RESULT'])
#concatenating the features together -N:this is all the fields we're using!?
combined_df1 = pd.concat([combined_df[['PROPERTYADDRESS','PROPERTYHOUSENUM','CALL_CREATED_DATE','fire','fire_year']],ohe8,ohe9,ohe10], axis=1)
#-N: ==split into test and train data from here
#PREPARING THE TESTING DATA (6 months of data)
testdata = combined_df1[combined_df1.CALL_CREATED_DATE > '12/01/2016']
testdata2 = testdata.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS",'CALL_CREATED_DATE','fire_year'] ).sum().reset_index()
del testdata['CALL_CREATED_DATE']
del testdata['fire_year']
#testdata2 = testdata.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] ).sum().reset_index() #,'CALL_CREATED_DATE','fire_year'
testdata2.loc[testdata2.fire != 0, 'fire'] = 1
nofire2017 = pd.concat([pcafinal[["PROPERTYHOUSENUM","PROPERTYADDRESS"]], testdata2[["PROPERTYHOUSENUM","PROPERTYADDRESS"]],testdata2[["PROPERTYHOUSENUM","PROPERTYADDRESS"]]]).drop_duplicates(keep=False)
testdata2 = testdata2.append(nofire2017, ignore_index=True)
testdata2 = testdata2.fillna(0)
test_data = pd.merge(testdata2,pcafinal, on = ["PROPERTYHOUSENUM", "PROPERTYADDRESS"], how = 'left')
#test_data.fire.value_counts()
#One hot encoding the features for the test set
ohe1 = pd.get_dummies(test_data['STATEDESC'])
ohe2 = pd.get_dummies(test_data['SCHOOLDESC'])
ohe3 = pd.get_dummies(test_data['OWNERDESC'])
ohe4 = pd.get_dummies(test_data['MUNIDESC'])
ohe5 = pd.get_dummies(test_data['NEIGHCODE'])
ohe6 = pd.get_dummies(test_data['TAXDESC'])
ohe7 = pd.get_dummies(test_data['USEDESC'])
#N- used later when creating the results excel sheet
state_desc = test_data['STATEDESC']
school_desc= test_data['SCHOOLDESC']
owner_desc= test_data['OWNERDESC']
muni_desc= test_data['MUNIDESC']
neigh_desc= test_data['NEIGHCODE']
tax_desc= test_data['TAXDESC']
use_desc= test_data['USEDESC']
#capture the address strings now, before the house number and street columns are deleted below
addresses = test_data['PROPERTYHOUSENUM'] +' '+ test_data['PROPERTYADDRESS']
#Deleting features not required anymore or already one hot encoded for the model
del test_data['CALL_CREATED_DATE']
del test_data['STATEDESC']
del test_data['SCHOOLDESC']
del test_data['OWNERDESC']
del test_data['MUNIDESC']
del test_data['NEIGHCODE']
del test_data['TAXDESC']
del test_data['USEDESC']
del test_data['fire_year']
del test_data['PROPERTYADDRESS']
del test_data['PROPERTYHOUSENUM']
#Concatenating everything back together
encoded_testdata = pd.concat([test_data,ohe1,ohe2,ohe3,ohe4,ohe5,ohe6,ohe7], axis=1)
#PREPARING THE TRAINING DATA
#Everything up to 1st December 2016 is training data
traindata1 = combined_df1[combined_df1.CALL_CREATED_DATE <= '12/01/2016']
#Combining multiple instances of an address together
traindata = traindata1.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS",'CALL_CREATED_DATE','fire_year'] ).sum().reset_index()
#Relabeling them
traindata.loc[traindata.fire != 0, 'fire'] = 1
#concatenating non fire, non pca and fire instances together
nofire_train = pd.concat([pcafinal[["PROPERTYHOUSENUM","PROPERTYADDRESS"]], traindata[["PROPERTYHOUSENUM","PROPERTYADDRESS"]],traindata[["PROPERTYHOUSENUM","PROPERTYADDRESS"]]]).drop_duplicates(keep=False)
traindata = traindata.append(nofire_train, ignore_index=True)
traindata = traindata.fillna(0)
train_data = pd.merge(traindata,pcafinal, on = ["PROPERTYHOUSENUM", "PROPERTYADDRESS"], how = 'left')
#train_data.fire.value_counts()
#creating one-hot encoded features for the categorical values
ohe1 = pd.get_dummies(train_data['STATEDESC'])
ohe2 = pd.get_dummies(train_data['SCHOOLDESC'])
ohe3 = pd.get_dummies(train_data['OWNERDESC'])
ohe4 = pd.get_dummies(train_data['MUNIDESC'])
ohe5 = pd.get_dummies(train_data['NEIGHCODE'])
ohe6 = pd.get_dummies(train_data['TAXDESC'])
ohe7 = pd.get_dummies(train_data['USEDESC'])
#deleting the categories
del train_data['STATEDESC']
del train_data['CALL_CREATED_DATE']
del train_data['SCHOOLDESC']
del train_data['OWNERDESC']
del train_data['MUNIDESC']
del train_data['NEIGHCODE']
del train_data['TAXDESC']
del train_data['USEDESC']
del train_data['fire_year']
del train_data['PROPERTYADDRESS']
del train_data['PROPERTYHOUSENUM']
#concatenating all the created features together
encoded_traindata = pd.concat([train_data,ohe1,ohe2,ohe3,ohe4,ohe5,ohe6,ohe7], axis=1)
#converting to array and reshaping the data to prep for model
fireVarTrain = encoded_traindata['fire']
del encoded_traindata['fire']
X_train = np.array(encoded_traindata)
y_train = np.reshape(fireVarTrain.values,[fireVarTrain.shape[0],])
#converting to array and reshaping the data to prep for model
fireVarTest = encoded_testdata['fire']
del encoded_testdata['fire']
X_test = np.array(encoded_testdata)
y_test = np.reshape(fireVarTest.values,[fireVarTest.shape[0],])
#The XG Boost model
#Grid Search was taking too long to run, hence hyperparameter tuning was done manually,
#arriving at the parameters below, which gave the best result
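#For reference, the skipped grid search would have looked roughly like the sketch below
#(kept commented out; the parameter grid here is illustrative, not the one actually tried):
#param_grid = {'max_depth': [3, 5, 7], 'learning_rate': [0.05, 0.1, 0.13], 'subsample': [0.8, 1.0]}
#search = GridSearchCV(XGBClassifier(n_estimators=500, objective='binary:logistic'),
#                      param_grid, scoring='roc_auc', cv=3)
#search.fit(X_train, y_train)
#print search.best_params_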
model = XGBClassifier( learning_rate =0.13,
n_estimators=1500,
max_depth=5,min_child_weight=1,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
nthread=4,
seed=27)
model.fit(X_train, y_train)
pred = model.predict(X_test)
real = y_test
cm = confusion_matrix(real, pred)
print confusion_matrix(real, pred)
from sklearn.metrics import cohen_kappa_score
kappa = cohen_kappa_score(real, pred)
fpr, tpr, thresholds = metrics.roc_curve(y_test, pred, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
print 'Accuracy = ', float(cm[0][0] + cm[1][1])/len(real)
print 'kappa score = ', kappa
print 'AUC Score = ', metrics.auc(fpr, tpr)
print 'recall = ',tpr[1]
print 'precision = ',float(cm[1][1])/(cm[1][1]+cm[0][1])
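#The same numbers can be sanity-checked with sklearn's built-in report, e.g.:
#print metrics.classification_report(real, pred)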
#Getting the probability scores
predictions = model.predict_proba(X_test)
#addresses were captured earlier, before PROPERTYHOUSENUM and PROPERTYADDRESS were dropped from test_data
#Addresses with fire and risk score
risk = []
for row in predictions:
risk.append(row[1])
cols = {"Address":addresses,"Fire":pred,"RiskScore":risk,"state_desc":state_desc,"school_desc":school_desc,
"owner_desc":owner_desc,"muni_desc":muni_desc,"neigh_desc":neigh_desc,"tax_desc":tax_desc,"use_desc":use_desc}
Results = pd.DataFrame(cols)
#Writing results as a csv
Results.to_csv('data/Results.csv')
#N- ==== some concluding analysis ====
#Plotting the ROC curve
plt.title('Receiver Operating Characteristic')
plt.plot(fpr[1:], tpr[1:], 'b',
label='AUC = %0.2f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.2])
plt.ylim([-0.1,1.2])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
#Tree model for getting features importance
clf = ExtraTreesClassifier()
clf = clf.fit(X_train, y_train)
clf.feature_importances_
UsedDf = encoded_traindata
import pandas as pd
important_features = pd.Series(data=clf.feature_importances_,index=UsedDf.columns)
important_features.sort_values(ascending=False,inplace=True)
#top 20 features
print important_features[0:20]
#Plotting the top 20 features
y_pos = np.arange(len(important_features.index[0:20]))
plt.bar(y_pos,important_features.values[0:20], alpha=0.3)
plt.xticks(y_pos, important_features.index[0:20], rotation = (90), fontsize = 11, ha='left')
plt.ylabel('Feature Importance Scores')
plt.title('Feature Importance')
plt.show()
# Implementing PCA
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA
#Scaling the values
X = scale(X_train)
pca = PCA(n_components=820)
pca.fit(X)
var= pca.explained_variance_ratio_
#Cumulative Variance explains
var1=np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4)*100)
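#For example, the number of components needed to explain at least 95% of the variance
#could be read off the cumulative curve (sketch): n_95 = np.argmax(var1 >= 95) + 1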
plt.semilogy(var, '--o');
plt.semilogy(var1, '--o');
plt.show()
| [
"[email protected]"
] | |
9979c413406f18379a77e1c667bf53fd290ccd31 | 8aea9270d2941377500aaac06b112908e706aa6f | /bitcoin/basics/final/constants.py | 4a193527c6dae6616bb188e4d10bbed53a42b0a1 | [] | no_license | oscarsernarosero/blockchain | e4a10163e8fc4cd2cf89cb2101693cba94566b5a | fdff5506f8b590d0a4a02598dbf01e8337116988 | refs/heads/master | 2023-01-28T07:59:32.288165 | 2020-12-09T03:54:22 | 2020-12-09T03:54:22 | 238,325,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | FALSE = 0
TRUE = 1
P2PKH = 2
P2SH = 3
P2WPKH = 4
P2WSH = 5
P2SH_P2WPKH = 6
P2SH_P2WSH = 7 | [
"[email protected]"
] | |
138416d9407f0ecf82210259f71be5495feb8791 | f4b2c035ad1aa18bed1b67ceb971eb4e535d6dcf | /medhacks17/wsgi.py | 7bae2dd2c93a127853c4dced5469378208f75ed4 | [] | no_license | thejonathanma/medhacks17 | 222a10f3982f8937b016a536bc7cf269ae2297e2 | b9c02e674c6a054512817190a8c122e0f48f1328 | refs/heads/master | 2021-01-23T22:20:06.574900 | 2017-09-10T03:44:04 | 2017-09-10T03:44:04 | 102,928,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | """
WSGI config for medhacks17 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "medhacks17.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
a39e3c581d7f281d9d39e3c07cb80e2daced9831 | 532e337751c44b89e68f0022966d6295116928a9 | /client/commands/check.py | a4c1104173a880f18eb681db839b4e013cce5d44 | [
"MIT"
] | permissive | laashub-soa/pyre-check | 8f1a2717888a22c15a7f6608e0d732e62fa060f9 | cc1a1b5c1007bf3e0e52e7f8b04c8e8fc365db44 | refs/heads/master | 2022-04-13T12:12:46.317095 | 2020-04-11T03:39:21 | 2020-04-11T03:41:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,891 | py | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from logging import Logger
from typing import List, Optional
from ..analysis_directory import AnalysisDirectory, resolve_analysis_directory
from ..configuration import Configuration
from .command import ExitCode, typeshed_search_path
from .reporting import Reporting
LOG: Logger = logging.getLogger(__name__)
class Check(Reporting):
NAME = "check"
def __init__(
self,
arguments: argparse.Namespace,
original_directory: str,
configuration: Optional[Configuration] = None,
analysis_directory: Optional[AnalysisDirectory] = None,
) -> None:
super(Check, self).__init__(
arguments, original_directory, configuration, analysis_directory
)
@classmethod
def add_subparser(cls, parser: argparse._SubParsersAction) -> None:
check = parser.add_parser(
cls.NAME,
epilog="""
Runs a one-time check of a project without initializing a type check server.
""",
)
check.set_defaults(command=cls)
def generate_analysis_directory(self) -> AnalysisDirectory:
return resolve_analysis_directory(
self._arguments,
self._configuration,
self._original_directory,
self._current_directory,
isolate=True,
)
def _flags(self) -> List[str]:
flags = super()._flags()
filter_directories = self._get_directories_to_analyze()
if len(filter_directories):
flags.extend(["-filter-directories", ";".join(sorted(filter_directories))])
flags.extend(["-workers", str(self._number_of_workers)])
search_path = self._configuration.search_path + typeshed_search_path(
self._configuration.typeshed
)
if len(self._configuration.ignore_all_errors):
flags.extend(
[
"-ignore-all-errors",
";".join(sorted(self._configuration.ignore_all_errors)),
]
)
if search_path:
flags.extend(["-search-path", ",".join(search_path)])
excludes = self._configuration.excludes
for exclude in excludes:
flags.extend(["-exclude", exclude])
extensions = self._configuration.extensions
for extension in extensions:
flags.extend(["-extension", extension])
return flags
def _run(self, retries: int = 1) -> None:
self._analysis_directory.prepare()
result = self._call_client(command="check")
errors = self._get_errors(result)
self._print(errors)
if errors:
self._exit_code = ExitCode.FOUND_ERRORS
| [
"[email protected]"
] | |
f2a579a60c4cd35a2c5881d5eaa956df42173082 | e11f1025ae854b2fee71ad2f3d5f7cdbd48b6c72 | /mygame01.py | 59b6e530b9fe14e220e2fbabb6bcd8cab0d0f0d2 | [] | no_license | mikesaccount/mycode | 6f924383bfa473ef99ff4c38070ff06de8a73c99 | af078d98895a70170769fa7a195b966810405145 | refs/heads/master | 2020-05-04T00:48:37.334269 | 2019-04-04T16:45:13 | 2019-04-04T16:45:13 | 178,891,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,199 | py | #!/usr/bin/env python3
# Replace RPG starter project with this code when new instructions are live
def showInstructions():
#print a main menu and the commands
print('''
RPG Game
========
Commands:
go [direction]
get [item]
''')
def showStatus():
#print the player's current status
print('---------------------------')
print('You are in the ' + currentRoom)
#print the current inventory
print('Inventory : ' + str(inventory))
#print an item if there is one
if "item" in rooms[currentRoom]:
print('You see a ' + rooms[currentRoom]['item'])
print("---------------------------")
#an inventory, which is initially empty
inventory = []
#a dictionary linking a room to other rooms
rooms = {
'Hall' : {
'south' : 'Kitchen'
},
'Kitchen' : {
'north' : 'Hall'
}
}
#start the player in the Hall
currentRoom = 'Hall'
showInstructions()
#loop forever
while True:
showStatus()
#get the player's next 'move'
#.split() breaks it up into an list array
#eg typing 'go east' would give the list:
#['go','east']
move = ''
while move == '':
move = input('>')
move = move.lower().split()
#if they type 'go' first
if move[0] == 'go':
#check that they are allowed wherever they want to go
if move[1] in rooms[currentRoom]:
#set the current room to the new room
currentRoom = rooms[currentRoom][move[1]]
#there is no door (link) to the new room
else:
print('You can\'t go that way!')
#if they type 'get' first
if move[0] == 'get' :
#if the room contains an item, and the item is the one they want to get
if "item" in rooms[currentRoom] and move[1] in rooms[currentRoom]['item']:
#add the item to their inventory
inventory += [move[1]]
#display a helpful message
print(move[1] + ' got!')
#delete the item from the room
del rooms[currentRoom]['item']
#otherwise, if the item isn't there to get
else:
#tell them they can't get it
print('Can\'t get ' + move[1] + '!')
| [
"[email protected]"
] | |
d9c34f82754e052494324bd101103e86d0390101 | 1ce0d2e83367ca96137ff21e20c63cd6bd682316 | /Python/SortIPs.py | 3d1155e2c75227fb35e81070f5fe94fda3f4fbe0 | [] | no_license | raystyle/Small-scripts-and-one-liners | d361b791deed5ac3f16df78a0619a6c044f683d3 | 9562f66c9f223550743138a8f7a0e9decd3b8386 | refs/heads/master | 2020-06-15T17:04:29.849060 | 2017-08-22T00:13:50 | 2017-08-22T00:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | #!/usr/bin/python
import argparse
import os
import re
# Grab our argument values with ArgParse
parser = argparse.ArgumentParser(description='Sort IPs in file and kill dupes')
parser.add_argument('-f', '--file', help='The file containing unsorted IP addresses', action='store')
args = parser.parse_args()
inputFile = args.file
# Define regex for an IP address (this will match IPv4 addresses,
# but will also match things like 111.333.555.999)
r = re.compile('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
# Declare uniqueIpList as an empty dict
uniqueIpList = []
# Get the IPs out of the file and kill dupes
with open(inputFile) as file:
ipList = file.read().split('\n')
for line in ipList:
if r.search(line): # Ignore anything that isn't an IP
if line in uniqueIpList: # Ignore duplicate matches
pass
else:
uniqueIpList.append(line)
#Pre-process each item, changing '192.168.1.22' into '192.168. 1. 22'
for i in range(len(uniqueIpList)):
uniqueIpList[i] = '%3s.%3s.%3s.%3s' % tuple(uniqueIpList[i].split('.'))
#Sort the pre-processed list of IP addresses
uniqueIpList.sort()
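# (An equivalent approach would be a numeric sort key, e.g.
#  uniqueIpList.sort(key=lambda ip: tuple(int(p) for p in ip.split('.'))),
#  which would avoid the pad-then-strip round trip used here.)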
#Turn the IP addresses back to 'normal' and output to our results file
for i in range(len(uniqueIpList)):
uniqueIpList[i] = uniqueIpList[i].replace(' ','')
with open('results.txt', 'a+') as output_file:
output_file.write(uniqueIpList[i] + '\n')
print 'Done'
| [
"[email protected]"
] | |
3dcafcb8ddbfcf101cf8d23f6cd6121a86d227a3 | e09bbc5a50010270db56af7bbfd9e821df25ffc4 | /segmentation/prediction/predict.py | 3655da62ab80ccf5830f8336de01e3f762f69843 | [] | no_license | treyamador/machine-learning | 5c9318172dc49fde57b29e6794a4d9e8a4327865 | 3b5ac49dace202875344493840d072daf772f31b | refs/heads/master | 2020-03-08T04:33:22.888344 | 2018-05-28T17:29:25 | 2018-05-28T17:29:25 | 127,925,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,244 | py | # prediction script
from keras.models import load_model
from keras import backend as K
from skimage import transform
from skimage import io
from PIL import Image
import numpy as np
import cv2
MODEL_NAME = 'model.nodropcont.unet.35'
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) + 1.0)
def dice_coef_loss(y_true, y_pred):
return -dice_coef(y_true, y_pred)
def run_predictions():
model = load_model(MODEL_NAME+'.hdf5',
custom_objects={'dice_coef': dice_coef,
'dice_coef_loss': dice_coef_loss})
for i in range(1, 928):
filepath = '../data/test_resize/test_img_'+str(i)+'.jpg'
x = np.array([io.imread(filepath)])
prediction = model.predict(x)
prediction = transform.resize(prediction[0], (250, 250, 1)) * 255
prediction[prediction >= 127] = 255
prediction[prediction < 127] = 0
cv2.imwrite('test_masks/test_mask_'+str(i)+'.jpg', prediction)
print('creating mask', i)
def rle_encoding(x):
dots = np.where(x.T.flatten() == 1)[0] # .T sets Fortran order down-then-right
run_lengths = []
prev = -2
for b in dots:
if b > prev + 1:
run_lengths.extend((b + 1, 0))
run_lengths[-1] += 1
prev = b
return run_lengths
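# (Example: a flattened mask [0, 1, 1, 0, 1] encodes to [2, 2, 5, 1] --
#  pairs of 1-based start position and run length, scanned down-then-right.)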
def gen_encodings():
masks = ['test_masks/test_mask_'+str(x)+'.jpg' for x in range(1, 928)]
encodings = []
for i, m in enumerate(masks):
img = Image.open(m)
x = np.array(img.getdata(), dtype=np.uint8).reshape(img.size[::-1])
x = x // 255
encodings.append(rle_encoding(x))
print('creating encoding', i+1)
conv = lambda l: ' '.join(map(str, l)) # list -> string
with open('encodings.'+MODEL_NAME+'.csv', 'wt') as file_writer:
file_writer.write('ImageId,EncodedPixels\n')
for i, encoding in enumerate(encodings):
entry = conv(encoding)
file_writer.write('test_mask_'+str(i+1)+','+entry+'\n')
if __name__ == '__main__':
run_predictions()
gen_encodings()
| [
"[email protected]"
] | |
4be6061f5023ac300b2a7e8fcfe8c0db52c94e8a | daedd651442329e46d492e9ddfc040c57c912077 | /HW5.0/03-evaluate.py | 032d74f591807f754144401cabbe341417257f41 | [] | no_license | JCgithubaccount/590-jc3155 | 4a8b286006b343f2270821b6bcf1c9ac14bb5b8f | 40da69c770142f57f26ec3c2f496b801577f613f | refs/heads/main | 2023-08-30T17:49:01.757767 | 2021-11-16T19:42:43 | 2021-11-16T19:42:43 | 403,991,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,497 | py | import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report
data = np.load('02vector.npz')
features = data['features']
target = data['target']
cnn_model = tf.keras.models.load_model('cnn_model.h5', compile=False)
lstm_model = tf.keras.models.load_model('lstm_model.h5', compile=False)
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.2, random_state=0)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
train_pred = cnn_model.predict(X_train[:, :, np.newaxis])
train_pred = np.argmax(train_pred, -1)
print("===================CNN Train metrics===================")
print(classification_report(y_train, train_pred))
test_pred = cnn_model.predict(X_test[:, :, np.newaxis])
test_pred = np.argmax(test_pred, -1)
print("===================CNN Test metrics===================")
print(classification_report(y_test, test_pred))
train_pred = lstm_model.predict(X_train[:, np.newaxis, :])
train_pred = np.argmax(train_pred, -1)
print("==================LSTM Train metrics===================")
print(classification_report(y_train, train_pred))
test_pred = lstm_model.predict(X_test[:, np.newaxis, :])
test_pred = np.argmax(test_pred, -1)
print("==================LSTM Test metrics===================")
print(classification_report(y_test, test_pred))
| [
"[email protected]"
] | |
7dfa8742af0323539e84ac872f221265e1bdca25 | 9976538606416c483f9df88e72369b3ad43d0337 | /ansible/load_ips.py | f7d0961a005eb1ac38ab921e9433579162f268fc | [] | no_license | maxburkhardt/midnight-net | 7a7ca9c92a45181439b7274f0e606efe12af7205 | e95ccf8445250e71a65bf449a525b0e35c6e6f07 | refs/heads/master | 2021-07-07T16:10:39.119901 | 2020-06-22T00:25:10 | 2020-06-22T00:25:10 | 89,821,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,253 | py | #!/usr/bin/env python
import json
import uuid
import subprocess
from jinja2 import Template
"""
A script to load the IP addresses of generated midnight net hosts from the
terraform state file and automatically create relevant ansible infrastructure,
such as inventory.ini.
"""
def get_from_terraform():
state_data = subprocess.check_output(
["terraform", "show", "-json", "terraform.tfstate"],
cwd="../terraform"
)
state_parsed = json.loads(state_data)
ips = {
"hub_core": state_parsed["values"]["outputs"]["midnight-hub-core-ip"]["value"],
"hub_west": state_parsed["values"]["outputs"]["midnight-hub-west-ip"]["value"],
"hub_south": state_parsed["values"]["outputs"]["midnight-hub-south-ip"]["value"]
}
return ips
def write_ini_file(ips):
fh = open("inventory.ini", "w")
for hubname, ip in ips.items():
fh.write(f"[{hubname}]\n{ip} ansible_ssh_private_key_file=../credentials/id_rsa\n\n")
fh.close()
def write_hint_file(ips):
template_source_fh = open("resources/midnight_notes_template.txt", "r")
template_source = template_source_fh.read()
template_source_fh.close()
fh = open("resources/midnight_notes.txt", "w")
fh.write(Template(template_source).render(hub_south_ip = ips["hub_south"]))
fh.close()
def write_known_hosts(ips):
fh = open("resources/south_known_hosts", "w")
fh.write(
f"{ips['hub_core']} ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdH" +
f"AyNTYAAABBBOh52KovN3+i+TqWQuH8yx4/gxHuW+wo3aAztEI+4jPyUqVTxesQPk7GD/X/gbO3CAsReq" +
f"B3Ms/slZVDbcAztY4=\n"
)
fh.close()
def write_game_key(key):
fh = open("resources/game_key", "w")
fh.write(f"Congratulations! Tell the Game Master that you've found the key: {key}\n")
fh.close()
if __name__ == "__main__":
print("Loading IPs from terraform state...")
ips = get_from_terraform()
print("Writing inventory.ini...")
write_ini_file(ips)
print("Writing midnight_notes.txt...")
write_hint_file(ips)
print("Writing south_known_hosts...")
write_known_hosts(ips)
game_key = uuid.uuid4()
print(f"The game key for this instance will be {game_key}")
write_game_key(game_key)
| [
"[email protected]"
] | |
8bf2ee94e3e751581ea7caa05a37b30b802ff49a | 0f89b48353f0a9e1339faca745b94bf2e2a80ead | /clusterAnalysis/Kmeans/k-means4.py | a1e265c5f45f0b6c1d10e14591ff642ad9d48d0e | [] | no_license | Limlin-Qs/MachineLearningAlgorithm | efbb5a68076159349281131db7f5f709deea78b6 | c5237dc4467de72974e94bf195e55d313c50eee0 | refs/heads/master | 2023-08-18T19:39:51.878228 | 2021-10-14T10:46:55 | 2021-10-14T10:46:55 | 363,423,200 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,503 | py | # -*- coding:utf-8 -*-
# 会根据给出的数据分析,并生成聚类,输出图像到文件
import math
import pylab as pl
import codecs
import re
import datetime
pl.rcParams['axes.unicode_minus'] = False
# 计算欧式距离,a,b代表两个元组
def calcudistance(a, b):
return math.sqrt(math.pow(a[0] - b[0], 2) + math.pow(a[1] - b[1], 2))
# 求出最小距离
def dist_min(Ci, Cj):
return min(calcudistance(i, j) for i in Ci for j in Cj)
# 求出最大距离
def dist_max(Ci, Cj):
return max(calcudistance(i, j) for i in Ci for j in Cj)
# 求出平均距离
def dist_avg(Ci, Cj):
return sum(calcudistance(i, j) for i in Ci for j in Cj) / (len(Ci) * len(Cj))
# 找到距离最小的下标
def find_min(M):
min = 1000
x = 0;
y = 0
for i in range(len(M)):
for j in range(len(M[i])):
if i != j and M[i][j] < min:
min = M[i][j];
x = i;
y = j
return (x, y, min)
# Core of the AGNES algorithm
def AGNES(dataset, dist, k):
    # Initialize the cluster list C and the distance matrix M
C = [];
M = []
for i in dataset:
Ci = []
Ci.append(i)
C.append(Ci)
for i in C:
Mi = []
for j in C:
Mi.append(dist(i, j))
M.append(Mi)
q = len(dataset)
    # Merge the closest pair of clusters and update the distance matrix
while q > k:
x, y, min = find_min(M)
C[x].extend(C[y])
C.remove(C[y])
M = []
for i in C:
Mi = []
for j in C:
Mi.append(dist(i, j))
M.append(Mi)
q -= 1
return C
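# (For reference, SciPy offers the same average-linkage clustering via
#  scipy.cluster.hierarchy.linkage(dataSet, method='average'), which is much
#  faster than this pure-Python O(n^3) loop.)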
# Plot the clustering result
def drawfig(C):
    colValue = ['r', 'y', 'g', 'b', 'c', 'k', 'm']  # list of colors
    for i in range(len(C)):
        coo_X = []  # x coordinates
        coo_Y = []  # y coordinates
for j in range(len(C[i])):
coo_X.append(C[i][j][0])
coo_Y.append(C[i][j][1])
pl.scatter(coo_X, coo_Y, marker='o', color=colValue[i % len(colValue)], label=i)
pl.legend(loc='upper right')
    pl.title("Clustering result")
pl.savefig(savepath + '2.png')
pl.show()
def draworigian(dataset):
x_list = list()
y_list = list()
for i in range(len(dataSet)):
temp = dataSet[i]
x_list.append(temp[0])
y_list.append(temp[1])
pl.scatter(x_list, y_list, marker='o', color="b")
pl.legend(loc='upper right')
    pl.title("Original data distribution")
pl.savefig(savepath + '1.png')
pl.show()
def loadtxt(Filepath):
    # Read the text file and store it as a list of 2-D points
    inDate = codecs.open(Filepath, 'r', 'utf-8').readlines()
    dataSet = list()
    for line in inDate:  # process each line
line = line.strip()
strList = re.split('[ ]+', line)
numList = list()
for item in strList:
num = float(item)
numList.append(num)
# print numList
dataSet.append(numList)
return dataSet # dataSet = [[], [], [], ...]
savepath = 'D:/研2/模式识别/'
Filepath = "D:/研2/模式识别/testSet2.txt" # 数据集文件
dataSet = loadtxt(Filepath) # 载入数据集
draworigian(dataSet)
start = datetime.datetime.now()
result = AGNES(dataSet, dist_avg, 4)
end = datetime.datetime.now()
timeused = end - start
print(timeused)
drawfig(result)
# 100 points: 1.203133, 01.140652, 1.156260, 1.203152, 1.453138
# 200 points: 9.359476, 09.367193, 09.312600, 09.325362, 09.356845
# 500 points: 147.946446, 147.351248, 147.153595, 147.946446, 145.493638
# 500 points, no need: 145.429797 146.016936 147.240645 146.563253 147.534587
| [
"[email protected]"
] | |
48b90312527aaca5a3d117dcad0c546c2006051c | f0792fc58c0d2c261428464971b1379a42b17cc1 | /group/g32_뱀_p327.py | 7126f264a418af888e49bedbe14646f5cf5d1986 | [] | no_license | Tao-Kim/study_algo | d3e6ae70995b87f92286fe0243bbf3b0e2426e2e | aacf5de901b38eb53b21147e867eaa7c860fc113 | refs/heads/master | 2022-12-21T13:26:35.519167 | 2020-09-28T12:53:38 | 2020-09-28T12:53:38 | 288,083,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,106 | py | from collections import deque
n = int(input())
board = [[0] * n for _ in range(n)]
board[0][0] = 1
moveQueue = deque()
moveQueue.append((0,0))
k = int(input())
for _ in range(k):
row, col = map(int, input().split())
board[row-1][col-1] = 2
l = int(input())
turnQueue = deque()
for _ in range(l):
x, c = input().split()
turnQueue.append((int(x), c))
turnQueue.append((0, 'Dummy'))
curTime = 0
x, c = turnQueue.popleft()
direction = 0 # 0 - right, 1 - up, 2 - left, 3 - down
move = [(0, 1), (-1, 0), (0, -1), (1, 0)]
curRow = 0
curCol = 0
def turnLeft():
global direction
if direction == 3:
direction = 0
else:
direction += 1
def turnRight():
global direction
if direction == 0:
direction = 3
else:
direction -= 1
while True:
if x == curTime:
if c == 'L':
turnLeft()
elif c == 'D':
turnRight()
x, c = turnQueue.popleft()
curTime += 1
nextRow = curRow + move[direction][0]
nextCol = curCol + move[direction][1]
if nextRow < 0 or nextRow >= n or nextCol < 0 or nextCol >= n or board[nextRow][nextCol] == 1:
print(curTime)
break
elif board[nextRow][nextCol] == 0:
lastRow, lastCol = moveQueue.popleft()
board[lastRow][lastCol] = 0
curRow = nextRow
curCol = nextCol
board[nextRow][nextCol] = 1
moveQueue.append((nextRow, nextCol))
"""
Problem : p327_Snake
Time : 26 minutes
Approach :
    Similar to the book's solution
     - Unlike the book's solution, everything is declared globally, which makes it a bit messy
Other people's solutions :
========================================================================================
Splitting the direction change into its own function
def turn(direction, c):
if c == "L":
direction = (direction - 1) % 4
else:
direction = (direction + 1) % 4
return direction
A version organized into functions
n = int(input())
k = int(input())
data = [[0] * (n + 1) for _ in range(n + 1)] # map information
info = [] # rotation (direction-change) information
# map information (cells containing an apple are marked with 1)
for _ in range(k):
a, b = map(int, input().split())
data[a][b] = 1
# read the rotation information
l = int(input())
for _ in range(l):
x, c = input().split()
info.append((int(x), c))
# the snake starts facing right (direction order: east, south, west, north)
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
def turn(direction, c):
if c == "L":
direction = (direction - 1) % 4
else:
direction = (direction + 1) % 4
return direction
def simulate():
    x, y = 1, 1 # position of the snake's head
    data[x][y] = 2 # cells occupied by the snake are marked with 2
    direction = 0 # initially facing east
    time = 0 # seconds elapsed since the start
    index = 0 # index of the next rotation to apply
    q = [(x, y)] # cells occupied by the snake (tail at the front of the list)
while True:
nx = x + dx[direction]
ny = y + dy[direction]
        # if the next cell is inside the map and is not part of the snake's body
if 1 <= nx and nx <= n and 1 <= ny and ny <= n and data[nx][ny] != 2:
            # no apple: move, then remove the tail
if data[nx][ny] == 0:
data[nx][ny] = 2
q.append((nx, ny))
px, py = q.pop(0)
data[px][py] = 0
            # apple present: move and keep the tail in place
if data[nx][ny] == 1:
data[nx][ny] = 2
q.append((nx, ny))
        # hit a wall or the snake's own body
else:
time += 1
break
        x, y = nx, ny # move the head to the next position
time += 1
        if index < l and time == info[index][0]: # rotate if it is time to rotate
direction = turn(direction, info[index][1])
index += 1
return time
print(simulate())
========================================================================================
import sys
n = int(sys.stdin.readline())
apple = set([tuple(map(int, sys.stdin.readline().split())) for i in range(int(sys.stdin.readline()))])
cmd = [list(sys.stdin.readline().split()) for i in range(int(sys.stdin.readline()))]
snake = [[-2]*n for _ in range(n)]
snake[0][0] = 0
x,y,d = 0,0,0
l = 1
t = 0
idx = 0
dx = [0,1,0,-1]
dy = [1,0,-1,0]
# print((2,5) in apple)
while True:
t += 1
l += 1
x += dx[d]
y += dy[d]
if x < 0 or x >= n or y < 0 or y >= n:
break
if snake[x][y] >= t-l+1:
break
if (x+1,y+1) in apple:
apple.remove((x+1,y+1))
else:
l -= 1
snake[x][y]=t
if idx < len(cmd) and int(cmd[idx][0]) == t:
if cmd[idx][1] == 'D':
d = (d+1) % 4
else:
d = (d+3) % 4
idx += 1
print(t)
========================================================================================
Notes :
    - For values that cycle through [0, 1, 2, 3], consider x = (x + 1) % 4
    - If the logic looks like it will get complicated, split it into functions
    - Re-solve g32 (this time split into functions)
    - Review solution 2 for g32 again
"""
| [
"[email protected]"
] | |
f225e76098fd88d5fb8e7bec28cb199093663486 | eca8c05a405d13cc2402464be18ca49e8175f5f8 | /project/main.py | de304a098689c9d5297e53d02a1b9ee6c0515931 | [] | no_license | o-oJames/MapBox_James | c8d716c54ec1343743deafa888a2b2699e63d631 | 2fc55c6f1f2a1d63ccc089e3c4de81eb63f43fbb | refs/heads/master | 2023-02-05T01:31:09.803302 | 2019-12-10T03:12:09 | 2019-12-10T03:12:09 | 226,804,727 | 0 | 0 | null | 2023-02-02T05:12:17 | 2019-12-09T06:53:03 | HTML | UTF-8 | Python | false | false | 419 | py | # main.py
import os
from flask import Blueprint, render_template, request, json
from flask_login import login_required, current_user
from pyproj import Transformer
main = Blueprint('main', __name__)
@main.route('/')
def index():
return render_template('index.html')
@main.route('/profile')
@login_required
def profile():
return render_template('profile.html', name=current_user.name)
| [
"[email protected]"
] | |
4940ca8db7984e58d9977e6e369641cf36cadd49 | 4968a692977a725c69cf1f0b75883c862d71e4a3 | /CtCI/recursion-dynamic-programming/davis-staircase.py | ebe4b0bab0288fcf6147b9be417f4e5a57dd44b5 | [] | no_license | OscarOzaine/Hackerraank | e4edcda7021ca47e079b552a0cb9c837c5e140ae | eabafecd312bc01e00a1f9779184e2ea6382045b | refs/heads/master | 2021-01-09T09:37:47.560605 | 2018-08-03T03:42:19 | 2018-08-03T03:42:19 | 81,183,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | ## Recursion: Davis' Staircase
## https://www.hackerrank.com/challenges/ctci-recursive-staircase
def getSteps(n, cache=dict()):
if n == 0:
return 1
if n < 0:
return 0
total = 0
if cache.get(n):
return cache[n]
for hop in [1, 2, 3]:
total += getSteps(n-hop, cache)
cache[n] = total
return cache[n]
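# Note: the mutable default argument cache=dict() persists across calls,
# so step counts are memoized and shared between the test cases below.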
s = int(raw_input().strip())
for a0 in xrange(s):
n = int(raw_input().strip())
print getSteps(n)
| [
"[email protected]"
] | |
9f6c426d5a8caf837408e0c5f02c5fb25805e144 | 49b5e4b5f19f539866dfe2ac64292e255625af86 | /python_lessons_luts/Streams/reader.py | 5cccaefbdd66158536af0a3ed640bec04850deef | [] | no_license | nikitaKravchenko/Funny_scripts | 6ffd74a69b7d4e1a20a0f20919ecb587ca9a8fe6 | 486cbb6188f2793248e8e99a23f2dee7814c3c50 | refs/heads/master | 2021-01-20T03:21:57.066009 | 2019-12-03T14:16:15 | 2019-12-03T14:16:15 | 89,528,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | #print('Got this: "%s"' % input())
import sys
data = sys.stdin.readline()
print('The meaning of the life is', data, int(data)*2)
| [
"[email protected]"
] | |
a024f955b0216ad6ae22356a5a4212bbed1014f3 | e8bf2e6550113ffcdaab92a5e8311477221d69e5 | /music_player.py | 4ab3a232d30c324822ce4453153617275be1df15 | [] | no_license | PrateekJain999/Music-Player | 8b8fc3f96a6edc638c8d282659466386d0d558d1 | 4b6165141120650eb03f3735fb96e2727f3f82bb | refs/heads/main | 2023-02-24T13:45:20.313424 | 2021-02-04T11:41:33 | 2021-02-04T11:41:33 | 331,238,413 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,026 | py | from tkinter import *
from tkinter import ttk
import os
import tkinter.messagebox
from tkinter import filedialog
from pygame import mixer
import time
import threading
from mutagen.mp3 import MP3
root=Tk()
#=======================MENU BAR==============================================
menubar=Menu(root)
root.config(menu=menubar)
mixer.init()
root.title('Music Player')
root.iconbitmap('icon.ico')
statusbar=ttk.Label(root,text='Music Player',relief=SUNKEN,anchor=W,font='Times 10 italic')
statusbar.pack(side=BOTTOM,fill=X)
#===========================FRAME==========================
leftframe=Frame(root)
leftframe.pack(side=LEFT,padx=30)
#========================LISTBOX============================
PlayListBox=Listbox(leftframe)
PlayListBox.pack()
#===========================FRAME=========================
rightframe=Frame(root)
rightframe.pack()
topframe=Frame(rightframe)
topframe.pack()
middleframe=Frame(rightframe)
middleframe.pack(padx=30,pady=30)
bottomframe=Frame(rightframe)
bottomframe.pack()
#=========================LABEL=============================
lengthlabel = ttk.Label(topframe,text='TOTAL LENGTH : --:--',font='Times 10 bold')
lengthlabel.pack(pady=5)
currenttimelabel = ttk.Label(topframe,text="CURRENT TIME : --:--",font='Times 10 bold',relief=GROOVE)
currenttimelabel.pack()
#======================FUNCTION====================================
playlist=[] #contains the full path and filename
def Browse():
global filename_path
filename_path=filedialog.askopenfilename()
add_to_playlist(filename_path)
def add_to_playlist(filename):
filename=os.path.basename(filename)
index=0
PlayListBox.insert(index,filename)
playlist.insert(index,filename_path)
PlayListBox.pack()
index+=1
def About_us():
tkinter.messagebox.showinfo('About Music Player',' HLW FRIENDS CHAI PEE LO')
def show_details(play_song):
file_data=os.path.splitext(play_song)
if file_data[1]=='.mp3':
audio=MP3(play_song)
total_length=audio.info.length
else:
a=mixer.Sound(play_song)
total_length=a.get_length()
mins,secs= divmod(total_length,60)
mins=round(mins)
secs=round(secs)
timeformat='{:02d}:{:02d}'.format(mins,secs)
lengthlabel['text']="Total Length" + ' - ' + timeformat
t1=threading.Thread(target=start_count,args=(total_length,))
t1.start()
def start_count(t):
global paused
current_time=0
while current_time<=t and mixer.music.get_busy():
if paused:
continue
else:
mins,secs= divmod(current_time,60)
mins=round(mins)
secs=round(secs)
timeformat='{:02d}:{:02d}'.format(mins,secs)
currenttimelabel['text']="Current Time" + ' - ' +timeformat
time.sleep(1)
current_time+=1
def play_music():
global paused
if paused:
mixer.music.unpause()
statusbar['text']="Music Resumed"
paused =FALSE
else:
try:
stop_music()
time.sleep(1)
selected_song=PlayListBox.curselection()
selected_song=int(selected_song[0])
play_it=playlist[selected_song]
mixer.music.load(play_it)
mixer.music.play()
statusbar['text']="Playing Music"+' '+os.path.basename(play_it)
show_details(play_it)
except:
tkinter.messagebox.showerror('File not found','not found')
def stop_music():
    global paused
    mixer.music.stop()
    statusbar['text']='Stop Music'
    paused =FALSE
def pause_music():
global paused
paused=TRUE
mixer.music.pause()
statusbar['text']='Music Pause'
def rewind_music():
play_music()
statusbar['text']='Music rewinded'
def on_closing():
stop_music()
root.destroy()
def set_vol(val):
volume=float(val)/100
mixer.music.set_volume(volume)
muted=FALSE
paused=FALSE  # initialise the pause flag used by play_music/pause_music/stop_music
def mute_music():
global muted
if muted:
mixer.music.set_volume(0.5)
volumeBtn.configure(image=volumePhoto)
scale.set(50)
muted=FALSE
else:
mixer.music.set_volume(0)
volumeBtn.configure(image=mutePhoto)
scale.set(0)
muted=TRUE
def del_song():
selected_song=PlayListBox.curselection()
selected_song=int(selected_song[0])
PlayListBox.delete(selected_song)
playlist.pop(selected_song)
print(playlist)
#===================CREATE SUBMENU===========================
submenu=Menu(menubar,tearoff=0)
menubar.add_cascade(label='File',menu=submenu)
submenu.add_command(label='Open',command=Browse)
submenu.add_command(label='Exit',command=root.destroy)
submenu=Menu(menubar,tearoff=0)
menubar.add_cascade(label='Help',menu=submenu)
submenu.add_command(label='About us',command=About_us)
#===============BUTTON=========================================
addBtn=ttk.Button(leftframe,text="+ Add",command=Browse)
addBtn.pack(side=LEFT)
delBtn=ttk.Button(leftframe,text="- Del",command=del_song)
delBtn.pack(side=LEFT)
playPhoto = PhotoImage(file='play.png')
playBtn= ttk.Button(middleframe,image=playPhoto,command=play_music)
playBtn.grid(row=0,column=0,padx=10)
stopPhoto = PhotoImage(file='stop.png')
stopBtn= ttk.Button(middleframe,image=stopPhoto,command=stop_music)
stopBtn.grid(row=0,column=1,padx=10)
pausePhoto = PhotoImage(file='pause.png')
pauseBtn= ttk.Button(middleframe,image=pausePhoto,command=pause_music)
pauseBtn.grid(row=0,column=2,padx=10)
rewindPhoto = PhotoImage(file='rewind.png')
rewindBtn= Button(bottomframe,image=rewindPhoto,command=rewind_music)
rewindBtn.grid(row=0,column=0)
mutePhoto = PhotoImage(file='mute.png')
volumePhoto = PhotoImage(file='volume.png')
volumeBtn= ttk.Button(bottomframe,image=volumePhoto,command=mute_music)
volumeBtn.grid(row=0,column=1)
scale= ttk.Scale(bottomframe,from_=0,to=100,orient=HORIZONTAL,command=set_vol)
scale.set(50)
mixer.music.set_volume(0.5)
scale.grid(row=0,column=2,pady=15,padx=30)
root.protocol("WM_DELETE_WINDOW",on_closing)
root.mainloop()
| [
"[email protected]"
] | |
7331026b83d8e6f850f8c56d9a5b1831c2cf19a3 | 656333723126feda937bff37cbae7d675a77c186 | /euler/2_EvenFibonacciNumbers.py | c1532a8f573a97167d2154fc2cc41fdac130fe93 | [] | no_license | luyaosuperman/python3-playground | 424ead9f69ed0d208f019d7ad7b838f74191205b | 3ac89c15bb03c8282376f354705d2c437b41775e | refs/heads/master | 2023-07-23T07:30:19.038120 | 2023-07-17T07:59:59 | 2023-07-17T07:59:59 | 88,848,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | #!/usr/bin/python3
import unittest
class Solution(unittest.TestCase):
def sol(self):
self.result = 0
self.count = 0
self.a = 1
self.b = 2
self.result = 0
while self.a < 4000000:
if (self.count - 1) % 3 == 0:
print(self.a)
self.result += self.a
self.feb()
print(self.result)
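        # Every third Fibonacci number is even, which is why the (count - 1) % 3
        # check above picks out exactly the even-valued terms.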
def feb(self):
self.b, self.a = self.a + self.b, self.b
self.count += 1
def test_1(self):
#self.assertEqual()
self.sol()
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
67b3c669ef83f6826df7fe37df6cce574c437a79 | d646608860b6d384200f2bcc3dc30f0fd0403c36 | /app/models/reviews.py | 15c7064f076feaa5a5b5dec4365164fb50a06084 | [] | no_license | karobia001/Watchlist | 7503c5c1e6b5cb14b2bfc2e6aeadcb32e607f61c | bed75eefc00147febf921d7d778399cff7aa398e | refs/heads/master | 2020-08-08T15:59:08.670249 | 2019-10-11T15:35:31 | 2019-10-11T15:35:31 | 213,864,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | class Review:
all_reviews = []
def __init__(self,movie_id,title,imageurl,review):
self.movie_id = movie_id
self.title = title
self.imageurl = imageurl
self.review = review
@classmethod
def get_reviews(cls,id):
response = []
for review in cls.all_reviews:
if review.movie_id == id:
response.append(review)
return response
def save_review(self):
Review.all_reviews.append(self)
@classmethod
def clear_reviews(cls):
Review.all_reviews.clear() | [
"[email protected]"
] | |
fe1a7825059ee36d7669e58884816a5ed5472ab3 | 0a66f88db3e902465657dba290481d4d87161de2 | /code/OrbitalDebris/venv/bin/runxlrd.py | 3b86586b761b0841ba34e6eef8dda29a71dc4ff9 | [] | no_license | ReeceHumphreys/OrbitalDebrisThesis | c26440f60ab2e33027912a9e0dffdc5f93919851 | 3c703a01ea69679e3ee82123e0b68b6b89766777 | refs/heads/master | 2023-04-16T07:30:07.396842 | 2021-04-23T01:53:34 | 2021-04-23T01:53:34 | 298,690,476 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,330 | py | #!/Users/reecehumphreys/Developer/Thesis/Thesis/venv/bin/python3
# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
# This script is part of the xlrd package, which is released under a
# BSD-style licence.
from __future__ import print_function
cmd_doc = """
Commands:
2rows Print the contents of first and last row in each sheet
3rows Print the contents of first, second and last row in each sheet
bench Same as "show", but doesn't print -- for profiling
biff_count[1] Print a count of each type of BIFF record in the file
biff_dump[1] Print a dump (char and hex) of the BIFF records in the file
fonts hdr + print a dump of all font objects
hdr Mini-overview of file (no per-sheet information)
hotshot Do a hotshot profile run e.g. ... -f1 hotshot bench bigfile*.xls
labels Dump of sheet.col_label_ranges and ...row... for each sheet
name_dump Dump of each object in book.name_obj_list
names Print brief information for each NAME record
ov Overview of file
profile Like "hotshot", but uses cProfile
show Print the contents of all rows in each sheet
version[0] Print versions of xlrd and Python and exit
xfc Print "XF counts" and cell-type counts -- see code for details
[0] means no file arg
[1] means only one file arg i.e. no glob.glob pattern
"""
options = None
if __name__ == "__main__":
PSYCO = 0
import xlrd
import sys
import time
import glob
import traceback
import gc
from xlrd.timemachine import xrange, REPR
class LogHandler(object):
def __init__(self, logfileobj):
self.logfileobj = logfileobj
self.fileheading = None
self.shown = 0
def setfileheading(self, fileheading):
self.fileheading = fileheading
self.shown = 0
def write(self, text):
if self.fileheading and not self.shown:
self.logfileobj.write(self.fileheading)
self.shown = 1
self.logfileobj.write(text)
null_cell = xlrd.empty_cell
def show_row(bk, sh, rowx, colrange, printit):
if bk.ragged_rows:
colrange = range(sh.row_len(rowx))
if not colrange: return
if printit: print()
if bk.formatting_info:
for colx, ty, val, cxfx in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r, xfx: %s"
% (xlrd.colname(colx), rowx+1, ty, val, cxfx))
else:
for colx, ty, val, _unused in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r" % (xlrd.colname(colx), rowx+1, ty, val))
def get_row_data(bk, sh, rowx, colrange):
result = []
dmode = bk.datemode
ctys = sh.row_types(rowx)
cvals = sh.row_values(rowx)
for colx in colrange:
cty = ctys[colx]
cval = cvals[colx]
if bk.formatting_info:
cxfx = str(sh.cell_xf_index(rowx, colx))
else:
cxfx = ''
if cty == xlrd.XL_CELL_DATE:
try:
showval = xlrd.xldate_as_tuple(cval, dmode)
except xlrd.XLDateError as e:
showval = "%s:%s" % (type(e).__name__, e)
cty = xlrd.XL_CELL_ERROR
elif cty == xlrd.XL_CELL_ERROR:
showval = xlrd.error_text_from_code.get(cval, '<Unknown error code 0x%02x>' % cval)
else:
showval = cval
result.append((colx, cty, showval, cxfx))
return result
def bk_header(bk):
print()
print("BIFF version: %s; datemode: %s"
% (xlrd.biff_text_from_num[bk.biff_version], bk.datemode))
print("codepage: %r (encoding: %s); countries: %r"
% (bk.codepage, bk.encoding, bk.countries))
print("Last saved by: %r" % bk.user_name)
print("Number of data sheets: %d" % bk.nsheets)
print("Use mmap: %d; Formatting: %d; On demand: %d"
% (bk.use_mmap, bk.formatting_info, bk.on_demand))
print("Ragged rows: %d" % bk.ragged_rows)
if bk.formatting_info:
print("FORMATs: %d, FONTs: %d, XFs: %d"
% (len(bk.format_list), len(bk.font_list), len(bk.xf_list)))
if not options.suppress_timing:
print("Load time: %.2f seconds (stage 1) %.2f seconds (stage 2)"
% (bk.load_time_stage_1, bk.load_time_stage_2))
print()
def show_fonts(bk):
print("Fonts:")
for x in xrange(len(bk.font_list)):
font = bk.font_list[x]
font.dump(header='== Index %d ==' % x, indent=4)
def show_names(bk, dump=0):
bk_header(bk)
if bk.biff_version < 50:
print("Names not extracted in this BIFF version")
return
nlist = bk.name_obj_list
print("Name list: %d entries" % len(nlist))
for nobj in nlist:
if dump:
nobj.dump(sys.stdout,
header="\n=== Dump of name_obj_list[%d] ===" % nobj.name_index)
else:
print("[%d]\tName:%r macro:%r scope:%d\n\tresult:%r\n"
% (nobj.name_index, nobj.name, nobj.macro, nobj.scope, nobj.result))
def print_labels(sh, labs, title):
if not labs:return
for rlo, rhi, clo, chi in labs:
print("%s label range %s:%s contains:"
% (title, xlrd.cellname(rlo, clo), xlrd.cellname(rhi-1, chi-1)))
for rx in xrange(rlo, rhi):
for cx in xrange(clo, chi):
print(" %s: %r" % (xlrd.cellname(rx, cx), sh.cell_value(rx, cx)))
def show_labels(bk):
# bk_header(bk)
hdr = 0
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
clabs = sh.col_label_ranges
rlabs = sh.row_label_ranges
if clabs or rlabs:
if not hdr:
bk_header(bk)
hdr = 1
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
print_labels(sh, clabs, 'Col')
print_labels(sh, rlabs, 'Row')
if bk.on_demand: bk.unload_sheet(shx)
def show(bk, nshow=65535, printit=1):
bk_header(bk)
if 0:
rclist = xlrd.sheet.rc_stats.items()
rclist = sorted(rclist)
print("rc stats")
for k, v in rclist:
print("0x%04x %7d" % (k, v))
if options.onesheet:
try:
shx = int(options.onesheet)
except ValueError:
shx = bk.sheet_by_name(options.onesheet).number
shxrange = [shx]
else:
shxrange = range(bk.nsheets)
# print("shxrange", list(shxrange))
for shx in shxrange:
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
colrange = range(ncols)
anshow = min(nshow, nrows)
print("sheet %d: name = %s; nrows = %d; ncols = %d" %
(shx, REPR(sh.name), sh.nrows, sh.ncols))
if nrows and ncols:
# Beat the bounds
for rowx in xrange(nrows):
nc = sh.row_len(rowx)
if nc:
sh.row_types(rowx)[nc-1]
sh.row_values(rowx)[nc-1]
sh.cell(rowx, nc-1)
for rowx in xrange(anshow-1):
if not printit and rowx % 10000 == 1 and rowx > 1:
print("done %d rows" % (rowx-1,))
show_row(bk, sh, rowx, colrange, printit)
if anshow and nrows:
show_row(bk, sh, nrows-1, colrange, printit)
print()
if bk.on_demand: bk.unload_sheet(shx)
def count_xfs(bk):
bk_header(bk)
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
nrows = sh.nrows
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
# Access all xfindexes to force gathering stats
type_stats = [0, 0, 0, 0, 0, 0, 0]
for rowx in xrange(nrows):
for colx in xrange(sh.row_len(rowx)):
xfx = sh.cell_xf_index(rowx, colx)
assert xfx >= 0
cty = sh.cell_type(rowx, colx)
type_stats[cty] += 1
print("XF stats", sh._xf_index_stats)
print("type stats", type_stats)
print()
if bk.on_demand: bk.unload_sheet(shx)
def main(cmd_args):
import optparse
global options, PSYCO
usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
oparser = optparse.OptionParser(usage)
oparser.add_option(
"-l", "--logfilename",
default="",
help="contains error messages")
oparser.add_option(
"-v", "--verbosity",
type="int", default=0,
help="level of information and diagnostics provided")
oparser.add_option(
"-m", "--mmap",
type="int", default=-1,
help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
oparser.add_option(
"-e", "--encoding",
default="",
help="encoding override")
oparser.add_option(
"-f", "--formatting",
type="int", default=0,
help="0 (default): no fmt info\n"
"1: fmt info (all cells)\n",
)
oparser.add_option(
"-g", "--gc",
type="int", default=0,
help="0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc")
oparser.add_option(
"-s", "--onesheet",
default="",
help="restrict output to this sheet (name or index)")
oparser.add_option(
"-u", "--unnumbered",
action="store_true", default=0,
help="omit line numbers or offsets in biff_dump")
oparser.add_option(
"-d", "--on-demand",
action="store_true", default=0,
help="load sheets on demand instead of all at once")
oparser.add_option(
"-t", "--suppress-timing",
action="store_true", default=0,
help="don't print timings (diffs are less messy)")
oparser.add_option(
"-r", "--ragged-rows",
action="store_true", default=0,
help="open_workbook(..., ragged_rows=True)")
options, args = oparser.parse_args(cmd_args)
if len(args) == 1 and args[0] in ("version", ):
pass
elif len(args) < 2:
oparser.error("Expected at least 2 args, found %d" % len(args))
cmd = args[0]
xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
if cmd == 'biff_dump':
xlrd.dump(args[1], unnumbered=options.unnumbered)
sys.exit(0)
if cmd == 'biff_count':
xlrd.count_records(args[1])
sys.exit(0)
if cmd == 'version':
print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
print("Python:", sys.version)
sys.exit(0)
if options.logfilename:
logfile = LogHandler(open(options.logfilename, 'w'))
else:
logfile = sys.stdout
mmap_opt = options.mmap
mmap_arg = xlrd.USE_MMAP
if mmap_opt in (1, 0):
mmap_arg = mmap_opt
elif mmap_opt != -1:
print('Unexpected value (%r) for mmap option -- assuming default' % mmap_opt)
fmt_opt = options.formatting | (cmd in ('xfc', ))
gc_mode = options.gc
if gc_mode:
gc.disable()
for pattern in args[1:]:
for fname in glob.glob(pattern):
print("\n=== File: %s ===" % fname)
if logfile != sys.stdout:
logfile.setfileheading("\n=== File: %s ===\n" % fname)
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC before open:", n_unreachable, "unreachable objects")
if PSYCO:
import psyco
psyco.full()
PSYCO = 0
try:
t0 = time.time()
bk = xlrd.open_workbook(
fname,
verbosity=options.verbosity, logfile=logfile,
use_mmap=mmap_arg,
encoding_override=options.encoding,
formatting_info=fmt_opt,
on_demand=options.on_demand,
ragged_rows=options.ragged_rows,
)
t1 = time.time()
if not options.suppress_timing:
print("Open took %.2f seconds" % (t1-t0,))
except xlrd.XLRDError as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
continue
except KeyboardInterrupt:
print("*** KeyboardInterrupt ***")
traceback.print_exc(file=sys.stdout)
sys.exit(1)
except BaseException as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
traceback.print_exc(file=sys.stdout)
continue
t0 = time.time()
if cmd == 'hdr':
bk_header(bk)
elif cmd == 'ov': # OverView
show(bk, 0)
elif cmd == 'show': # all rows
show(bk)
elif cmd == '2rows': # first row and last row
show(bk, 2)
elif cmd == '3rows': # first row, 2nd row and last row
show(bk, 3)
elif cmd == 'bench':
show(bk, printit=0)
elif cmd == 'fonts':
bk_header(bk)
show_fonts(bk)
elif cmd == 'names': # named reference list
show_names(bk)
elif cmd == 'name_dump': # named reference list
show_names(bk, dump=1)
elif cmd == 'labels':
show_labels(bk)
elif cmd == 'xfc':
count_xfs(bk)
else:
print("*** Unknown command <%s>" % cmd)
sys.exit(1)
del bk
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC post cmd:", fname, "->", n_unreachable, "unreachable objects")
if not options.suppress_timing:
t1 = time.time()
print("\ncommand took %.2f seconds\n" % (t1-t0,))
return None
av = sys.argv[1:]
if not av:
main(av)
firstarg = av[0].lower()
if firstarg == "hotshot":
import hotshot
import hotshot.stats
av = av[1:]
prof_log_name = "XXXX.prof"
prof = hotshot.Profile(prof_log_name)
# benchtime, result = prof.runcall(main, *av)
result = prof.runcall(main, *(av, ))
print("result", repr(result))
prof.close()
stats = hotshot.stats.load(prof_log_name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
elif firstarg == "profile":
import cProfile
av = av[1:]
cProfile.run('main(av)', 'YYYY.prof')
import pstats
p = pstats.Stats('YYYY.prof')
p.strip_dirs().sort_stats('cumulative').print_stats(30)
elif firstarg == "psyco":
PSYCO = 1
main(av[1:])
else:
main(av)
| [
"[email protected]"
] | |
98a5cad2cf3eaa198f86362ddbe11f53eaf77e90 | 5e2a66e0416dcaf4674bd58f7dd7bc905800aa92 | /chapter6_calculator/not_eval.py | 19b6b540e3d3b4f4cad577f42734a3714894ec95 | [] | no_license | gridl/applications_with_wxpython | c96ed05b49e3494323e612afb1baccc8ea1e6f93 | 1fc63c384b7856402b99a97bf91fe0966a5ec413 | refs/heads/master | 2020-05-17T08:37:01.672727 | 2019-04-25T19:24:14 | 2019-04-25T19:24:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | # not_eval.py
import ast
import operator
# Whitelist mapping AST operator node types to their safe arithmetic implementations.
allowed_operators = {ast.Add: operator.add, ast.Sub: operator.sub,
                     ast.Mult: operator.mul, ast.Div: operator.truediv}
def noeval(expression):
    """Evaluate a parsed arithmetic expression without calling eval().
    Only number literals and the binary operators whitelisted in
    allowed_operators are computed; any other node is ignored (returns None).
    """
    if isinstance(expression, ast.Num):
        return expression.n
    elif isinstance(expression, ast.BinOp):
        print('Operator: {}'.format(expression.op))
        print('Left operand: {}'.format(expression.left))
        print('Right operand: {}'.format(expression.right))
        op = allowed_operators.get(type(expression.op))
        if op:
            # Recurse into each operand and combine the results with the whitelisted operator.
            return op(noeval(expression.left),
                      noeval(expression.right))
        else:
            print('This statement will be ignored')
if __name__ == '__main__':
print(ast.parse('1+4', mode='eval').body)
print(noeval(ast.parse('1+4', mode='eval').body))
print(noeval(ast.parse('1**4', mode='eval').body))
print(noeval(ast.parse("__import__('os').remove('path/to/file')", mode='eval').body))
| [
"[email protected]"
] | |
84c3b710030d76d8822dda535ce1e7a9fccde6d2 | 3fa8676cfde6abe3c67c12b316e282641328da49 | /ch16/LineBot/app/router.py | 5d07a2776f6d1b9ef7f27f5cb74d638333586115 | [
"MIT"
] | permissive | antallen/PythonMaterial | 33ae38581e01144924fee3aecef66fb4f5ebe46e | c582fb1610610feb72002f43a3758d5c58d6da85 | refs/heads/main | 2023-07-14T15:15:52.963373 | 2021-08-19T12:19:31 | 2021-08-19T12:19:31 | 307,311,816 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | from app import app, handler, request, abort
from linebot.exceptions import InvalidSignatureError
from flask import render_template
# 設定預設網頁
@app.route("/")
def home():
return render_template("home.html")
# 接收 Line 平台來的「通知」
@app.route("/callback", methods=['POST'])
def callback():
signature = request.headers['X-Line-Signature']
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
print(body)
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK' | [
"[email protected]"
] | |
5d87c3542355bd8f653901ad12eeea83b43d4bde | 17f069da55a7f41fa73dd070c7b51d74e1c8dca5 | /Generic ReTarget Addon/KeeMap Retarget Addon.py | f641c589d0c8f4c272399d7be78307f8a4c52586 | [
"MIT"
] | permissive | dorsalstream/Blender-Retarget-Rig-Radical-to-Daz3D | 0b632e67fe3e4e2108e64a98a96be296681182f8 | 34418db2ca835ed0e30b3e2626c74bc5664bd920 | refs/heads/master | 2022-12-30T10:23:06.153004 | 2020-10-10T22:01:20 | 2020-10-10T22:01:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,083 | py | bl_info = {
"name": "KeeMap Anicmation Transfer Tool",
"description": "Tools for moving animation from one Rig To Another",
"author": "Nick Keeline",
"version": (0, 0, 0),
"blender": (2, 83, 0),
"location": "3D View > Tools",
"warning": "", # used for warning icon and text in addons panel
"wiki_url": "",
"tracker_url": "",
"category": "Transfer Animation"
}
import bpy
import math
import json
from os import path
import mathutils
def Update():
#bpy.context.view_layer.update()
dg = bpy.context.evaluated_depsgraph_get()
#bpy.context.view_layer.update()
#bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
def SetBonePosition(SourceArmature, SourceBoneName, DestinationArmature, DestinationBoneName, DestinationTwistBoneName, WeShouldKeyframe):
destination_bone = DestinationArmature.pose.bones[DestinationBoneName]
sourceBone = SourceArmature.pose.bones[SourceBoneName]
WsPosition = sourceBone.matrix.translation
matrix_final = SourceArmature.matrix_world @ sourceBone.matrix
destination_bone.matrix.translation = matrix_final.translation
#destination_bone.location = sourceBone.location
Update()
if (WeShouldKeyframe):
currentFrame = bpy.context.scene.frame_current
destination_bone.keyframe_insert(data_path='location',frame=currentFrame)
def GetBoneWSQuat(Bone, Arm):
source_arm_matrix = Arm.matrix_world
source_bone_matrix = Bone.matrix
#get the source bones rotation in world space.
source_bone_world_matrix = source_arm_matrix @ source_bone_matrix
return source_bone_world_matrix.to_quaternion()
def SetBoneRotation(SourceArmature, SourceBoneName, DestinationArmature, DestinationBoneName, DestinationTwistBoneName, CorrectionQuat, WeShouldKeyframe, hastwistbone, xferAxis, Transpose):
#Get the rotation of the bone in edit mode
# SourceBoneEdit = SourceArmature.data.bones[SourceBoneName]
# SourceBoneEditRotation = SourceBoneEdit.matrix_local.to_quaternion()
#Get the rotation of the bone in edit mode
# DestinationBoneEdit = DestinationArmature.data.bones[DestinationBoneName]
# DestinationBoneEditRotation = DestinationBoneEdit.matrix_local.to_quaternion()
#
# DeltaSourceEditBoneandDestEditBone = DestinationBoneEditRotation.rotation_difference(SourceBoneEditRotation)
# DeltaDestinationEditBoneandSourceEdit = SourceBoneEditRotation.rotation_difference(DestinationBoneEditRotation)
#rotate the edit rotation quat first to armature rotation
#ArmatureSpaceBoneEditPosition = RigArmature.rotation_quaternion * BoneEditRotation
    if(DestinationTwistBoneName == "" and hastwistbone):
        # Module-level helper: there is no operator instance here, so report via print instead of self.report.
        print("ERROR: You checked Twist Bone, but no name of bone entered!")
        hastwistbone = False
elif hastwistbone:
TwistBone = DestinationArmature.pose.bones[DestinationTwistBoneName]
destination_bone = DestinationArmature.pose.bones[DestinationBoneName]
sourceBone = SourceArmature.pose.bones[SourceBoneName]
#Set Bone Position now that we've calculated it.
destination_bone.rotation_mode = 'QUATERNION'
#################################################
################## Get Source WS Quat ###########
#################################################
source_arm_matrix = SourceArmature.matrix_world
source_bone_matrix = sourceBone.matrix
#get the source bones rotation in world space.
source_bone_world_matrix = source_arm_matrix @ source_bone_matrix
SourceBoneRotWS = source_bone_world_matrix.to_quaternion()
#print('Source Rotation WS Before:')
#print(SourceBoneRotWS.to_euler())
#################################################
################## Get Dest edit WS Quat ###########
#################################################
dest_arm_matrix = DestinationArmature.matrix_world
dest_bone_matrix = destination_bone.matrix
#get the DESTINATION bones rotation in world space.
dest_bone_world_matrix = dest_arm_matrix @ dest_bone_matrix
DestBoneRotWS = dest_bone_world_matrix.to_quaternion()
#print('Destination Rotation WS Before:')
#print(DestBoneRotWS.to_euler())
DifferenceBetweenSourceWSandDestWS = DestBoneRotWS.rotation_difference(SourceBoneRotWS)
#print('Difference Rotation')
FinalQuat = destination_bone.rotation_quaternion.copy() @ DifferenceBetweenSourceWSandDestWS @ CorrectionQuat
destination_bone.rotation_mode = 'XYZ'
FinalEul = FinalQuat.to_euler()
if Transpose == 'ZYX':
destination_bone.rotation_euler.x = FinalEul.z
destination_bone.rotation_euler.y = FinalEul.y
destination_bone.rotation_euler.z = FinalEul.x
elif Transpose == 'ZXY':
destination_bone.rotation_euler.x = FinalEul.z
destination_bone.rotation_euler.y = FinalEul.x
destination_bone.rotation_euler.z = FinalEul.y
elif Transpose == 'XZY':
destination_bone.rotation_euler.x = FinalEul.x
destination_bone.rotation_euler.y = FinalEul.z
destination_bone.rotation_euler.z = FinalEul.y
elif Transpose == 'YZX':
destination_bone.rotation_euler.x = FinalEul.y
destination_bone.rotation_euler.y = FinalEul.z
destination_bone.rotation_euler.z = FinalEul.x
elif Transpose == 'YXZ':
destination_bone.rotation_euler.x = FinalEul.y
destination_bone.rotation_euler.y = FinalEul.x
destination_bone.rotation_euler.z = FinalEul.z
else:
destination_bone.rotation_euler = FinalEul
if xferAxis == 'X':
destination_bone.rotation_euler.y = 0
destination_bone.rotation_euler.z = 0
elif xferAxis == 'Y':
destination_bone.rotation_euler.x = 0
destination_bone.rotation_euler.z = 0
elif xferAxis == 'Z':
destination_bone.rotation_euler.x = 0
destination_bone.rotation_euler.y = 0
elif xferAxis == 'XY':
destination_bone.rotation_euler.z = 0
elif xferAxis == 'XZ':
destination_bone.rotation_euler.y = 0
elif xferAxis == 'YZ':
destination_bone.rotation_euler.x = 0
Update()
if (hastwistbone):
TwistBone.rotation_mode = 'XYZ'
yrotation = destination_bone.rotation_euler.y
destination_bone.rotation_euler.y = 0
TwistBone.rotation_euler.y = math.degrees(yrotation)
#print('Setting Twist Bone: ' + yrotation)
#TwistBone.rotation_mode = 'QUATERNION'
#destination_bone.rotation_mode = 'QUATERNION'
Update()
if (WeShouldKeyframe):
currentFrame = bpy.context.scene.frame_current
destination_bone.rotation_mode = 'XYZ'
destination_bone.keyframe_insert(data_path='rotation_euler',frame=currentFrame)
#print('keyframed' + str(currentFrame))
if (hastwistbone):
TwistBone.rotation_mode = 'XYZ'
TwistBone.keyframe_insert(data_path='rotation_euler',frame=currentFrame)
def GetBoneEditRotationWorldSpace(arm, bonename):
BoneEdit = arm.data.bones[bonename]
BoneEditRotation = BoneEdit.matrix_local.to_quaternion()
BoneEditWS = arm.rotation_quaternion*BoneEditRotation
return BoneEditWS
class KeeMapSettings(bpy.types.PropertyGroup):
start_frame_to_apply: bpy.props.IntProperty(
name = "Starting Frame",
description="Frame to Apply Motion Capture To",
default = 0,
min = 0,
max = 10000
)
number_of_frames_to_apply: bpy.props.IntProperty(
name = "Number of Samples",
description="Number of Samples to read in and apply",
default = 100,
min = 0,
max = 10000
)
keyframe_every_n_frames: bpy.props.IntProperty(
name = "Mouth Keyframe Number",
description="Frame to Apply a Keyframe to, 1 is every frame",
default = 3,
min = 1,
max = 100
)
source_rig_name: bpy.props.StringProperty(
name="Source Rig Name",
description="Rig Name to Apply Capture To",
default="",
maxlen=1024
)
destination_rig_name: bpy.props.StringProperty(
name="Destination Rig Name",
description="Rig Name to Apply Capture To",
default="",
maxlen=1024
)
bone_mapping_file: bpy.props.StringProperty(
name="Bone Mapping File to Read and Save",
description="Select a File to Read In:",
default="",
maxlen=1024,
subtype='FILE_PATH'
)
keyframe_test: bpy.props.BoolProperty(
name="KeyFrame Test",
description="Use this checkbox to enable keyframing of this bone while testing.",
default = False
)
class KeeMapBoneMappingListItem(bpy.types.PropertyGroup):
#"""Group of properties representing a bone mapping from OpenPose to a Rig"""
name : bpy.props.StringProperty()
label : bpy.props.StringProperty()
description : bpy.props.StringProperty()
SourceBoneName: bpy.props.StringProperty(
name="Source Bone Name",
description="This is the name for the rig bone name.",
default="",
maxlen=1024
)
DestinationBoneName: bpy.props.StringProperty(
name="Destination Bone Name",
description="This is the name for the rig bone name.",
default="",
maxlen=1024
)
keyframe_this_bone: bpy.props.BoolProperty(
name="KeyFrame This Bone",
description="Use this checkbox to disable keyframing of this bone for testing.",
default = True
)
CorrectionFactor: bpy.props.FloatVectorProperty(
name="Correction Rotation",
description="After Setting the global position of the bone to the same as the source the script will rotate the bone by these angles afterwards to correct rotational differences between the sourc and destination bones.",
subtype = 'EULER',
unit = 'ROTATION',
default = (0.0, 0.0, 0.0),
size = 3
)
has_twist_bone: bpy.props.BoolProperty(
name="Has a Twist Bone",
description="This will apply the twist along the y axis",
default = False
)
TwistBoneName: bpy.props.StringProperty(
name="Twist Bone Name",
description="This is the name for the rig bone name.",
default="",
maxlen=1024
)
set_bone_position: bpy.props.BoolProperty(
name="Set Position of Bone",
description="This will set the bone position to the same position of the source bone.",
default = False
)
set_bone_rotation: bpy.props.BoolProperty(
name="Set Rotation of Bone",
description="This will set the bone rotation to the same position of the source bone.",
default = True
)
bone_rotation_application_axis: bpy.props.EnumProperty(
name="Apply To Axis",
description="Axis to Apply twist translation or rotation to, other axis will be left zero.",
items=[ ('XYZ', "XYZ", ""),
('XY', "XY", ""),
('XZ', "XZ", ""),
('YZ', "YZ", ""),
('X', "X", ""),
('Y', "Y", ""),
('Z', "Z", "")
]
)
bone_transpose_axis: bpy.props.EnumProperty(
name="Transpose Axis",
description="Select Two Axis to swap when applying angle.",
items=[ ('NONE', "NONE", ""),
('ZXY', "ZXY", ""),
('ZYX', "ZYX", ""),
('XZY', "XZY", ""),
('YZX', "YZX", ""),
('YXZ', "YXZ", ""),
('ZXY', "ZXY", "")
]
)
####################################################################################
####################################################################################
####################################################################################
# Code for iteration through frames and applying positions and angles to rig
####################################################################################
####################################################################################
####################################################################################
class PerformAnimationTransfer(bpy.types.Operator):
bl_idname = "wm.perform_animation_transfer"
bl_label = "Read in OpenPose JSON Files and Apply to Character"
def execute(self, context):
scene = bpy.context.scene
KeeMap = bpy.context.scene.keemap_settings
bone_mapping_list = context.scene.keemap_bone_mapping_list
SourceArmName = KeeMap.source_rig_name
DestArmName = KeeMap.destination_rig_name
KeyFrame_Every_Nth_Frame = KeeMap.keyframe_every_n_frames
NumberOfFramesToTransfer = KeeMap.number_of_frames_to_apply
#StartFrame = scene.frame_current
StartFrame = KeeMap.start_frame_to_apply
print('')
print('Start of Everything')
print('')
#SourcArm = bpy.context.selected_objects[SourcArmName]
#DestArm = bpy.context.selected_objects[DestArmName]
if SourceArmName == "":
self.report({'ERROR'}, "Must Have a Source Armature Name Entered")
elif DestArmName == "":
self.report({'ERROR'}, "Must Have a Destination Armature Name Entered")
else:
SourceArm = bpy.data.objects[SourceArmName]
DestArm = bpy.data.objects[DestArmName]
i=0
while (i < NumberOfFramesToTransfer):
#scene.frame_current = StartFrame + i
bpy.context.scene.frame_set(StartFrame + i)
Update()
print('')
CurrentFrame = scene.frame_current
EndFrame = StartFrame + NumberOfFramesToTransfer
PercentComplete = ((CurrentFrame - StartFrame)/(EndFrame - StartFrame))*100
print('Working On Frame: ' + str(scene.frame_current) + ' of ' + str(EndFrame) + ' ' + "{:.1f}".format(PercentComplete) + '%')
print('')
bpy.ops.wm.test_all_bones(keyframe = True)
i = i + KeyFrame_Every_Nth_Frame
return{'FINISHED'}
class KEEMAP_TestSetRotationOfBone(bpy.types.Operator):
"""Maps a Single Bone on the Current Frame to Test Mapping"""
bl_idname = "wm.test_set_rotation_of_bone"
bl_label = "Test Bone Re-Targetting"
index2pose: bpy.props.IntProperty()
keyframe: bpy.props.BoolProperty(default = False)
def execute(self, context):
scene = bpy.context.scene
if(self.index2pose == -1):
index = scene.keemap_bone_mapping_list_index
else:
index = self.index2pose
KeeMap = bpy.context.scene.keemap_settings
bone_mapping_list = context.scene.keemap_bone_mapping_list
#if the box is checked we're going to keyframe no matter what:
if KeeMap.keyframe_test:
self.keyframe = True
#print('')
#print('Test Pressed:')
SourceArmName = KeeMap.source_rig_name
DestArmName = KeeMap.destination_rig_name
if SourceArmName == "":
self.report({'ERROR'}, "Must Have a Source Armature Name Entered")
elif DestArmName == "":
self.report({'ERROR'}, "Must Have a Destination Armature Name Entered")
else:
SourceArm = bpy.data.objects[SourceArmName]
DestArm = bpy.data.objects[DestArmName]
SourceBoneName = bone_mapping_list[index].SourceBoneName
DestBoneName = bone_mapping_list[index].DestinationBoneName
xferAxis = bone_mapping_list[index].bone_rotation_application_axis
xPose = bone_mapping_list[index].bone_transpose_axis
if SourceBoneName == "":
self.report({'ERROR'}, "Must Have a Source Bone Name Entered")
elif DestBoneName == "":
self.report({'ERROR'}, "Must Have a Destination Bone Name Entered")
else:
HasTwist = bone_mapping_list[index].has_twist_bone
TwistBoneName = bone_mapping_list[index].TwistBoneName
CorrectionVectorX = bone_mapping_list[index].CorrectionFactor.x
#print(math.degrees(CorrectionVectorX))
CorrectionVectorY = bone_mapping_list[index].CorrectionFactor.y
#print(math.degrees(CorrectionVectorY))
CorrectionVectorZ = bone_mapping_list[index].CorrectionFactor.z
#print(math.degrees(CorrectionVectorZ))
corrEul = mathutils.Euler((CorrectionVectorX, CorrectionVectorY, CorrectionVectorZ), 'XYZ')
#print('correction Eul in:')
#print(corrEul)
CorrQuat = corrEul.to_quaternion()
#print('correction in:')
#print(CorrQuat.to_euler())
if bone_mapping_list[index].set_bone_rotation:
SetBoneRotation(SourceArm, SourceBoneName, DestArm, DestBoneName, TwistBoneName, CorrQuat, self.keyframe, HasTwist, xferAxis,xPose)
if bone_mapping_list[index].set_bone_position:
SetBonePosition(SourceArm, SourceBoneName, DestArm, DestBoneName, TwistBoneName, self.keyframe)
return{'FINISHED'}
class KEEMAP_BoneSelectedOperator(bpy.types.Operator):
bl_idname = "wm.bone_selected"
bl_label = "Operator to Change Selection based on selected bone"
@classmethod
def poll(cls, context):
return len(context.selected_pose_bones) > 0
def execute(self, context):
print('Checking')
bone_mapping_list = context.scene.keemap_bone_mapping_list
index = context.scene.keemap_bone_mapping_list_index
KeeMap = bpy.context.scene.keemap_settings
DestArmName = KeeMap.destination_rig_name
if DestArmName != '':
DestArm = bpy.data.objects[DestArmName]
if len(context.selected_pose_bones) > 0:
bonename = context.selected_pose_bones[0].name
i = 0
for bone_settings in bone_mapping_list:
if bone_settings.DestinationBoneName == bonename:
context.scene.keemap_bone_mapping_list_index = i
i = i+1
return {'FINISHED'}
class KEEMAP_TestAllBones(bpy.types.Operator):
"""Test All Bones to set there position"""
bl_idname = "wm.test_all_bones"
bl_label = "Test Set All Bone's Posiitoin"
keyframe: bpy.props.BoolProperty(default = False)
def execute(self, context):
bone_mapping_list = context.scene.keemap_bone_mapping_list
index = context.scene.keemap_bone_mapping_list_index
# CODE FOR SETTING BONE POSITIONS:
i = 0
for bone_settings in bone_mapping_list:
index = i
print(bone_settings.name)
bpy.ops.wm.test_set_rotation_of_bone(index2pose = index,keyframe = self.keyframe)
i = i+1
return{'FINISHED'}
class KEEMAP_GetSourceBoneName(bpy.types.Operator):
"""If a bone is selected, get the name and popultate"""
bl_idname = "wm.get_source_bone_name"
bl_label = "Get Source Bone Name"
def execute(self, context):
scene = bpy.context.scene
index = scene.keemap_bone_mapping_list_index
KeeMap = bpy.context.scene.keemap_settings
bone_mapping_list = context.scene.keemap_bone_mapping_list
if len(context.selected_objects) == 1:
rigname = context.selected_objects[0].name
bonename = context.selected_pose_bones[0].name
elif len(context.selected_objects) == 2:
bonename = context.selected_pose_bones[0].name
rig1 = context.selected_objects[0]
if rig1.pose.bones.find(bonename) == -1:
rigname = context.selected_objects[1].name
else:
rigname = context.selected_objects[0].name
if len(context.selected_pose_bones) == 1:
if rigname == KeeMap.source_rig_name:
bone_mapping_list[index].SourceBoneName = bonename
if rigname == KeeMap.destination_rig_name:
bone_mapping_list[index].DestinationBoneName = bonename
if bone_mapping_list[index].name == '' and rigname == KeeMap.source_rig_name:
bone_mapping_list[index].name = bonename
return{'FINISHED'}
class KEEMAP_AutoGetBoneCorrection(bpy.types.Operator):
"""Auto Calculate the Bones Correction Number from calculated to current position."""
bl_idname = "wm.get_bone_rotation_correction"
bl_label = "Auto Calc Correction"
def execute(self, context):
scene = bpy.context.scene
index = scene.keemap_bone_mapping_list_index
KeeMap = bpy.context.scene.keemap_settings
bone_mapping_list = context.scene.keemap_bone_mapping_list
print('')
print('Calc Pressed:')
SourceArmName = KeeMap.source_rig_name
DestArmName = KeeMap.destination_rig_name
if SourceArmName == "":
self.report({'ERROR'}, "Must Have a Source Armature Name Entered")
elif DestArmName == "":
self.report({'ERROR'}, "Must Have a Destination Armature Name Entered")
else:
SourceArm = bpy.data.objects[SourceArmName]
DestArm = bpy.data.objects[DestArmName]
SourceBoneName = bone_mapping_list[index].SourceBoneName
DestBoneName = bone_mapping_list[index].DestinationBoneName
xferAxis = bone_mapping_list[index].bone_rotation_application_axis
xPose = bone_mapping_list[index].bone_transpose_axis
if SourceBoneName == "":
self.report({'ERROR'}, "Must Have a Source Bone Name Entered")
elif DestBoneName == "":
self.report({'ERROR'}, "Must Have a Destination Bone Name Entered")
else:
destBone = DestArm.pose.bones[DestBoneName]
sourceBone = SourceArm.pose.bones[SourceBoneName]
destBoneMode = 'XYZ'
destBone.rotation_mode = destBoneMode
StartingDestBoneWSQuat = GetBoneWSQuat(destBone, DestArm)
print("Destination Bone Starting WS")
print(StartingDestBoneWSQuat.to_euler())
destBoneStartPosition = destBone.rotation_euler.copy()
#print(destBoneStartPosition)
HasTwist = bone_mapping_list[index].has_twist_bone
if HasTwist:
TwistBoneName = bone_mapping_list[index].TwistBoneName
TwistBone = DestArm.pose.bones[TwistBoneName]
y = TwistBone.rotation_euler.y
else:
TwistBoneName = ''
CorrQuat = mathutils.Quaternion((1,0,0,0))
SetBoneRotation(SourceArm, SourceBoneName, DestArm, DestBoneName, TwistBoneName, CorrQuat, False, HasTwist, xferAxis,xPose)
Update()
ModifiedDestBoneWSQuat = GetBoneWSQuat(destBone, DestArm)
print("Destination Bone After Modifying WS")
print(ModifiedDestBoneWSQuat.to_euler())
q = ModifiedDestBoneWSQuat.rotation_difference(StartingDestBoneWSQuat)
print('Difference between before and After modification')
print(q.to_euler())
corrEuler = q.to_euler()
print(math.degrees(corrEuler.x))
print(math.degrees(corrEuler.y))
print(math.degrees(corrEuler.z))
print(corrEuler.to_quaternion())
bone_mapping_list[index].CorrectionFactor.x = corrEuler.x
bone_mapping_list[index].CorrectionFactor.y = corrEuler.y
bone_mapping_list[index].CorrectionFactor.z = corrEuler.z
destBone.rotation_euler = destBoneStartPosition
if HasTwist:
TwistBone.rotation_euler.y = y
destBone.rotation_euler.y = 0
print(destBoneStartPosition)
return{'FINISHED'}
class KEEMAP_BONE_UL_List(bpy.types.UIList):
"""Demo UIList."""
def draw_item(self, context, layout, data, item, icon, active_data, active_propname):
# We could write some code to decide which icon to use here...
custom_icon = 'BONE_DATA'
# Make sure your code supports all 3 layout types if
if self.layout_type in {'DEFAULT', 'COMPACT'}:
layout.label(text=item.name, icon = custom_icon)
elif self.layout_type in {'GRID'}:
layout.alignment = 'CENTER'
layout.label(text="", icon = custom_icon)
class KEEMAP_LIST_OT_NewItem(bpy.types.Operator):
"""Add a new item to the list."""
bl_idname = "keemap_bone_mapping_list.new_item"
bl_label = "Add a new item"
def execute(self, context):
index = context.scene.keemap_bone_mapping_list_index
context.scene.keemap_bone_mapping_list.add()
index = len(context.scene.keemap_bone_mapping_list)
return{'FINISHED'}
class KEEMAP_LIST_OT_DeleteItem(bpy.types.Operator):
"""Delete the selected item from the list."""
bl_idname = "keemap_bone_mapping_list.delete_item"
bl_label = "Deletes an item"
@classmethod
def poll(cls, context):
return context.scene.keemap_bone_mapping_list
def execute(self, context):
bone_mapping_list = context.scene.keemap_bone_mapping_list
index = context.scene.keemap_bone_mapping_list_index
bone_mapping_list.remove(index)
index = min(max(0, index - 1), len(bone_mapping_list) - 1)
return{'FINISHED'}
class KEEMAP_LIST_OT_MoveItem(bpy.types.Operator):
"""Move an item in the list."""
bl_idname = "keemap_bone_mapping_list.move_item"
bl_label = "Move an item in the list"
direction: bpy.props.EnumProperty(items=(('UP', 'Up', ""), ('DOWN', 'Down', ""),))
@classmethod
def poll(cls, context):
return context.scene.keemap_bone_mapping_list
def move_index(self):
""" Move index of an item render queue while clamping it. """
scene = bpy.context.scene
index = scene.keemap_bone_mapping_list_index
list_length = len(bpy.context.scene.keemap_bone_mapping_list) - 1 # (index starts at 0)
new_index = index + (-1 if self.direction == 'UP' else 1)
        # Write the clamped value back to the scene property so the selection follows the moved item.
        scene.keemap_bone_mapping_list_index = max(0, min(new_index, list_length))
def execute(self, context):
bone_mapping_list = context.scene.keemap_bone_mapping_list
scene = context.scene
index = scene.keemap_bone_mapping_list_index
neighbor = index + (-1 if self.direction == 'UP' else 1)
bone_mapping_list.move(neighbor, index)
self.move_index()
return{'FINISHED'}
class KEEMAP_LIST_OT_ReadInFile(bpy.types.Operator):
"""Read in Bone Mapping File"""
bl_idname = "wm.keemap_read_file"
bl_label = "Read In Bone Mapping File"
def execute(self, context):
context.scene.keemap_bone_mapping_list_index = 0
bone_list = context.scene.keemap_bone_mapping_list
bone_list.clear()
KeeMap = bpy.context.scene.keemap_settings
filepath = bpy.path.abspath(KeeMap.bone_mapping_file)
file = open(filepath, 'r')
data = json.load(file)
        KeeMap.start_frame_to_apply = data['start_frame_to_apply']
KeeMap.number_of_frames_to_apply = data['number_of_frames_to_apply']
KeeMap.keyframe_every_n_frames = data['keyframe_every_n_frames']
KeeMap.source_rig_name = data['source_rig_name']
KeeMap.destination_rig_name = data['destination_rig_name']
KeeMap.bone_mapping_file = data['bone_mapping_file']
i = 0
for p in data['bones']:
bone_list.add()
bone = bone_list[i]
bone.name = p['name']
bone.label = p['label']
bone.description = p['description']
bone.SourceBoneName = p['SourceBoneName']
bone.DestinationBoneName = p['DestinationBoneName']
bone.keyframe_this_bone = p['keyframe_this_bone']
bone.CorrectionFactor.x = p['CorrectionFactorX']
bone.CorrectionFactor.y = p['CorrectionFactorY']
bone.CorrectionFactor.z = p['CorrectionFactorZ']
bone.has_twist_bone = p['has_twist_bone']
bone.TwistBoneName = p['TwistBoneName']
bone.set_bone_position = p['set_bone_position']
bone.set_bone_rotation = p['set_bone_rotation']
bone.bone_rotation_application_axis = p['bone_rotation_application_axis']
i = i + 1
file.close()
return{'FINISHED'}
class KEEMAP_LIST_OT_SaveToFile(bpy.types.Operator):
"""Save Out Bone Mapping File"""
bl_idname = "wm.keemap_save_file"
bl_label = "Save Bone Mapping File"
def execute(self, context):
#context.scene.bone_mapping_list.clear()
KeeMap = bpy.context.scene.keemap_settings
filepath = bpy.path.abspath(KeeMap.bone_mapping_file)
file = open(filepath, 'w+')
rootParams = {
"start_frame_to_apply":KeeMap.start_frame_to_apply,
"number_of_frames_to_apply":KeeMap.number_of_frames_to_apply,
"keyframe_every_n_frames":KeeMap.keyframe_every_n_frames,
"source_rig_name":KeeMap.source_rig_name,
"destination_rig_name":KeeMap.destination_rig_name,
"bone_mapping_file":KeeMap.bone_mapping_file
}
bone_list = context.scene.keemap_bone_mapping_list
jsonbones = {}
jsonbones['bones'] = []
for bone in bone_list:
jsonbones['bones'].append({
'name': bone.name,
'label': bone.label,
'description': bone.description,
'SourceBoneName': bone.SourceBoneName,
'DestinationBoneName': bone.DestinationBoneName,
'keyframe_this_bone': bone.keyframe_this_bone,
'CorrectionFactorX': bone.CorrectionFactor.x,
'CorrectionFactorY': bone.CorrectionFactor.y,
'CorrectionFactorZ': bone.CorrectionFactor.z,
'has_twist_bone': bone.has_twist_bone,
'TwistBoneName': bone.TwistBoneName,
'set_bone_position': bone.set_bone_position,
'set_bone_rotation': bone.set_bone_rotation,
'bone_rotation_application_axis': bone.bone_rotation_application_axis,
})
jsonbones.update(rootParams)
print(jsonbones)
json.dump(jsonbones, file)
file.close()
return{'FINISHED'}
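# For reference, the JSON written and read by the two operators above has this shape
# (illustrative values only -- the bone and rig names are placeholders, not part of the addon):
# {
#   "start_frame_to_apply": 0,
#   "number_of_frames_to_apply": 100,
#   "keyframe_every_n_frames": 3,
#   "source_rig_name": "SourceRig",
#   "destination_rig_name": "DestRig",
#   "bone_mapping_file": "//mapping.json",
#   "bones": [
#     {"name": "hips", "label": "", "description": "",
#      "SourceBoneName": "hips", "DestinationBoneName": "pelvis",
#      "keyframe_this_bone": true,
#      "CorrectionFactorX": 0.0, "CorrectionFactorY": 0.0, "CorrectionFactorZ": 0.0,
#      "has_twist_bone": false, "TwistBoneName": "",
#      "set_bone_position": true, "set_bone_rotation": true,
#      "bone_rotation_application_axis": "XYZ"}
#   ]
# }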
class KeeMapToolsPanel(bpy.types.Panel):
"""Creates a Panel for the KeeMap animation retargetting rig addon"""
bl_label = "KeeMap"
bl_idname = "KEEMAP_PT_MAINPANEL"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = 'KeeMapRig'
bl_context = "posemode"
@classmethod
def poll(self,context):
return context.object is not None
def draw(self, context):
layout = self.layout
scene = context.scene
row = layout.row()
row.label(text="KeeMap Script written by Nick Keeline")
row = layout.row()
row.label(text="Subscribe to Checkered Bug on Youtube")
class KeemapPanelOne(KeeMapToolsPanel, bpy.types.Panel):
bl_idname = "KEEMAP_PT_TRANSFERSETTINGS"
bl_label = "Transfer Settings"
def draw(self, context):
layout = self.layout
KeeMap = bpy.context.scene.keemap_settings
#layout.label(text="Transfer Settings")
layout.prop(KeeMap, "start_frame_to_apply")
layout.prop(KeeMap, "number_of_frames_to_apply")
layout.prop(KeeMap, "keyframe_every_n_frames")
layout.prop(KeeMap, "source_rig_name")
layout.prop(KeeMap, "destination_rig_name")
layout.prop(KeeMap, "bone_mapping_file")
row = layout.row()
row.operator("wm.keemap_read_file")
row.operator("wm.keemap_save_file")
layout.operator("wm.perform_animation_transfer")
class KeemapPanelTwo(KeeMapToolsPanel, bpy.types.Panel):
bl_idname = "KEEMAP_PT_BONEMAPPING"
bl_label = "Bone Mapping"
def draw(self, context):
layout = self.layout
scene = context.scene
KeeMap = bpy.context.scene.keemap_settings
row = layout.row()
row.template_list("KEEMAP_BONE_UL_List", "The_Keemap_List", scene, "keemap_bone_mapping_list", scene,"keemap_bone_mapping_list_index")#, type='COMPACT')#, "index")
row = layout.row()
row.operator('keemap_bone_mapping_list.new_item', text='NEW')
row.operator('keemap_bone_mapping_list.delete_item', text='REMOVE')
row.operator('keemap_bone_mapping_list.move_item', text='UP').direction = 'UP'
row.operator('keemap_bone_mapping_list.move_item', text='DOWN').direction = 'DOWN'
row = layout.row()
row.label(text="List MUST be ordered Parent->Child")
if scene.keemap_bone_mapping_list_index >= 0 and scene.keemap_bone_mapping_list:
item = scene.keemap_bone_mapping_list[scene.keemap_bone_mapping_list_index]
layout = self.layout
box = layout.box()
box.prop(item, "name")
box.prop(item, "SourceBoneName")
box.prop(item, "DestinationBoneName")
box.operator('wm.get_source_bone_name', text='GET NAME')
box.operator('wm.bone_selected', text='SELECT')
box.prop(item, "keyframe_this_bone")
row = layout.row()
row.prop(item, "set_bone_rotation")
if item.set_bone_rotation:
box = layout.box()
box.prop(item, "bone_rotation_application_axis")
box.prop(item, "bone_transpose_axis")
box.prop(item, "CorrectionFactor")
box.operator('wm.get_bone_rotation_correction', text='CALC CORRECTiON')
# if not item.has_twist_bone:
# box.operator('wm.get_bone_rotation_correction', text='CALC CORRECTiON')
# box.prop(item, "has_twist_bone")
# if item.has_twist_bone:
# box.prop(item, "TwistBoneName")
row = layout.row()
row.prop(item, "set_bone_position")
row = layout.row()
row.operator('wm.test_set_rotation_of_bone', text='TEST').index2pose = -1
row.operator('wm.test_all_bones', text='TEST ALL').keyframe = KeeMap.keyframe_test
layout.prop(KeeMap, "keyframe_test")
# ------------------------------------------------------------------------
# register and unregister
# ------------------------------------------------------------------------
def register():
bpy.utils.register_class(KeemapPanelOne)
bpy.utils.register_class(KeemapPanelTwo)
bpy.utils.register_class(PerformAnimationTransfer)
bpy.utils.register_class(KEEMAP_BONE_UL_List)
bpy.utils.register_class(KEEMAP_GetSourceBoneName)
bpy.utils.register_class(KeeMapToolsPanel)
bpy.utils.register_class(KeeMapBoneMappingListItem)
bpy.utils.register_class(KeeMapSettings)
bpy.utils.register_class(KEEMAP_LIST_OT_NewItem)
bpy.utils.register_class(KEEMAP_LIST_OT_DeleteItem)
bpy.utils.register_class(KEEMAP_LIST_OT_MoveItem)
bpy.utils.register_class(KEEMAP_TestSetRotationOfBone)
bpy.utils.register_class(KEEMAP_LIST_OT_ReadInFile)
bpy.utils.register_class(KEEMAP_LIST_OT_SaveToFile)
bpy.utils.register_class(KEEMAP_AutoGetBoneCorrection)
bpy.utils.register_class(KEEMAP_TestAllBones)
bpy.utils.register_class(KEEMAP_BoneSelectedOperator)
bpy.types.Scene.keemap_bone_mapping_list_index = bpy.props.IntProperty()
bpy.types.Scene.keemap_bone_mapping_list = bpy.props.CollectionProperty(type = KeeMapBoneMappingListItem)
bpy.types.Scene.keemap_settings = bpy.props.PointerProperty(type=KeeMapSettings)
def unregister():
bpy.utils.unregister_class(PerformAnimationTransfer)
bpy.utils.unregister_class(KEEMAP_BONE_UL_List)
bpy.utils.unregister_class(KeeMapSettings)
bpy.utils.unregister_class(KEEMAP_LIST_OT_NewItem)
bpy.utils.unregister_class(KEEMAP_LIST_OT_DeleteItem)
bpy.utils.unregister_class(KEEMAP_LIST_OT_MoveItem)
bpy.utils.unregister_class(KEEMAP_GetSourceBoneName)
bpy.utils.unregister_class(KeeMapToolsPanel)
bpy.utils.unregister_class(KeemapPanelOne)
bpy.utils.unregister_class(KeemapPanelTwo)
bpy.utils.unregister_class(KeeMapBoneMappingListItem)
bpy.utils.unregister_class(KEEMAP_TestSetRotationOfBone)
bpy.utils.unregister_class(KEEMAP_LIST_OT_ReadInFile)
bpy.utils.unregister_class(KEEMAP_LIST_OT_SaveToFile)
bpy.utils.unregister_class(KEEMAP_AutoGetBoneCorrection)
bpy.utils.unregister_class(KEEMAP_TestAllBones)
bpy.utils.unregister_class(KEEMAP_BoneSelectedOperator)
del bpy.types.Scene.keemap_bone_mapping_list
del bpy.types.Scene.keemap_bone_mapping_list_index
del bpy.types.Scene.keemap_settings
if __name__ == "__main__":
register()
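# Note: as a regular Blender add-on this file can either be run from the Text Editor
# (which ends up calling register() above) or installed via Edit > Preferences > Add-ons > Install...;
# the panels then appear in the 3D View sidebar under the "KeeMapRig" tab while in
# Pose Mode (see bl_category and bl_context in the panel classes).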
| [
"[email protected]"
] | |
7aa79c03836223277f5b6a4e670ec095339614e3 | 2c665ab7365b82ac6103475cea4cc729019ff3bc | /cisco/netmiko/netmiko-from-file.py | 74cbc3be4911ff9013c2ae99040f643c286acddc | [] | no_license | Magnusjensaas/Network-Automation | a67f47573dc895c5f564daf23e9e123d1aa7def8 | 09816766ebee50c6fa8ef81fb0881da67393c07c | refs/heads/master | 2020-09-22T11:09:30.779457 | 2020-03-29T19:35:34 | 2020-03-29T19:35:34 | 225,169,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,465 | py | import netmiko
import getpass
# Get user input for username and password.
username = input("Username: ")
password = getpass.getpass()
def check_software_version(device_list):
"""
Method to check software running on IOS device.
From list of predefined IOS versions.
"""
for software_version in device_list:
print("Checking for software version " + software_version)
output_version = net_connect.send_command("show version")
init_version = 0
int_version = output_version.find(software_version)
if int_version > 0:
print("Software version found on device: " + software_version)
return software_version
else:
print("Software not found on device: " + software_version)
def configure_device(software_version):
"""
Apply configuration from configuration file.
Applies only the configuration for the software version that is running on the device.
"""
if software_version == "vios_l2-ADVENTERPRISEK9-M":
print("Running " + software_version + " Commands")
output_config = net_connect.send_config_set(commands_list_switch)
return output_config
elif software_version == "VIOS-ADVENTERPRISEK9-M":
print("Running " + software_version + " Commands")
output_config = net_connect.send_config_set(commands_list_router)
return output_config
# Opening file for Switch configuration.
with open("commands_list_switch") as f:
commands_list_switch = f.read().splitlines()
# Opening file fore Router configuration.
with open("commands_list_router") as f:
commands_list_router = f.read().splitlines()
# Opening file for commands to send to all devices.
with open("command_file") as f:
commands_to_send = f.read().splitlines()
# Opening list of devices to apply configurations to.
with open("devices_file") as f:
devices_list = f.read().splitlines()
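# Each of the four text files above holds one entry per line. Illustrative contents only
# (placeholder values, not shipped with this script):
#   devices_file:          192.168.122.71
#                          192.168.122.72
#   command_file:          ntp server 192.168.122.1
#   commands_list_switch:  vlan 10
#   commands_list_router:  interface Loopback0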
# Loop allowing the commands to be applied to all devices in devices_list, specifying OS, IP and credentials.
for devices in devices_list:
print("Connecting to device " + devices)
device_ip = devices
ios_device = {
"device_type": "cisco_ios",
"ip": device_ip,
"username": username,
"password": password
}
# Try statement with exceptions to catch connectivity issues and credential issues.
try:
net_connect = netmiko.ConnectHandler(**ios_device)
except netmiko.ssh_exception.AuthenticationException:
print("Authentication failure " + device_ip)
continue
except netmiko.ssh_exception.NetMikoTimeoutException:
print("Connection timed out " + device_ip)
continue
except EOFError:
print("End of file while attempting device " + device_ip)
continue
except netmiko.ssh_exception.SSHException:
print("SSH error " + device_ip)
continue
except Exception as unknown_error:
print(unknown_error)
continue
# Sending configuration from "command_file" and printing the device output.
device_output = net_connect.send_config_set(commands_to_send)
print(device_output)
# List of software version running in my topology
device_type = ["vios_l2-ADVENTERPRISEK9-M", "VIOS-ADVENTERPRISEK9-M"]
# Check software version running on device
software = check_software_version(device_type)
# Configure device
output = configure_device(software)
print(output)
| [
"[email protected]"
] | |
4844e849b8071a58ff807d6e75b08f584e26e7d7 | 0443d4b2c79568c0f39b0a966b9bc20cd3cec990 | /day2/day2.py | 7a8e10fdc42b8193467bc8089a193e7c4987fb51 | [] | no_license | OskarSigvardsson/adventofcode2020 | d1ee3f23a73c8cbf8734db1589143b3eb29a2bbf | c0883e4b0f2de19096fb840eee35c035d641daaa | refs/heads/master | 2023-02-16T08:02:06.771826 | 2021-01-03T21:36:46 | 2021-01-03T21:36:46 | 318,456,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | import sys
import re
def valid1(line):
m = re.match(r"(\d+)-(\d+) (.): (.*)", line)
a, b, char, string = m.groups()
a = int(a)
b = int(b)
return a <= len([c for c in string if c == char]) <= b
def valid2(line):
m = re.match(r"(\d+)-(\d+) (.): (.*)", line)
a, b, char, string = m.groups()
a = int(a)
b = int(b)
return (string[a - 1] == char) != (string[b - 1] == char)
lines = [line.strip() for line in sys.stdin.readlines()]
print(len([line for line in lines if valid1(line)]))
print(len([line for line in lines if valid2(line)]))
| [
"[email protected]"
] | |
15c7f29986320a0bf73be21f29f997e888550da3 | cd184204d05399d44cea30373bad52882d2d1051 | /conta.py | 5bd4342991fcc5818630b04a669dbb16008fa6e4 | [] | no_license | davidapdf/Python_teste | b560d91dcd0bde05bff556ce65c60d80f5068a29 | 00fe5a821c7d1bf7e7b667161a573ae7c2d93a2e | refs/heads/master | 2020-12-22T00:07:50.542644 | 2020-02-04T15:40:08 | 2020-02-04T15:40:08 | 236,609,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | class Conta:
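    """A small bank-account model: account number, holder (titular), balance (saldo)
    and overdraft limit (limite), with deposit, withdraw and transfer operations."""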
def __init__(self, numero, titular, saldo, limite):
self.__numero = numero
self.__titular = titular
self.__saldo = saldo
self.__limite = limite
@property
def saldo(self):
return self.__saldo
@property
def titular(self):
return self.__titular
@property
def limite(self):
return self.__limite
@saldo.setter
def saldo(self, saldo):
self.__saldo = saldo
@titular.setter
def titular(self, titular):
self.__titular = titular
@limite.setter
def limite(self, limite):
self.__limite = limite
def __pode_sacar(self, valor):
valor_disponivel = self.saldo + self.limite
return valor <= valor_disponivel
def extrato(self):
print("Saldo {} Titular {}".format(self.__saldo, self.__titular))
def deposita(self, valor):
self.__saldo += valor
def saca(self, valor):
if(self.__pode_sacar(valor)):
self.__saldo -= valor
else:
print("Valor insuficiente {}".format(valor))
def trasferir(self,valor,conta):
self.saca(valor)
conta.deposita(valor)
@staticmethod
def codigo_banco():
return "001"
@staticmethod
def codigos_bancos():
return {'BB': '001', 'Caixa': '104', 'Bradesco': '237'}
| [
"[email protected]"
] | |
3f3c072b38918ee758b521266ef022a96d913f0d | 87c70b2a720311c3341bc6cd4fb49a6fcfdee232 | /polls/polls/settings.py | 95e69518c79c6dcb4ac8838e9ae786ae780a328f | [] | no_license | AlexKotl/python-exercises | 44b734d18dc7d126514ea86437e2b26c1d1fe310 | ddec09f1e5c0e8833df0c96c3533bf3b808d25da | refs/heads/master | 2020-03-21T19:03:39.944981 | 2018-07-04T19:55:09 | 2018-07-04T19:55:09 | 138,928,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,233 | py | """
Django settings for polls project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=4^p8qw(c22x8zjm180b2rrvpetu@a1c@2un^ha-2#0_4qb$+6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'poll.apps.PollConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'polls.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'polls.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'django_polls',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '127.0.0.1',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
| [
"[email protected]"
] | |
5cff741a726a542d9473a208e7e9f1745c667589 | 738e54ead99e912a43a6b0cf0521bcf7324825cc | /ProjectX/src/HotelReservation/migrations/0032_best_choice.py | 2b15641680a5f0e4c72316a4ab95e12410d33164 | [] | no_license | gugajung/HotelReservation | 413abba1a64c3cafde909fbb48cd6c0a219c3bef | d3e150aa732fe94e95f43aa708ec21c654b4be4c | refs/heads/master | 2020-04-10T19:23:11.068556 | 2018-12-04T02:25:55 | 2018-12-04T02:25:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | # Generated by Django 2.0.4 on 2018-09-11 01:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('HotelReservation', '0031_room_image1'),
]
operations = [
migrations.CreateModel(
name='Best_Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hotel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='HotelReservation.HotelList')),
('user', models.OneToOneField(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
24b1dbb3765a4c32784a7441086967dcc5356ff7 | 309d70c23cfad319073bfb5d390e93de01c2d712 | /python/questions/24.py | 9101465c455902732a82fe3841c98135f6178f8d | [] | no_license | BarryLiu/python | 53eb0accd778289b182a2b0b5cd837d22f6f0c2b | d07c3e74d2048715084c9c15ce0c883fcdfcc033 | refs/heads/master | 2022-01-18T08:01:13.502299 | 2022-01-07T13:12:24 | 2022-01-07T13:12:24 | 94,323,741 | 0 | 0 | null | 2018-07-30T23:07:58 | 2017-06-14T11:28:57 | Python | UTF-8 | Python | false | false | 440 | py | """
[Program 24]
Problem: given a positive integer with no more than 5 digits, (1) report how many digits it has and (2) print the digits in reverse order.
"""
def rever_num(num):  # recursively print the digits in reverse order
if num==num_len-1:
print(input_number[num],end='')
else:
rever_num(num+1)
print(input_number[num],end='')
input_number=input('Enter a number: ')
num_len=len(input_number)
print('The number has %d digits' %num_len)
rever_num(0) | [
"[email protected]"
] | |
125a463b34ea48dd39865a7cd348be89e1fd7e91 | 072729bae2cef5a462d63dfe0c8ed896ac67114d | /weasyl/views/test/test_debug.py | 98d4b83b75e8bd029cfa421b3bb8f62a289d89b4 | [] | no_license | Weasyl/weasyl3 | b9d209decdbc08284b3ce9f69918ca592e7fd7ce | beeae4706d8779f34449f32e3911ef3428493c02 | refs/heads/master | 2020-04-06T07:02:30.818436 | 2019-12-21T06:28:08 | 2019-12-21T06:28:08 | 60,926,001 | 1 | 0 | null | 2019-12-21T06:28:09 | 2016-06-11T20:14:42 | CSS | UTF-8 | Python | false | false | 462 | py | import pytest
from libweasyl.exceptions import ExpectedWeasylError
from weasyl.views import debug
def test_expected_exception():
"""
expected_exception just raises an ExpectedWeasylError always.
"""
pytest.raises(ExpectedWeasylError, debug.expected_exception, None)
def test_unexpected_exception():
"""
unexpected_exception just raises a RuntimeError always.
"""
pytest.raises(RuntimeError, debug.unexpected_exception, None)
| [
"[email protected]"
] | |
a79f7c7977cd6f6139db715b281a671c4b23ca83 | 5151814817d3b7d11fcdf25cd11a7f2137838b4f | /printServer.py | 857055edb48b4357549b8848e8b4a4cb166ea79c | [
"MIT"
] | permissive | TsukuyomiJolno/virtualPrinter | e11c0f58092e2cdf9b884ed27d3ee5d3fc96a149 | 28d19ae9ee634ac4dd9115695272349932aa5227 | refs/heads/master | 2020-03-28T20:45:06.961942 | 2018-03-27T04:41:55 | 2018-03-27T04:41:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,172 | py | #!/usr/bin/env python
#
# We can use RedMon to redirect a port to a program, but the idea of this file is to bypass that.
# We simply set up a loopback ip and act like a network printer.
import socket
import os,time
import atexit
import select
class PrintServer:
def __init__(self,printerName='My Virtual Printer',ip='127.0.0.1',port=9001,autoInstallPrinter=True,printCallbackFn=None):
"""
You can do an ip other than 127.0.0.1 (localhost), but really
a better way is to install the printer and use windows sharing.
If you choose another port, you need to right click on your printer
and go into properties->Ports->Configure Port
and then change the port number.
autoInstallPrinter is used to install the printer in the operating system (currently only supports Windows)
printCallbackFn is a function to be called with received print data
if it is None, then will save it out to a file.
"""
self.ip=ip
if port==None:
port=0 # meaning, "any unused port"
self.port=port
self.buffersize=20 # Normally 1024, but we want fast response
self.autoInstallPrinter=autoInstallPrinter
self.printerName=printerName
self.running=False
self.keepGoing=False
self.osPrinterManager=None
self.printCallbackFn=printCallbackFn
def __del__(self):
if self: # this will always be called on program exit, so may come in again if the object is already deleted
if self.autoInstallPrinter:
self._uninstallPrinter()
def _installPrinter(self,ip,port):
atexit.register(self.__del__) # ensure that __del__ always gets called when the program exits
if os.name=='nt':
import windowsPrinters
self.osPrinterManager=windowsPrinters.WindowsPrinters()
self.printerPortName=self.printerName+' Port'
makeDefault=False
comment='Virtual printer created in Python'
self.osPrinterManager.addPrinter(self.printerName,ip,port,self.printerPortName,makeDefault,comment)
else:
print 'WARN: Auto printer installation not implemented for '+os.name
def _uninstallPrinter(self):
if self.osPrinterManager:
self.osPrinterManager.removePrinter(self.printerName)
self.osPrinterManager.removePort(self.printerPortName)
def run(self):
if self.running:
return
self.running=True
self.keepGoing=True
sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((self.ip,self.port))
ip,port=sock.getsockname()
print 'Opening',ip+':'+str(port)
if self.autoInstallPrinter:
self._installPrinter(ip,port)
#sock.setblocking(0)
sock.listen(1)
while self.keepGoing:
print '\nListening for incoming print job...'
while self.keepGoing: # let select() yield some time to this thread so we can detect ctrl+c and keepGoing change
inputready,outputready,exceptready=select.select([sock],[],[],1.0)
if sock in inputready:
break
if not self.keepGoing:
continue
print 'Incoming job... spooling...'
conn,addr=sock.accept()
if self.printCallbackFn==None:
f=open('I_printed_this.ps','wb')
while 1:
data=conn.recv(self.buffersize)
if not data:
break
f.write(data)
f.flush()
elif True:
buf=[]
while 1:
data=str(conn.recv(self.buffersize))
if not data:
break
buf.append(data)
buf=''.join(buf)
# get whatever meta info we can
author=None
title=None
filename=None
header='@'+buf.split('%!PS-',1)[0].split('@',1)[1]
#print header
for line in header.split('\n'):
line=line.strip()
if line.startswith('@PJL JOB NAME='):
n=line.split('"',1)[1].rsplit('"',1)[0]
if os.path.isfile(n):
filename=n
else:
title=n
elif line.startswith('@PJL COMMENT'):
line=line.split('"',1)[1].rsplit('"',1)[0].split(';')
for param in line:
param=param.split(':',1)
if len(param)>1:
param[0]=param[0].strip().lower()
param[1]=param[1].strip()
if param[0]=='username':
author=param[1]
elif param[0]=='app filename':
if title==None:
if os.path.isfile(param[1]):
filename=param[1]
else:
title=param[1]
if title==None and filename!=None:
title=filename.rsplit(os.sep,1)[-1].split('.',1)[0]
self.printCallbackFn(buf,title=title,author=author,filename=filename)
else:
buf=[]
printjobHeader=[]
fillingBuf=False
while 1:
data=str(conn.recv(self.buffersize))
if not data:
break
if not fillingBuf:
i=data.find('%!PS-')
if i<0:
printjobHeader.append(data)
elif i==0:
buf.append(data)
fillingBuf=True
else:
printjobHeader.append(data[0:i])
buf.append(data[i:])
fillingBuf=True
else:
buf.append(data)
if len(buf)>0:
self.printCallbackFn(''.join(buf))
conn.close()
time.sleep(0.1)
if __name__=='__main__':
import sys
port=9001
ip='127.0.0.1'
runit=True
for arg in sys.argv[1:]:
pass # TODO: do args
	ps=PrintServer(ip=ip,port=port) # pass by keyword; the first positional parameter is printerName
ps.run()
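# A print callback, if supplied, receives the spooled PostScript plus whatever job metadata
# could be parsed from the PJL header. A minimal sketch (the names here are only an example,
# not part of the class):
#   def on_print(postscript, title=None, author=None, filename=None):
#       open('job.ps', 'wb').write(postscript)
#   PrintServer('My Virtual Printer', printCallbackFn=on_print).run()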
| [
"[email protected]"
] | |
081daa6f2c35d6ea72e3992de9435bd63d2fce9b | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/securityinsights/v20190101preview/threat_intelligence_indicator.py | 8d782a258d979e17d79d8f4a75dcda538209db73 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 38,988 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
from ._inputs import *
__all__ = ['ThreatIntelligenceIndicatorArgs', 'ThreatIntelligenceIndicator']
@pulumi.input_type
class ThreatIntelligenceIndicatorArgs:
def __init__(__self__, *,
kind: pulumi.Input[Union[str, 'ThreatIntelligenceResourceKind']],
operational_insights_resource_provider: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
workspace_name: pulumi.Input[str],
confidence: Optional[pulumi.Input[int]] = None,
created: Optional[pulumi.Input[str]] = None,
created_by_ref: Optional[pulumi.Input[str]] = None,
defanged: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
extensions: Optional[Any] = None,
external_id: Optional[pulumi.Input[str]] = None,
external_last_updated_time_utc: Optional[pulumi.Input[str]] = None,
external_references: Optional[pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceExternalReferenceArgs']]]] = None,
granular_markings: Optional[pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceGranularMarkingModelArgs']]]] = None,
indicator_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
kill_chain_phases: Optional[pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceKillChainPhaseArgs']]]] = None,
labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
language: Optional[pulumi.Input[str]] = None,
last_updated_time_utc: Optional[pulumi.Input[str]] = None,
modified: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
object_marking_refs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parsed_pattern: Optional[pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceParsedPatternArgs']]]] = None,
pattern: Optional[pulumi.Input[str]] = None,
pattern_type: Optional[pulumi.Input[str]] = None,
pattern_version: Optional[pulumi.Input[str]] = None,
revoked: Optional[pulumi.Input[bool]] = None,
source: Optional[pulumi.Input[str]] = None,
threat_intelligence_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
threat_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
valid_from: Optional[pulumi.Input[str]] = None,
valid_until: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ThreatIntelligenceIndicator resource.
:param pulumi.Input[Union[str, 'ThreatIntelligenceResourceKind']] kind: The kind of the entity.
:param pulumi.Input[str] operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param pulumi.Input[str] workspace_name: The name of the workspace.
:param pulumi.Input[int] confidence: Confidence of threat intelligence entity
:param pulumi.Input[str] created: Created by
:param pulumi.Input[str] created_by_ref: Created by reference of threat intelligence entity
:param pulumi.Input[bool] defanged: Is threat intelligence entity defanged
:param pulumi.Input[str] description: Description of a threat intelligence entity
:param pulumi.Input[str] display_name: Display name of a threat intelligence entity
:param pulumi.Input[str] etag: Etag of the azure resource
:param Any extensions: Extensions map
:param pulumi.Input[str] external_id: External ID of threat intelligence entity
:param pulumi.Input[str] external_last_updated_time_utc: External last updated time in UTC
:param pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceExternalReferenceArgs']]] external_references: External References
:param pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceGranularMarkingModelArgs']]] granular_markings: Granular Markings
:param pulumi.Input[Sequence[pulumi.Input[str]]] indicator_types: Indicator types of threat intelligence entities
:param pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceKillChainPhaseArgs']]] kill_chain_phases: Kill chain phases
:param pulumi.Input[Sequence[pulumi.Input[str]]] labels: Labels of threat intelligence entity
:param pulumi.Input[str] language: Language of threat intelligence entity
:param pulumi.Input[str] last_updated_time_utc: Last updated time in UTC
:param pulumi.Input[str] modified: Modified by
:param pulumi.Input[str] name: Threat intelligence indicator name field.
:param pulumi.Input[Sequence[pulumi.Input[str]]] object_marking_refs: Threat intelligence entity object marking references
:param pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceParsedPatternArgs']]] parsed_pattern: Parsed patterns
:param pulumi.Input[str] pattern: Pattern of a threat intelligence entity
:param pulumi.Input[str] pattern_type: Pattern type of a threat intelligence entity
:param pulumi.Input[str] pattern_version: Pattern version of a threat intelligence entity
:param pulumi.Input[bool] revoked: Is threat intelligence entity revoked
:param pulumi.Input[str] source: Source of a threat intelligence entity
:param pulumi.Input[Sequence[pulumi.Input[str]]] threat_intelligence_tags: List of tags
:param pulumi.Input[Sequence[pulumi.Input[str]]] threat_types: Threat types
:param pulumi.Input[str] valid_from: Valid from
:param pulumi.Input[str] valid_until: Valid until
"""
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "operational_insights_resource_provider", operational_insights_resource_provider)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "workspace_name", workspace_name)
if confidence is not None:
pulumi.set(__self__, "confidence", confidence)
if created is not None:
pulumi.set(__self__, "created", created)
if created_by_ref is not None:
pulumi.set(__self__, "created_by_ref", created_by_ref)
if defanged is not None:
pulumi.set(__self__, "defanged", defanged)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if extensions is not None:
pulumi.set(__self__, "extensions", extensions)
if external_id is not None:
pulumi.set(__self__, "external_id", external_id)
if external_last_updated_time_utc is not None:
pulumi.set(__self__, "external_last_updated_time_utc", external_last_updated_time_utc)
if external_references is not None:
pulumi.set(__self__, "external_references", external_references)
if granular_markings is not None:
pulumi.set(__self__, "granular_markings", granular_markings)
if indicator_types is not None:
pulumi.set(__self__, "indicator_types", indicator_types)
if kill_chain_phases is not None:
pulumi.set(__self__, "kill_chain_phases", kill_chain_phases)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if language is not None:
pulumi.set(__self__, "language", language)
if last_updated_time_utc is not None:
pulumi.set(__self__, "last_updated_time_utc", last_updated_time_utc)
if modified is not None:
pulumi.set(__self__, "modified", modified)
if name is not None:
pulumi.set(__self__, "name", name)
if object_marking_refs is not None:
pulumi.set(__self__, "object_marking_refs", object_marking_refs)
if parsed_pattern is not None:
pulumi.set(__self__, "parsed_pattern", parsed_pattern)
if pattern is not None:
pulumi.set(__self__, "pattern", pattern)
if pattern_type is not None:
pulumi.set(__self__, "pattern_type", pattern_type)
if pattern_version is not None:
pulumi.set(__self__, "pattern_version", pattern_version)
if revoked is not None:
pulumi.set(__self__, "revoked", revoked)
if source is not None:
pulumi.set(__self__, "source", source)
if threat_intelligence_tags is not None:
pulumi.set(__self__, "threat_intelligence_tags", threat_intelligence_tags)
if threat_types is not None:
pulumi.set(__self__, "threat_types", threat_types)
if valid_from is not None:
pulumi.set(__self__, "valid_from", valid_from)
if valid_until is not None:
pulumi.set(__self__, "valid_until", valid_until)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[Union[str, 'ThreatIntelligenceResourceKind']]:
"""
The kind of the entity.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[Union[str, 'ThreatIntelligenceResourceKind']]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="operationalInsightsResourceProvider")
def operational_insights_resource_provider(self) -> pulumi.Input[str]:
"""
The namespace of workspaces resource provider- Microsoft.OperationalInsights.
"""
return pulumi.get(self, "operational_insights_resource_provider")
@operational_insights_resource_provider.setter
def operational_insights_resource_provider(self, value: pulumi.Input[str]):
pulumi.set(self, "operational_insights_resource_provider", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group within the user's subscription. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> pulumi.Input[str]:
"""
The name of the workspace.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter
def confidence(self) -> Optional[pulumi.Input[int]]:
"""
Confidence of threat intelligence entity
"""
return pulumi.get(self, "confidence")
@confidence.setter
def confidence(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "confidence", value)
@property
@pulumi.getter
def created(self) -> Optional[pulumi.Input[str]]:
"""
Created by
"""
return pulumi.get(self, "created")
@created.setter
def created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created", value)
@property
@pulumi.getter(name="createdByRef")
def created_by_ref(self) -> Optional[pulumi.Input[str]]:
"""
Created by reference of threat intelligence entity
"""
return pulumi.get(self, "created_by_ref")
@created_by_ref.setter
def created_by_ref(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_by_ref", value)
@property
@pulumi.getter
def defanged(self) -> Optional[pulumi.Input[bool]]:
"""
Is threat intelligence entity defanged
"""
return pulumi.get(self, "defanged")
@defanged.setter
def defanged(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "defanged", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of a threat intelligence entity
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
Display name of a threat intelligence entity
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def extensions(self) -> Optional[Any]:
"""
Extensions map
"""
return pulumi.get(self, "extensions")
@extensions.setter
def extensions(self, value: Optional[Any]):
pulumi.set(self, "extensions", value)
@property
@pulumi.getter(name="externalId")
def external_id(self) -> Optional[pulumi.Input[str]]:
"""
External ID of threat intelligence entity
"""
return pulumi.get(self, "external_id")
@external_id.setter
def external_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_id", value)
@property
@pulumi.getter(name="externalLastUpdatedTimeUtc")
def external_last_updated_time_utc(self) -> Optional[pulumi.Input[str]]:
"""
External last updated time in UTC
"""
return pulumi.get(self, "external_last_updated_time_utc")
@external_last_updated_time_utc.setter
def external_last_updated_time_utc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_last_updated_time_utc", value)
@property
@pulumi.getter(name="externalReferences")
def external_references(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceExternalReferenceArgs']]]]:
"""
External References
"""
return pulumi.get(self, "external_references")
@external_references.setter
def external_references(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceExternalReferenceArgs']]]]):
pulumi.set(self, "external_references", value)
@property
@pulumi.getter(name="granularMarkings")
def granular_markings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceGranularMarkingModelArgs']]]]:
"""
Granular Markings
"""
return pulumi.get(self, "granular_markings")
@granular_markings.setter
def granular_markings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceGranularMarkingModelArgs']]]]):
pulumi.set(self, "granular_markings", value)
@property
@pulumi.getter(name="indicatorTypes")
def indicator_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Indicator types of threat intelligence entities
"""
return pulumi.get(self, "indicator_types")
@indicator_types.setter
def indicator_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "indicator_types", value)
@property
@pulumi.getter(name="killChainPhases")
def kill_chain_phases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceKillChainPhaseArgs']]]]:
"""
Kill chain phases
"""
return pulumi.get(self, "kill_chain_phases")
@kill_chain_phases.setter
def kill_chain_phases(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceKillChainPhaseArgs']]]]):
pulumi.set(self, "kill_chain_phases", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Labels of threat intelligence entity
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def language(self) -> Optional[pulumi.Input[str]]:
"""
Language of threat intelligence entity
"""
return pulumi.get(self, "language")
@language.setter
def language(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "language", value)
@property
@pulumi.getter(name="lastUpdatedTimeUtc")
def last_updated_time_utc(self) -> Optional[pulumi.Input[str]]:
"""
Last updated time in UTC
"""
return pulumi.get(self, "last_updated_time_utc")
@last_updated_time_utc.setter
def last_updated_time_utc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_updated_time_utc", value)
@property
@pulumi.getter
def modified(self) -> Optional[pulumi.Input[str]]:
"""
Modified by
"""
return pulumi.get(self, "modified")
@modified.setter
def modified(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "modified", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Threat intelligence indicator name field.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="objectMarkingRefs")
def object_marking_refs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Threat intelligence entity object marking references
"""
return pulumi.get(self, "object_marking_refs")
@object_marking_refs.setter
def object_marking_refs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "object_marking_refs", value)
@property
@pulumi.getter(name="parsedPattern")
def parsed_pattern(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceParsedPatternArgs']]]]:
"""
Parsed patterns
"""
return pulumi.get(self, "parsed_pattern")
@parsed_pattern.setter
def parsed_pattern(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ThreatIntelligenceParsedPatternArgs']]]]):
pulumi.set(self, "parsed_pattern", value)
@property
@pulumi.getter
def pattern(self) -> Optional[pulumi.Input[str]]:
"""
Pattern of a threat intelligence entity
"""
return pulumi.get(self, "pattern")
@pattern.setter
def pattern(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pattern", value)
@property
@pulumi.getter(name="patternType")
def pattern_type(self) -> Optional[pulumi.Input[str]]:
"""
Pattern type of a threat intelligence entity
"""
return pulumi.get(self, "pattern_type")
@pattern_type.setter
def pattern_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pattern_type", value)
@property
@pulumi.getter(name="patternVersion")
def pattern_version(self) -> Optional[pulumi.Input[str]]:
"""
Pattern version of a threat intelligence entity
"""
return pulumi.get(self, "pattern_version")
@pattern_version.setter
def pattern_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pattern_version", value)
@property
@pulumi.getter
def revoked(self) -> Optional[pulumi.Input[bool]]:
"""
Is threat intelligence entity revoked
"""
return pulumi.get(self, "revoked")
@revoked.setter
def revoked(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "revoked", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input[str]]:
"""
Source of a threat intelligence entity
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source", value)
@property
@pulumi.getter(name="threatIntelligenceTags")
def threat_intelligence_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of tags
"""
return pulumi.get(self, "threat_intelligence_tags")
@threat_intelligence_tags.setter
def threat_intelligence_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "threat_intelligence_tags", value)
@property
@pulumi.getter(name="threatTypes")
def threat_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Threat types
"""
return pulumi.get(self, "threat_types")
@threat_types.setter
def threat_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "threat_types", value)
@property
@pulumi.getter(name="validFrom")
def valid_from(self) -> Optional[pulumi.Input[str]]:
"""
Valid from
"""
return pulumi.get(self, "valid_from")
@valid_from.setter
def valid_from(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "valid_from", value)
@property
@pulumi.getter(name="validUntil")
def valid_until(self) -> Optional[pulumi.Input[str]]:
"""
Valid until
"""
return pulumi.get(self, "valid_until")
@valid_until.setter
def valid_until(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "valid_until", value)
class ThreatIntelligenceIndicator(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
confidence: Optional[pulumi.Input[int]] = None,
created: Optional[pulumi.Input[str]] = None,
created_by_ref: Optional[pulumi.Input[str]] = None,
defanged: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
extensions: Optional[Any] = None,
external_id: Optional[pulumi.Input[str]] = None,
external_last_updated_time_utc: Optional[pulumi.Input[str]] = None,
external_references: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ThreatIntelligenceExternalReferenceArgs']]]]] = None,
granular_markings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ThreatIntelligenceGranularMarkingModelArgs']]]]] = None,
indicator_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
kill_chain_phases: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ThreatIntelligenceKillChainPhaseArgs']]]]] = None,
kind: Optional[pulumi.Input[Union[str, 'ThreatIntelligenceResourceKind']]] = None,
labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
language: Optional[pulumi.Input[str]] = None,
last_updated_time_utc: Optional[pulumi.Input[str]] = None,
modified: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
object_marking_refs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
operational_insights_resource_provider: Optional[pulumi.Input[str]] = None,
parsed_pattern: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ThreatIntelligenceParsedPatternArgs']]]]] = None,
pattern: Optional[pulumi.Input[str]] = None,
pattern_type: Optional[pulumi.Input[str]] = None,
pattern_version: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
revoked: Optional[pulumi.Input[bool]] = None,
source: Optional[pulumi.Input[str]] = None,
threat_intelligence_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
threat_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
valid_from: Optional[pulumi.Input[str]] = None,
valid_until: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Threat intelligence information object.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] confidence: Confidence of threat intelligence entity
:param pulumi.Input[str] created: Created by
:param pulumi.Input[str] created_by_ref: Created by reference of threat intelligence entity
:param pulumi.Input[bool] defanged: Is threat intelligence entity defanged
:param pulumi.Input[str] description: Description of a threat intelligence entity
:param pulumi.Input[str] display_name: Display name of a threat intelligence entity
:param pulumi.Input[str] etag: Etag of the azure resource
:param Any extensions: Extensions map
:param pulumi.Input[str] external_id: External ID of threat intelligence entity
:param pulumi.Input[str] external_last_updated_time_utc: External last updated time in UTC
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ThreatIntelligenceExternalReferenceArgs']]]] external_references: External References
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ThreatIntelligenceGranularMarkingModelArgs']]]] granular_markings: Granular Markings
:param pulumi.Input[Sequence[pulumi.Input[str]]] indicator_types: Indicator types of threat intelligence entities
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ThreatIntelligenceKillChainPhaseArgs']]]] kill_chain_phases: Kill chain phases
:param pulumi.Input[Union[str, 'ThreatIntelligenceResourceKind']] kind: The kind of the entity.
:param pulumi.Input[Sequence[pulumi.Input[str]]] labels: Labels of threat intelligence entity
:param pulumi.Input[str] language: Language of threat intelligence entity
:param pulumi.Input[str] last_updated_time_utc: Last updated time in UTC
:param pulumi.Input[str] modified: Modified by
:param pulumi.Input[str] name: Threat intelligence indicator name field.
:param pulumi.Input[Sequence[pulumi.Input[str]]] object_marking_refs: Threat intelligence entity object marking references
:param pulumi.Input[str] operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ThreatIntelligenceParsedPatternArgs']]]] parsed_pattern: Parsed patterns
:param pulumi.Input[str] pattern: Pattern of a threat intelligence entity
:param pulumi.Input[str] pattern_type: Pattern type of a threat intelligence entity
:param pulumi.Input[str] pattern_version: Pattern version of a threat intelligence entity
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param pulumi.Input[bool] revoked: Is threat intelligence entity revoked
:param pulumi.Input[str] source: Source of a threat intelligence entity
:param pulumi.Input[Sequence[pulumi.Input[str]]] threat_intelligence_tags: List of tags
:param pulumi.Input[Sequence[pulumi.Input[str]]] threat_types: Threat types
:param pulumi.Input[str] valid_from: Valid from
:param pulumi.Input[str] valid_until: Valid until
:param pulumi.Input[str] workspace_name: The name of the workspace.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ThreatIntelligenceIndicatorArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Threat intelligence information object.
:param str resource_name: The name of the resource.
:param ThreatIntelligenceIndicatorArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ThreatIntelligenceIndicatorArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
confidence: Optional[pulumi.Input[int]] = None,
created: Optional[pulumi.Input[str]] = None,
created_by_ref: Optional[pulumi.Input[str]] = None,
defanged: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
extensions: Optional[Any] = None,
external_id: Optional[pulumi.Input[str]] = None,
external_last_updated_time_utc: Optional[pulumi.Input[str]] = None,
external_references: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ThreatIntelligenceExternalReferenceArgs']]]]] = None,
granular_markings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ThreatIntelligenceGranularMarkingModelArgs']]]]] = None,
indicator_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
kill_chain_phases: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ThreatIntelligenceKillChainPhaseArgs']]]]] = None,
kind: Optional[pulumi.Input[Union[str, 'ThreatIntelligenceResourceKind']]] = None,
labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
language: Optional[pulumi.Input[str]] = None,
last_updated_time_utc: Optional[pulumi.Input[str]] = None,
modified: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
object_marking_refs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
operational_insights_resource_provider: Optional[pulumi.Input[str]] = None,
parsed_pattern: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ThreatIntelligenceParsedPatternArgs']]]]] = None,
pattern: Optional[pulumi.Input[str]] = None,
pattern_type: Optional[pulumi.Input[str]] = None,
pattern_version: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
revoked: Optional[pulumi.Input[bool]] = None,
source: Optional[pulumi.Input[str]] = None,
threat_intelligence_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
threat_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
valid_from: Optional[pulumi.Input[str]] = None,
valid_until: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ThreatIntelligenceIndicatorArgs.__new__(ThreatIntelligenceIndicatorArgs)
__props__.__dict__["confidence"] = confidence
__props__.__dict__["created"] = created
__props__.__dict__["created_by_ref"] = created_by_ref
__props__.__dict__["defanged"] = defanged
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
__props__.__dict__["etag"] = etag
__props__.__dict__["extensions"] = extensions
__props__.__dict__["external_id"] = external_id
__props__.__dict__["external_last_updated_time_utc"] = external_last_updated_time_utc
__props__.__dict__["external_references"] = external_references
__props__.__dict__["granular_markings"] = granular_markings
__props__.__dict__["indicator_types"] = indicator_types
__props__.__dict__["kill_chain_phases"] = kill_chain_phases
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__.__dict__["kind"] = kind
__props__.__dict__["labels"] = labels
__props__.__dict__["language"] = language
__props__.__dict__["last_updated_time_utc"] = last_updated_time_utc
__props__.__dict__["modified"] = modified
__props__.__dict__["name"] = name
__props__.__dict__["object_marking_refs"] = object_marking_refs
if operational_insights_resource_provider is None and not opts.urn:
raise TypeError("Missing required property 'operational_insights_resource_provider'")
__props__.__dict__["operational_insights_resource_provider"] = operational_insights_resource_provider
__props__.__dict__["parsed_pattern"] = parsed_pattern
__props__.__dict__["pattern"] = pattern
__props__.__dict__["pattern_type"] = pattern_type
__props__.__dict__["pattern_version"] = pattern_version
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["revoked"] = revoked
__props__.__dict__["source"] = source
__props__.__dict__["threat_intelligence_tags"] = threat_intelligence_tags
__props__.__dict__["threat_types"] = threat_types
__props__.__dict__["valid_from"] = valid_from
__props__.__dict__["valid_until"] = valid_until
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:securityinsights/v20190101preview:ThreatIntelligenceIndicator"), pulumi.Alias(type_="azure-native:securityinsights:ThreatIntelligenceIndicator"), pulumi.Alias(type_="azure-nextgen:securityinsights:ThreatIntelligenceIndicator")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ThreatIntelligenceIndicator, __self__).__init__(
'azure-native:securityinsights/v20190101preview:ThreatIntelligenceIndicator',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ThreatIntelligenceIndicator':
"""
Get an existing ThreatIntelligenceIndicator resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ThreatIntelligenceIndicatorArgs.__new__(ThreatIntelligenceIndicatorArgs)
__props__.__dict__["etag"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
return ThreatIntelligenceIndicator(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
The kind of the entity.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type
"""
return pulumi.get(self, "type")
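# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated SDK; the resource group,
# workspace and indicator values below are hypothetical placeholders):
#
#   indicator = ThreatIntelligenceIndicator(
#       "exampleIndicator",
#       kind="indicator",
#       operational_insights_resource_provider="Microsoft.OperationalInsights",
#       resource_group_name="example-rg",
#       workspace_name="example-workspace",
#       display_name="Example SHA-256 file indicator",
#       pattern="[file:hashes.'SHA-256' = '0123456789abcdef...']",
#       pattern_type="stix",
#       source="Example feed",
#       threat_types=["malicious-activity"],
#       valid_from="2021-01-01T00:00:00Z")
# ---------------------------------------------------------------------------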
| [
"[email protected]"
] | |
c8270736785050d121d5e0b23cd2728d7bfa27f6 | c38592e18f0492f43e14ded616cb0bcf246e51c5 | /gtp.py | b080cd578df31d54df6c128450b8f2f27c32eba2 | [] | no_license | CMACH508/2020-WangZhiwei | ed81a119391e5b35dc79d21a8c1282d4bf2a163b | d9112b3a0bfc688681ce9a85dcaec0230494bb1d | refs/heads/master | 2022-04-22T03:10:42.936674 | 2020-04-22T11:11:24 | 2020-04-22T11:11:24 | 251,605,983 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,576 | py |
import re
def unparse_pygtp_coords(c):
if c is None:
return PASS
return c[1] + 1, 19 - c[0]
def parse_pygtp_coords(vertex):
'Interprets coords. (1, 1) is bottom left; (1, 9) is top left.'
if vertex in (PASS, RESIGN):
return None
return 19 - vertex[1], vertex[0] - 1
def pre_engine(s):
s = re.sub("[^\t\n -~]", "", s)
s = s.split("#")[0]
s = s.replace("\t", " ")
return s
def pre_controller(s):
s = re.sub("[^\t\n -~]", "", s)
s = s.replace("\t", " ")
return s
def gtp_boolean(b):
return "true" if b else "false"
def gtp_list(l):
return "\n".join(l)
def gtp_color(color):
# an arbitrary choice amongst a number of possibilities
return {BLACK: "B", WHITE: "W"}[color]
def gtp_vertex(x, y):
    if x == 9 and y == 9:
        return "resign"
    else:
        # Full A-Z letters, consistent with parse_vertex's lowercase alphabet
        # below (the GTP spec itself skips the letter "I").
        return "{}{}".format("ABCDEFGHIJKLMNOPQRSTUVWXYZ"[y], x + 1)
def gtp_move(color, x, y):
return " ".join([gtp_vertex(x, y)])
def parse_message(message):
message = pre_engine(message).strip()
first, rest = (message.split(" ", 1) + [None])[:2]
if first.isdigit():
message_id = int(first)
if rest is not None:
command, arguments = (rest.split(" ", 1) + [None])[:2]
else:
command, arguments = None, None
else:
message_id = None
command, arguments = first, rest
return message_id, command, arguments
WHITE = -1
BLACK = +1
EMPTY = 0
PASS = (0, 0)
RESIGN = "resign"
def parse_color(color):
if color.lower() in ["b", "black"]:
return BLACK
elif color.lower() in ["w", "white"]:
return WHITE
else:
return False
def parse_vertex(vertex_string):
if vertex_string is None:
return False
elif vertex_string.lower() == "pass":
return PASS
elif vertex_string.lower() == "resign":
return 9, 9
elif len(vertex_string) > 1:
y = "abcdefghijklmnopqrstuvwxyz".find(vertex_string[0].lower())
if y == -1:
return False
if vertex_string[1:].isdigit():
x = int(vertex_string[1:])
else:
return False
else:
return False
return x - 1, y
def parse_move(move_string):
color_string, vertex_string = (move_string.split(" ") + [None])[:2]
color = parse_color(color_string)
if color is False:
return False
vertex = parse_vertex(vertex_string)
if vertex is False:
return False
return color, vertex
MIN_BOARD_SIZE = 7
MAX_BOARD_SIZE = 19
def format_success(message_id, response=None):
if response is None:
response = ""
else:
response = " {}".format(response)
if message_id:
return "={}{}\n\n".format(message_id, response)
else:
return "={}\n\n".format(response)
def format_error(message_id, response):
if response:
response = " {}".format(response)
if message_id:
return "?{}{}\n\n".format(message_id, response)
else:
return "?{}\n\n".format(response)
class Engine(object):
def __init__(self, game_obj, name="gtp (python library)", version="0.2"):
self.size = 9
self.komi = 6.5
self._game = game_obj
self._game.clear()
self._name = name
self._version = version
self.disconnect = False
self.known_commands = [
field[4:] for field in dir(self) if field.startswith("cmd_")]
def send(self, message):
message_id, command, arguments = parse_message(message)
if command in self.known_commands:
try:
return getattr(self, "cmd_" + command)(arguments)
'''return format_success(
message_id, getattr(self, "cmd_" + command)(arguments))'''
except ValueError as exception:
return format_error(message_id, exception.args[0])
else:
return format_error(message_id, "unknown command")
def vertex_in_range(self, vertex):
if vertex == PASS:
return True
if 1 <= vertex[0] <= self.size and 1 <= vertex[1] <= self.size:
return True
else:
return False
# commands
def cmd_protocol_version(self, arguments):
return 2
    def cmd_showboard(self, arguments):
        return "haha"  # placeholder response; the board dump below is unreachable
        return self._game.board.__repr__()
def cmd_name(self, arguments):
return self._name
def cmd_version(self, arguments):
return self._version
def cmd_known_command(self, arguments):
return gtp_boolean(arguments in self.known_commands)
def cmd_list_commands(self, arguments):
return gtp_list(self.known_commands)
def cmd_quit(self, arguments):
self.disconnect = True
return ""
def cmd_boardsize(self, arguments):
return ""
'''if arguments.isdigit():
size = int(arguments)
if MIN_BOARD_SIZE <= size <= MAX_BOARD_SIZE:
self.size = size
self._game.set_size(size)
else:
raise ValueError("unacceptable size")
else:
raise ValueError("non digit size")'''
def cmd_clear_board(self, arguments):
self._game.clear()
def cmd_komi(self, arguments):
try:
komi = float(arguments)
self.komi = komi
except ValueError:
raise ValueError("syntax error")
def cmd_play(self, arguments):
color, move = parse_move(arguments)
x, y = move
#print("play", x, y)
#print("vertex: ", vertex)
#if self._game.make_move(color, vertex):
if self._game.make_move(x, y):
return ""
#return gtp_move(color, x, y)
else:
return "Illegal move"
#raise ValueError("illegal move")
def cmd_genmove(self, arguments):
c = parse_color(arguments)
if c:
x, y = self._game.get_move()
#print("gen", x, y)
self._game.make_move(x, y)
#return gtp_vertex(move)
return gtp_vertex(x, y)
else:
raise ValueError("unknown player: {}".format(arguments))
def cmd_final_score(self, arguments):
over, res = self._game.is_game_over()
if over:
if res > 0:
return "B+"
else:
return "W+"
return "Not over"
def cmd_result(self, arguments):
over, res = self._game.show_result()
if over:
return res
return "Not over"
class MinimalGame(object):
def __init__(self, size=19, komi=6.5):
self.size = size
self.komi = 6.5
self.board = [EMPTY] * (self.size * self.size)
def _flatten(self, vertex):
(x, y) = vertex
return (x - 1) * self.size + (y - 1)
def clear(self):
self.board = [EMPTY] * (self.size * self.size)
def make_move(self, color, vertex):
# no legality check other than the space being empty..
# no side-effects beyond placing the stone..
if vertex == PASS:
return True # noop
idx = self._flatten(vertex)
if self.board[idx] == EMPTY:
self.board[idx] = color
return True
else:
return False
def set_size(self, n):
self.size = n
self.clear()
def set_komi(self, k):
self.komi = k
def get_move(self, color):
# pass every time. At least it's legal
return 0, 0
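if __name__ == "__main__":
    # Minimal self-check sketch (illustrative only): exercises the pure GTP
    # parsing and formatting helpers defined above; no engine or controller
    # process is involved.
    message_id, command, arguments = parse_message("1 play black D4")
    color, (x, y) = parse_move(arguments)
    print(message_id, command, gtp_color(color), gtp_vertex(x, y))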
| [
"[email protected]"
] | |
a39e5237a2f64dd433e68606927b0b35856daaf7 | 3423fa49ec691b5111e30234036bdf7ce27b59a0 | /basic_oop_with_python/3-main.py | 720e4c22b6fcd27316f8a6988c8e1180a1cb2690 | [] | no_license | DoraKorpar/holbertonschool-higher_level_programming | 69254dbe626cb1ea021791f1ba4f1c1e1e41e0d8 | 07f895d25a57eda1f4c516a6effa42a538a7462a | refs/heads/master | 2021-01-21T04:33:20.210091 | 2016-07-02T20:37:53 | 2016-07-02T20:37:53 | 51,212,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | from circle import Circle
c = Circle(4)
c.set_center([0, 0])
c.set_color("Yellow")
c.name = "Sun"
print "Area of %s is %f" % (c.name, c.area())
print "Center of %s sun is [%d, %d]" % (c.get_color(), c.get_center()[0], c.get_center()[1])
| [
"[email protected]"
] | |
580b1f7c8954ad727b99f744821d25755d8610ec | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/nos/v7_2_0/interface/port_channel/ip/interface_po_dhcp_conf/dhcp/__init__.py | 43d3dcd59779544ca02ce50f9c3fb363039a675d | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,154 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import relay
class dhcp(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/port-channel/ip/interface-po-dhcp-conf/dhcp. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__relay',)
_yang_name = 'dhcp'
_rest_name = 'dhcp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__relay = YANGDynClass(base=relay.relay, is_container='container', presence=False, yang_name="relay", rest_name="relay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DHCP relay agent configuration', u'callpoint': u'DHCPRelayPoInterfaceBaseCallPoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'port-channel', u'ip', u'interface-po-dhcp-conf', u'dhcp']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Port-channel', u'ip', u'dhcp']
def _get_relay(self):
"""
Getter method for relay, mapped from YANG variable /interface/port_channel/ip/interface_po_dhcp_conf/dhcp/relay (container)
"""
return self.__relay
def _set_relay(self, v, load=False):
"""
Setter method for relay, mapped from YANG variable /interface/port_channel/ip/interface_po_dhcp_conf/dhcp/relay (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_relay is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_relay() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=relay.relay, is_container='container', presence=False, yang_name="relay", rest_name="relay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DHCP relay agent configuration', u'callpoint': u'DHCPRelayPoInterfaceBaseCallPoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """relay must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=relay.relay, is_container='container', presence=False, yang_name="relay", rest_name="relay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DHCP relay agent configuration', u'callpoint': u'DHCPRelayPoInterfaceBaseCallPoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)""",
})
self.__relay = t
if hasattr(self, '_set'):
self._set()
def _unset_relay(self):
self.__relay = YANGDynClass(base=relay.relay, is_container='container', presence=False, yang_name="relay", rest_name="relay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DHCP relay agent configuration', u'callpoint': u'DHCPRelayPoInterfaceBaseCallPoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)
relay = __builtin__.property(_get_relay, _set_relay)
_pyangbind_elements = {'relay': relay, }
| [
"[email protected]"
] | |
e9683267b636a822e2ab4e7e3f11d76614b0273c | 16a48ec3a2f15db89ba819db80721aeea4e10bd1 | /EX2_Ramp angle_Joshua.py | 8905c60f73df79ff5090b2849767bbdad487a261 | [] | no_license | joshualxndrs/ALGOPRO-HW-1_Exercise1-2_JOSHUA | 58ccb0312914f0da7f385855a90f42c130ca8dd1 | 36ba244e3e67061040535c7dd5facd9aa2c10f03 | refs/heads/master | 2023-08-19T14:27:15.066238 | 2021-10-14T16:23:54 | 2021-10-14T16:23:54 | 416,177,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | #Input mass, Input Force
# Inputs: mass of the cart (kg) and the force acting on it (N).
# g = 9.8 m/s^2 (gravitational acceleration, stored in c below).
# Physics: sin(angle) = F / (m * g); asin() gives the angle in radians and
# math.degrees() converts it to degrees.
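# Worked check (illustrative): m = 10 kg and F = 49 N give sin(angle) = 49 / (10 * 9.8) = 0.5,
# so the printed ramp angle is asin(0.5) = 30.0 degrees.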
import math
m = float(input("Please enter the mass of the cart (unit in kg): "))
f = float(input("Please enter the force acting on the cart (unit in N): "))
c = 9.8
sin_angle = f/(m*c)
A = math.asin(sin_angle)
Final = math.degrees(A)
print("The angle of the ramp is %2.1f"%Final)
| [
"[email protected]"
] | |
03d889a33613d1136e0bc0ffb09a5aa16709224e | bc2000f224d4f555a54c1fb43cb55e027bc8f64f | /ctc/chapter_3/problem_1.py | 7435f9b2f44d7acf0efd7685317573e1abe9a5fb | [] | no_license | nplutt/learning | 4779aed3a62da4de85e99c86f92c0fc7c36e69ee | 0d4635ff5c1e72353776ac9930749fe37b3b9603 | refs/heads/master | 2023-08-14T23:10:13.400627 | 2021-09-27T16:04:17 | 2021-09-27T16:04:17 | 104,288,020 | 0 | 0 | null | 2020-12-29T00:18:19 | 2017-09-21T01:49:17 | Python | UTF-8 | Python | false | false | 1,174 | py | class ArrayStack(object):
def __init__(self):
self.arr = []
self.index_1 = 0
self.index_2 = 0
self.index_3 = 0
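    # Layout note: the three logical stacks share self.arr contiguously --
    # stack 1 occupies the first index_1 slots, stack 2 the next index_2 slots,
    # and stack 3 the index_3 slots after that.  get_index() returns the
    # position just past the top of the requested stack.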
def get_index(self, stack):
if stack == 1:
return self.index_1
elif stack == 2:
return self.index_1 + self.index_2
elif stack == 3:
return self.index_1 + self.index_2 + self.index_3
def update_index(self, stack, num):
if stack == 1:
self.index_1 += num
elif stack == 2:
self.index_2 += num
elif stack == 3:
self.index_3 += num
def push(self, stack, value):
self.arr.insert(self.get_index(stack), value)
self.update_index(stack, 1)
def pop(self, stack):
return_value = self.arr.pop(self.get_index(stack) - 1)
self.update_index(stack, -1)
return return_value
if __name__ == "__main__":
stack = ArrayStack()
stack.push(3, 1)
stack.push(2, 4)
stack.push(2, 6)
stack.push(1, 3)
stack.push(3, 9)
stack.push(1, 15)
print(stack.pop(3))
print(stack.pop(1))
print(stack.pop(2))
| [
"[email protected]"
] | |
7e0f926a9bf981d7d265549da68801d92684d17e | 555cef093fc32ecd9212bcc24c277fa7ad8a5a06 | /code/heatmap.py | cc4cc51fe5952984b6952dacaf5c426bcda73acb | [] | no_license | rushilgoyal/San_francisco_Crime_Classification | e00568c2c2bd98b8a923c1f63a7a0188d0c466bd | c4e5ccf0a062b99c2fcab39eb78501154fad9408 | refs/heads/master | 2021-07-12T04:27:23.343351 | 2017-10-16T21:15:13 | 2017-10-16T21:15:13 | 107,182,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 961 | py |
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
from pandas import Series,DataFrame
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
class heatmap():
def __init__(self):
self.sf_df = pd.read_csv('/Users/rushilgoyal/Desktop/train2.csv') #load the dataset
def crime_heatmap(self):
#self.sf_df.info()
#self.sf_df.columns#columns used
#self.sf_df.head(5)
self.sf_df.PdDistrict.value_counts().plot(kind="bar")
self.sf_df_crosstab = pd.crosstab(self.sf_df.PdDistrict,self.sf_df.Category,margins=True)
del self.sf_df_crosstab['All']#delete All column
self.sf_df_crosstab = self.sf_df_crosstab.ix[:-1]#delete last row (All)
# PLOTTING HEATMAP OF CRIME INCIDENTS BY Police District
column_labels = list(self.sf_df_crosstab.columns.values)
row_labels = self.sf_df_crosstab.index.values.tolist()
fig,ax = plt.subplots()
heatmap = ax.pcolor(self.sf_df_crosstab,cmap=plt.cm.Blues)
| [
"[email protected]"
] | |
749ac50eb84d8577418fcad493dda78c942f223f | d89db6f6278174c0f1062e964541681218a85ac9 | /src/openbiolink/graph_creation/file_processor/edge/edgeStitchBindInhProcessor.py | d117636c2e282e96bc59bbda54c2ad5b5a11e017 | [
"MIT",
"CC-BY-NC-4.0",
"CC-BY-SA-3.0",
"LicenseRef-scancode-public-domain",
"CC-BY-4.0"
] | permissive | cthoyt/OpenBioLink | b6e8e2e88b4491385fe53a1b00a8111b0a5d0930 | c5f85b99f9104f70493136c343e4554261e990a5 | refs/heads/master | 2020-12-27T16:36:45.033091 | 2020-02-03T13:37:22 | 2020-02-03T13:38:08 | 237,973,611 | 0 | 0 | MIT | 2020-02-03T13:38:33 | 2020-02-03T13:38:32 | null | UTF-8 | Python | false | false | 1,161 | py | from openbiolink.graph_creation.file_processor.fileProcessor import FileProcessor
from openbiolink.graph_creation.metadata_infile import InMetaEdgeStitchBindInh
from openbiolink.graph_creation.types.infileType import InfileType
from openbiolink.graph_creation.types.readerType import ReaderType
class EdgeStitchBindInhProcessor(FileProcessor):
IN_META_CLASS = InMetaEdgeStitchBindInh
def __init__(self):
self.use_cols = self.IN_META_CLASS.USE_COLS
super().__init__(self.use_cols, readerType=ReaderType.READER_EDGE_STITCH_ACTION,
infileType=InfileType.IN_EDGE_STITCH_BINDINH, mapping_sep=self.IN_META_CLASS.MAPPING_SEP)
def individual_preprocessing(self, data):
# only drug -> protein connections of single compounds (no merged)
drug_protein = data.item_id_a.str.startswith('CIDs')
mode = data['mode'] == 'binding'
action_short = data['action'] == 'in'
action_long = data['action'] == 'inhibition'
data = data[drug_protein & mode & (action_long | action_short)]
self.stitch_to_pubchem_id(data,self.use_cols.index('item_id_a'))
return data | [
"[email protected]"
] | |
6b7af3bfcd7fc3d5e1f5edf634c7ebc6a18c736e | 0a84b080f6f6c03e27f61b1a3481c007e444cec8 | /Interpolation/testcase7.py | f50cd17e2a8a3d790966b8ce36236b2af52948de | [] | no_license | andersas/numeric | ee00192279bba215089c6cc40e6cb3d5962e62b8 | 924282fed257d9af678a406bf7d3349c833b1dfc | refs/heads/master | 2021-01-21T22:58:31.045460 | 2012-05-15T11:15:55 | 2012-05-15T11:15:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | #!/usr/bin/env python
from math import *;
from random import *;
def f(x):
return 0.75 + gauss(0,0.02);
for i in range(-10,11):
print(i, "\t", f(1.0 * i));
| [
"[email protected]"
] | |
bb6cc272291525c1e3332815bf161285720d7557 | 4bd7895dc8277a061a3fdbf3b91e3b0842baa08d | /DigiAnalyzer/python/__init__.py | 973ed98b0de22ad4f432105ca59e564819f211d0 | [] | no_license | ramankhurana/HGCAnalyzer | 69dab165754876e8ea26bc7a038b98d5d502f47b | a2a314048d41295c7c57203f9988a67d2e76e1c6 | refs/heads/master | 2021-01-04T14:18:58.886153 | 2014-09-26T09:27:05 | 2014-09-26T09:27:05 | 20,794,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | #Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/HGCAnalyzer/DigiAnalyzer/',1)[0])+'/cfipython/slc6_amd64_gcc472/HGCAnalyzer/DigiAnalyzer')
| [
"[email protected]"
] | |
499fe5961bfb74e9857dfca8a118a7d17511860f | 37e7988db5a84dc2e7bb9bdb3148fd2152a0f996 | /tmowx/admin.py | bfabcec8f84edf4912ca4099cfc5465680a19711 | [] | no_license | dunjian/django-weixin | e6381757b8c07546a0b8cfbf35e12621c74897b0 | d4f1ea2254458cd97826ba118f5ad4727e0f5107 | refs/heads/master | 2020-12-11T09:09:03.333900 | 2015-07-23T16:02:23 | 2015-07-23T16:02:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | #!coding=utf-8
from django.contrib import admin
from api.menu import send_menu
from models import *
from mptt.admin import MPTTModelAdmin
class WMenuAdmin(MPTTModelAdmin):
list_display = ('name', 'menu_type', 'value')
list_filter = ('menu_type',)
actions = ['update_menu']
def update_menu(self, request, queryset):
result, code = send_menu()
if not result:
self.message_user(request, u'%s' % code)
else:
self.message_user(request, u'菜单更新成功!')
update_menu.short_description = "更新菜单到微信"
admin.site.register(WMenu, WMenuAdmin)
admin.site.register(User)
| [
"[email protected]"
] | |
a8e6954e32e6f7f81feebfb0d061c9902352e68d | 491da668958aa4f36e2dd85833cca194b5eb8598 | /venv/bin/pip3.8 | 4c954639d0176b3927089add5165edbd39c55f34 | [] | no_license | KisLupin/ML_Math | 262d802587cdc0066a5dbe279e5b7cc954d15516 | 5e81182adb81e46858c7cd2e8310896f6a3ae620 | refs/heads/master | 2022-04-24T20:46:04.381713 | 2020-04-27T16:01:58 | 2020-04-27T16:01:58 | 259,078,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | 8 | #!/Users/kislupin/PycharmProjects/svcc/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
| [
"[email protected]"
] | |
d46781c1b05729cf472e800330d9a3e0ec09696b | b65764888ebca49c78daf845aab00d08e9a6aa9d | /tractseg/libs/preprocessing.py | 3781f9203c986e63c5f18716cc222f62efb8596e | [
"Apache-2.0"
] | permissive | chamberm/TractSeg | 7ad56b885f60eb87cca07ec6f29a2417d6300c43 | 4c7d55bc8ff698c2f1af0b0ccca96bb0ba6f57d2 | refs/heads/master | 2022-12-26T08:24:19.196497 | 2020-10-12T09:23:12 | 2020-10-12T09:23:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,717 | py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from os.path import join
from pkg_resources import resource_filename
from tqdm import tqdm
from tractseg.libs import img_utils
def reorient_to_std_space(input_file, bvals, bvecs, brain_mask, output_dir):
print("Reorienting input to MNI space...")
# Only working with FSL 6
# os.system("fslreorient2std -m " + output_dir + "/reorient2std.mat " + input_file +
# " " + output_dir + "/Diffusion_MNI.nii.gz")
# Working with FSL 5 and 6
os.system("fslreorient2std " + input_file + " > " + output_dir + "/reorient2std.mat")
os.system("fslreorient2std " + input_file + " " + output_dir + "/Diffusion_MNI.nii.gz")
os.system("cp " + bvals + " " + output_dir + "/Diffusion_MNI.bvals")
os.system("rotate_bvecs -i " + bvecs + " -t " + output_dir + "/reorient2std.mat -o " +
output_dir + "/Diffusion_MNI.bvecs")
os.system("flirt -ref " + output_dir + "/Diffusion_MNI.nii.gz -in " + brain_mask +
" -out " + output_dir + "/nodif_brain_mask_MNI.nii.gz -applyxfm -init " +
output_dir + "/reorient2std.mat -dof 6")
new_input_file = join(output_dir, "Diffusion_MNI.nii.gz")
bvecs = join(output_dir, "Diffusion_MNI.bvecs")
bvals = join(output_dir, "Diffusion_MNI.bvals")
brain_mask = join(output_dir, "nodif_brain_mask_MNI.nii.gz")
return new_input_file, bvals, bvecs, brain_mask
def move_to_MNI_space(input_file, bvals, bvecs, brain_mask, output_dir):
print("Moving input to MNI space...")
os.system("calc_FA -i " + input_file + " -o " + output_dir + "/FA.nii.gz --bvals " + bvals +
" --bvecs " + bvecs + " --brain_mask " + brain_mask)
dwi_spacing = img_utils.get_image_spacing(input_file)
template_path = resource_filename('tractseg.resources', 'MNI_FA_template.nii.gz')
os.system("flirt -ref " + template_path + " -in " + output_dir + "/FA.nii.gz -out " + output_dir +
"/FA_MNI.nii.gz -omat " + output_dir + "/FA_2_MNI.mat -dof 6 -cost mutualinfo -searchcost mutualinfo")
os.system("flirt -ref " + template_path + " -in " + input_file + " -out " + output_dir +
"/Diffusion_MNI.nii.gz -applyisoxfm " + dwi_spacing + " -init " + output_dir +
"/FA_2_MNI.mat -dof 6 -interp trilinear")
os.system("cp " + bvals + " " + output_dir + "/Diffusion_MNI.bvals")
os.system("rotate_bvecs -i " + bvecs + " -t " + output_dir + "/FA_2_MNI.mat" +
" -o " + output_dir + "/Diffusion_MNI.bvecs")
os.system("flirt -ref " + template_path + " -in " + brain_mask +
" -out " + output_dir + "/nodif_brain_mask_MNI.nii.gz -applyisoxfm " + dwi_spacing + " -init " +
output_dir + "/FA_2_MNI.mat -dof 6 -interp nearestneighbour")
new_input_file = join(output_dir, "Diffusion_MNI.nii.gz")
bvecs = join(output_dir, "Diffusion_MNI.bvecs")
bvals = join(output_dir, "Diffusion_MNI.bvals")
brain_mask = join(output_dir, "nodif_brain_mask_MNI.nii.gz")
return new_input_file, bvals, bvecs, brain_mask
def move_to_subject_space_single_file(output_dir, experiment_type, output_subdir, output_float=False):
print("Moving output to subject space...")
os.system("mv " + output_dir + "/" + output_subdir + ".nii.gz " + output_dir + "/" + output_subdir + "_MNI.nii.gz")
file_path_in = output_dir + "/" + output_subdir + "_MNI.nii.gz"
file_path_out = output_dir + "/" + output_subdir + ".nii.gz"
dwi_spacing = img_utils.get_image_spacing(file_path_in)
os.system("convert_xfm -omat " + output_dir + "/MNI_2_FA.mat -inverse " + output_dir + "/FA_2_MNI.mat")
os.system("flirt -ref " + output_dir + "/FA.nii.gz -in " + file_path_in + " -out " + file_path_out +
" -applyisoxfm " + dwi_spacing + " -init " + output_dir + "/MNI_2_FA.mat -dof 6" +
" -interp trilinear")
if not output_float:
os.system("fslmaths " + file_path_out + " -thr 0.5 -bin " + file_path_out)
def move_to_subject_space(output_dir, bundles, experiment_type, output_subdir, output_float=False):
print("Moving output to subject space...")
os.system("mkdir -p " + output_dir + "/" + output_subdir + "_MNI")
os.system("mv " + output_dir + "/" + output_subdir + "/* " + output_dir + "/" + output_subdir + "_MNI")
os.system("convert_xfm -omat " + output_dir + "/MNI_2_FA.mat -inverse " + output_dir + "/FA_2_MNI.mat")
for bundle in tqdm(bundles):
file_path_in = output_dir + "/" + output_subdir + "_MNI/" + bundle + ".nii.gz"
file_path_out = output_dir + "/" + output_subdir + "/" + bundle + ".nii.gz"
dwi_spacing = img_utils.get_image_spacing(file_path_in)
if experiment_type == "peak_regression":
os.system("flip_peaks -i " + file_path_in + " -o " + file_path_in[:-7] + "_flip.nii.gz -a x") # flip to fsl format
os.system("vecreg -i " + file_path_in[:-7] + "_flip.nii.gz -o " + file_path_out +
" -r " + output_dir + "/FA.nii.gz -t " + output_dir + "/MNI_2_FA.mat") # Use vecreg to transform peaks
os.system("flip_peaks -i " + file_path_out + " -o " + file_path_out + " -a x") # flip back to mrtrix format
os.system("rm " + file_path_in[:-7] + "_flip.nii.gz") # remove flipped tmp file
else:
# do not use spline interpolation because makes a lot of holes into masks
os.system("flirt -ref " + output_dir + "/FA.nii.gz -in " + file_path_in + " -out " + file_path_out +
" -applyisoxfm " + dwi_spacing + " -init " + output_dir + "/MNI_2_FA.mat -dof 6" +
" -interp trilinear")
if not output_float:
os.system("fslmaths " + file_path_out + " -thr 0.5 -bin " + file_path_out)
def create_brain_mask(input_file, output_dir):
print("Creating brain mask...")
os.system("export PATH=/usr/local/fsl/bin:$PATH")
input_dir = os.path.dirname(input_file)
input_file_without_ending = os.path.basename(input_file).split(".")[0]
os.system("bet " + join(input_dir, input_file_without_ending) + " " +
output_dir + "/nodif_brain_mask.nii.gz -f 0.3 -g 0 -m")
os.system("rm " + output_dir + "/nodif_brain_mask.nii.gz") # masked brain
os.system("mv " + output_dir + "/nodif_brain_mask_mask.nii.gz " + output_dir + "/nodif_brain_mask.nii.gz")
return join(output_dir, "nodif_brain_mask.nii.gz")
def create_fods(input_file, output_dir, bvals, bvecs, brain_mask, csd_type, nr_cpus=-1):
os.system("export PATH=/code/mrtrix3/bin:$PATH")
if nr_cpus > 0:
nthreads = " -nthreads " + str(nr_cpus)
else:
nthreads = ""
if csd_type == "csd_msmt_5tt":
# MSMT 5TT
print("Creating peaks (1 of 4)...")
t1_file = join(os.path.dirname(input_file), "T1w_acpc_dc_restore_brain.nii.gz")
os.system("5ttgen fsl " + t1_file + " " + output_dir + "/5TT.nii.gz -premasked" + nthreads)
print("Creating peaks (2 of 4)...")
os.system("dwi2response msmt_5tt " + input_file + " " + output_dir + "/5TT.nii.gz " + output_dir +
"/RF_WM.txt " + output_dir + "/RF_GM.txt " + output_dir + "/RF_CSF.txt -voxels " + output_dir +
"/RF_voxels.nii.gz -fslgrad " + bvecs + " " + bvals + " -mask " + brain_mask + nthreads)
print("Creating peaks (3 of 4)...")
os.system("dwi2fod msmt_csd " + input_file + " " + output_dir + "/RF_WM.txt " + output_dir +
"/WM_FODs.nii.gz " + output_dir + "/RF_GM.txt " + output_dir + "/GM.nii.gz " + output_dir +
"/RF_CSF.txt " + output_dir + "/CSF.nii.gz -mask " + brain_mask +
" -fslgrad " + bvecs + " " + bvals + nthreads) # multi-shell, multi-tissue
print("Creating peaks (4 of 4)...")
os.system("sh2peaks " + output_dir + "/WM_FODs.nii.gz " + output_dir + "/peaks.nii.gz -quiet" + nthreads)
elif csd_type == "csd_msmt":
# MSMT DHollander (only works with msmt_csd, not with csd)
# (Dhollander does not need a T1 image to estimate the response function)
print("Creating peaks (1 of 3)...")
os.system("dwi2response dhollander -mask " + brain_mask + " " + input_file + " " + output_dir + "/RF_WM.txt " +
output_dir + "/RF_GM.txt " + output_dir + "/RF_CSF.txt -fslgrad " + bvecs + " " + bvals +
" -mask " + brain_mask + nthreads)
print("Creating peaks (2 of 3)...")
os.system("dwi2fod msmt_csd " + input_file + " " +
output_dir + "/RF_WM.txt " + output_dir + "/WM_FODs.nii.gz " +
output_dir + "/RF_GM.txt " + output_dir + "/GM_FODs.nii.gz " +
output_dir + "/RF_CSF.txt " + output_dir + "/CSF_FODs.nii.gz " +
"-fslgrad " + bvecs + " " + bvals + " -mask " + brain_mask + nthreads)
print("Creating peaks (3 of 3)...")
os.system("sh2peaks " + output_dir + "/WM_FODs.nii.gz " + output_dir + "/peaks.nii.gz -quiet" + nthreads)
elif csd_type == "csd":
# CSD Tournier
print("Creating peaks (1 of 3)...")
os.system("dwi2response tournier " + input_file + " " + output_dir + "/response.txt -mask " + brain_mask +
" -fslgrad " + bvecs + " " + bvals + " -quiet" + nthreads)
print("Creating peaks (2 of 3)...")
os.system("dwi2fod csd " + input_file + " " + output_dir + "/response.txt " + output_dir +
"/WM_FODs.nii.gz -mask " + brain_mask + " -fslgrad " + bvecs + " " + bvals + " -quiet" + nthreads)
print("Creating peaks (3 of 3)...")
os.system("sh2peaks " + output_dir + "/WM_FODs.nii.gz " + output_dir + "/peaks.nii.gz -quiet" + nthreads)
else:
raise ValueError("'csd_type' contains invalid String")
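# Illustrative call of create_fods (hypothetical paths; assumes FSL-style bvals/bvecs text files
# and an existing brain mask):
#   create_fods("Diffusion.nii.gz", "tractseg_output", "Diffusion.bvals", "Diffusion.bvecs",
#               "tractseg_output/nodif_brain_mask.nii.gz", "csd_msmt", nr_cpus=4)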
def clean_up(keep_intermediate_files, predict_img_output, csd_type, preprocessing_done=False):
if not keep_intermediate_files:
os.chdir(predict_img_output)
# os.system("rm -f nodif_brain_mask.nii.gz")
# os.system("rm -f peaks.nii.gz")
os.system("rm -f WM_FODs.nii.gz")
if csd_type == "csd_msmt" or csd_type == "csd_msmt_5tt":
os.system("rm -f 5TT.nii.gz")
os.system("rm -f RF_WM.txt")
os.system("rm -f RF_GM.txt")
os.system("rm -f RF_CSF.txt")
os.system("rm -f RF_voxels.nii.gz")
os.system("rm -f CSF.nii.gz")
os.system("rm -f GM.nii.gz")
os.system("rm -f CSF_FODs.nii.gz")
os.system("rm -f GM_FODs.nii.gz")
else:
os.system("rm -f response.txt")
| [
"[email protected]"
] | |
38178f626de03f0d8f5098858870ec70a4883763 | 76baa6c56311eb214aae3581a0b50a8727e4770b | /env/Scripts/pilprint.py | 8a2be037126ed2d426790edb2b59d47c54962d6a | [] | no_license | GitDeus/django-online-market | 3d02c9f95c17a89d09f95751d034ce2435886554 | e3e0be842224c6e586e00c6a3b5dc8fe07e1ca71 | refs/heads/master | 2021-01-12T04:06:31.264202 | 2016-12-28T04:42:56 | 2016-12-28T04:42:56 | 77,502,861 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,658 | py | #!c:\users\deus\desktop\django-online-shop-master\myshop\env\scripts\python.exe
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
import subprocess
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
title = os.path.splitext(os.path.split(filepath)[1])[0]
format = " (%dx%d "
if image.format:
format = " (" + image.format + " %dx%d "
return title + format % image.size + image.mode + ")"
if len(sys.argv) == 1:
print("PIL Print 0.3/2003-05-05 -- print image files")
print("Usage: pilprint files...")
print("Options:")
print(" -c colour printer (default is monochrome)")
print(" -d debug (show available drivers)")
print(" -p print via lpr (default is stdout)")
print(" -P <printer> same as -p but use given printer")
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
print(v)
sys.exit(1)
printerArgs = [] # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print(Image.ID)
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printerArgs = ["lpr"]
elif o == "-P":
# printer channel
printerArgs = ["lpr", "-P%s" % a]
for filepath in argv:
try:
im = Image.open(filepath)
title = description(filepath, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printerArgs:
p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
fp = p.stdin
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
if printerArgs:
fp.close()
except:
print("cannot print image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| [
"[email protected]"
] | |
7e5b4662dfeffa20c7a76130c4a799862891251a | 59166105545cdd87626d15bf42e60a9ee1ef2413 | /test/test_surfer_api.py | 83c9232867753d43d3aac45774a3c8f63d646ba2 | [] | no_license | mosoriob/dbpedia_api_client | 8c594fc115ce75235315e890d55fbf6bd555fa85 | 8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc | refs/heads/master | 2022-11-20T01:42:33.481024 | 2020-05-12T23:22:54 | 2020-05-12T23:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | # coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import dbpedia
from dbpedia.api.surfer_api import SurferApi # noqa: E501
from dbpedia.rest import ApiException
class TestSurferApi(unittest.TestCase):
"""SurferApi unit test stubs"""
def setUp(self):
self.api = dbpedia.api.surfer_api.SurferApi() # noqa: E501
def tearDown(self):
pass
def test_surfers_get(self):
"""Test case for surfers_get
List all instances of Surfer # noqa: E501
"""
pass
def test_surfers_id_get(self):
"""Test case for surfers_id_get
Get a single Surfer by its id # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
48459734f57ca2aff30cb9fb72e12f0f1e30aaac | dfd39dcb9eea14ca132753f32001c40375d737ba | /NEMS/tests/produtil/NCEPLIBS-pyprodutil/produtil/testing/__init__.py | 59c6074a83b55cd98a3e888e79e49ef734b60022 | [
"CC0-1.0",
"LGPL-3.0-only"
] | permissive | pvelissariou1/ADC-WW3-NWM-SCHISM-NEMS | 8e1a003b8df8d5676398b4fa5d6d809b850d7dbf | 707ddcd84417211e3a7c92aa15d8cd8ddfa080ab | refs/heads/main | 2023-05-10T19:10:47.979097 | 2021-06-02T19:04:07 | 2021-06-02T19:04:07 | 372,129,845 | 0 | 1 | CC0-1.0 | 2021-06-02T19:04:08 | 2021-05-30T05:19:35 | Fortran | UTF-8 | Python | false | false | 555 | py | ##@namespace produtil.testing
# Test suite automation utilities.
#
# The produtil.testing package contains a system that automates
# testing of a suite of programs. This is intended to be used for
# regression testing, not large-scale retrospective science tests.
#
# The produtil.testing suite has its own test description language. A
# parser, produtil.testing.parse, parses this into an object tree. A
# set of compilers (produtil.testing.rocoto and
# produtil.testing.script) compiles the object tree into a script or
# set of scripts for testing.
| [
"[email protected]"
] | |
41e740e93235349f30ec5b8dc68543b2c9b87ef0 | 6b36d717a64c93e780664e83228b89e0929812c4 | /chapter5/section3/imagine/main.py | 04e3da2799a2e39713e0084a2d6333d75120c806 | [
"Apache-2.0"
] | permissive | KazuoASANO/ird_make_gui_in_python_with_qt | ae77e396deb60a6aaa3c7b3168de36c8543b13f3 | e98d5019392f3286c1efc090a5fcba6b2bfcd8e3 | refs/heads/master | 2020-04-11T12:44:39.943468 | 2018-12-14T14:37:52 | 2018-12-14T14:37:52 | 161,790,454 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | import os # 環境変数を設定する為に、 os モジュールをインポート
import sys
from PySide2.QtWidgets import QApplication
from PySide2.QtQml import QQmlApplicationEngine
from PySide2.QtCore import QUrl
def main():
""" 環境変数に Qt Quick Controls 2 のコンフィグファイル設定 を追加する
環境変数 QT_QUICK_CONTROLS_CONF に対して、本 Code と同じ
ディレクトリにある qtquickcontrols2.conf
( Qt Quick Controls 2 の Configuration File ファイル)
を設定
"""
os.environ["QT_QUICK_CONTROLS_CONF"] = "qtquickcontrols2.conf"
app = QApplication([])
engine = QQmlApplicationEngine()
url = QUrl("../Ui/Main.qml")
engine.load(url)
if not engine.rootObjects():
sys.exit(-1)
""" QMLのrootオブジェクトのtitle プロパティを変更
QQmlApplicationEngine経由で、rootObjects()を参照し
setProperty()でセットする
See : http://doc.qt.io/qt-5/qtqml-cppintegration-interactqmlfromcpp.html
https://doc.qt.io/qtforpython/overviews/properties.html#reading-and-writing-properties-with-the-meta-object-system
"""
root = engine.rootObjects()[0]
root.setProperty("title", "Style-Imagine")
ret = app.exec_()
sys.exit(ret)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
db4e1fbdf4f4daa269c76ee2db02b5f22ca44af0 | 2e56d8c64533ae6a6f58785ab3cbe0f790a5a081 | /processing/data_reader.py | 790b71e7dbdf626e1d8110a114eb485c1cfcaf46 | [] | no_license | ryanho9453/HAND | d655e0425ca0cab312772ed456c75d03f8d5e369 | 0684cb62d1f061287dbb5d634c55aa8668ddf82d | refs/heads/master | 2020-03-24T10:16:03.010102 | 2018-07-29T06:05:48 | 2018-07-29T06:05:48 | 142,651,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | import cv2 as cv
import numpy as np
import random
import os
import re
"""
data_size (2062, 64, 64)
"""
class DataReader:
def __init__(self, config, mode=None, process=None):
self.config = config
self.index = 0
self.exclude_folder = ['.DS_Store']
self.finish_all_data = False
self.number_n_batch = 0
X, self.Y = self.__load_data(mode=mode)
self.data_size = len(X)
self.X = self.__preprocess(X, process=process)
def next_batch(self):
start_idx = self.index
end_idx = self.index + self.config['batch_size']
imgs = self.X[start_idx: end_idx]
labels = self.Y[start_idx: end_idx]
# complement: top up the final short batch from the start of the data
if len(imgs) < self.config['batch_size']:
complement = self.config['batch_size'] - len(imgs)
imgs = np.concatenate([imgs, self.X[:complement]], axis=0)
labels = np.concatenate([labels, self.Y[:complement]], axis=0)
# shuffle images and labels with the same permutation so each image keeps its label
perm = np.random.permutation(len(imgs))
imgs = imgs[perm]
labels = labels[perm]
self.index = min(end_idx, self.data_size)
if self.index == self.data_size:
self.finish_all_data = True
self.number_n_batch += 1
return imgs, labels
def __load_data(self, mode=None):
"""
image array values are between 0 and 1, so multiply by 255
"""
if mode == 'train':
imgs = np.load(self.config['data_path'] + 'X.npy')[:self.config['train_size']]
labels = np.load(self.config['data_path'] + 'Y.npy')[:self.config['train_size']]
elif mode == 'test':
imgs = np.load(self.config['data_path'] + 'X.npy')[self.config['train_size']:]
labels = np.load(self.config['data_path'] + 'Y.npy')[self.config['train_size']:]
else:
imgs = np.load(self.config['data_path'] + 'X.npy')
labels = np.load(self.config['data_path'] + 'Y.npy')
imgs = imgs * 255
imgs = imgs.astype('uint8')
return imgs, labels
def __preprocess(self, imgs, process=None):
"""
if process == 'contour', threshold first, then find the contours
"""
if process == 'threshold':
X = self.__threshold_process(imgs)
return X
elif process == 'contour':
thresholds = self.__threshold_process(imgs)
X = self.__find_contour(thresholds)
return X
def __threshold_process(self, imgs):
thresholds = []
for img in imgs:
ret, thresh = cv.threshold(img, 127, 255, cv.THRESH_OTSU)
thresholds.append(thresh)
return np.array(thresholds)
def __find_contour(self, thresholds):
img_size = thresholds[0].shape
contour_pool = []
for thresh in thresholds:
_, contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
canvas = np.zeros(img_size, dtype='uint8')
for cnt in contours:
cv.drawContours(canvas, [cnt], 0, (0, 255, 0), 1)
contour_pool.append(canvas)
return np.array(contour_pool)
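# Sketch of intended usage (hypothetical config values; the real config is supplied by the caller):
#   reader = DataReader({'data_path': './data/', 'train_size': 1600, 'batch_size': 32},
#                       mode='train', process='contour')
#   imgs, labels = reader.next_batch()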
| [
"[email protected]"
] | |
cf38e723f308e6cb69aa55eabc523f2fe10c4339 | bdf94f37cf0ca8a7b14f495a16899922b86013a4 | /daily coding/remove-nth-last-element-from-list.py | 1eaa19a9067a2c521a8a88cf5d229df328f80814 | [
"MIT"
] | permissive | sirken/coding-practice | d1f00f1f48378c853c3644c92efc1c888acc7400 | b1617f0d48bf0a544eaea12a4c8d679a5a0fd870 | refs/heads/master | 2023-07-09T01:00:54.764631 | 2023-07-01T05:51:59 | 2023-07-01T05:51:59 | 254,274,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | from Test import Test
'''
Given a singly linked list and an integer k, remove the kth last element from the list. k is guaranteed to be smaller than the length of the list.
The list is very long, so making more than one pass is prohibitively expensive.
Do this in constant space and in one pass.
'''
def del_from_list(arr, k):
del(arr[-k])
return arr
def del_from_list(arr, k):
return [i for i in arr if i != arr[-k]]
Test.assert_equals(del_from_list([10, 15, 3, 7], 1), [10, 15, 3])
Test.assert_equals(del_from_list([10, 15, 3, 7], 2), [10, 15, 7])
Test.assert_equals(del_from_list([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], 5), [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13])
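# Note: both helpers above work on a Python list, and the value-based variant would also drop any
# duplicates of arr[-k]. For a real singly linked list, the usual constant-space one-pass trick is
# two pointers kept k nodes apart (illustrative sketch with a hypothetical Node(val, next) class):
#   lead = head
#   for _ in range(k):
#       lead = lead.next
#   node = head
#   while lead.next:
#       lead, node = lead.next, node.next
#   node.next = node.next.next  # unlink the kth-from-last node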
| [
"[email protected]"
] | |
7f5fbd9996c1007612b2685bd47102a929aec41d | 301e55ee3990b2daf135197eac81e1cc244e6cd3 | /python/last-stone-weight.py | 030bca282591c92a277769382f86d0902d9b3236 | [
"MIT"
] | permissive | alirezaghey/leetcode-solutions | 74b1b645c324ea7c1511d9ce3a97c8d622554417 | c32b786e52dd25ff6e4f84242cec5ff1c5a869df | refs/heads/master | 2022-08-22T16:28:05.459163 | 2022-08-18T12:02:51 | 2022-08-18T12:02:51 | 203,028,081 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | import heapq
from typing import List
class Solution:
# Time complexity: O(n * log n)
# Space complexity: O(1)
def lastStoneWeight(self, stones: List[int]) -> int:
for i in range(len(stones)):
stones[i] = -stones[i]
heapq.heapify(stones)
while len(stones) > 1:
larger, smaller = heapq.heappop(stones), heapq.heappop(stones)
larger -= smaller
if larger < 0:
heapq.heappush(stones, larger)
return abs(stones.pop()) if stones else 0
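# Worked example: lastStoneWeight([2, 7, 4, 1, 8, 1]) returns 1
# (8 vs 7 leaves 1, then 4 vs 2 leaves 2, 2 vs 1 leaves 1, 1 vs 1 leaves 0, so one stone of weight 1 remains).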
| [
"[email protected]"
] | |
2568a2ae2fe8a84f888e363d6229b616c41b738b | d6ff68dbcb85488fac60f57d90c97f50ce2cb36b | /tests/test_search_api.py | 4a2d264e9115d003074f6f501ac5c7c6049fa675 | [
"MIT"
] | permissive | cksachdev/redis-sitesearch | 0b6e0ecd0dbf44ac8997eb828a28cee0046a1fdf | 376cd19b59a68817c7c00ea181c2ca3d21ca596e | refs/heads/master | 2023-03-25T12:56:07.755279 | 2021-03-23T22:48:40 | 2021-03-23T22:48:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | import time
def test_query_python(docs, client):
result = client.simulate_get('/search?q=*')
assert result.status_code == 200
assert result.json['total'] > 0
titles = [doc['title'] for doc in result.json['results']]
assert 'Database Persistence with Redis Enterprise Software' in titles
def test_cloud_landing_page(client):
result = client.simulate_get('/search?q=cloud')
assert result.json['results'][0]['title'] == 'Redis Enterprise Cloud'
assert result.json['results'][0]['url'] == 'https://docs.redislabs.com/latest/rc/'
| [
"[email protected]"
] | |
cf236ac38a90af930689696647a23828d4053ba1 | 05e9ee0e4a20a6a75fdf28c40a85b2df08160bd9 | /src/datasets/build_datasetV1.py | dc1ac08c3be13f97eda37d5998631ba1a7b76b9c | [] | no_license | mpearmain/merc | 1fd0e53a0c37890106cff015fdeadc8c1c27a6ac | 54b90d738484559de2462f5cef07cfe16c53d1e9 | refs/heads/master | 2021-01-23T04:13:54.046757 | 2017-06-08T20:48:13 | 2017-06-08T20:48:13 | 92,920,866 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,055 | py | import fastparquet
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from src.features.build_rowwise_binary_features import binary_counts, binary_hashmap
# from src.features.build_ordinal_features import
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = 999
"""
A script to build different styles of dataset based on the files in ./src/features.
Scripts should be able to run from ./data/raw and final results should be stored in ./data/processed
For example, we may want to create a dataset based only on transformed missing values, or the combination of missing
values, and floats.
Simply pick the functions required to 'build' a dataset and run models on these different datasets.
"""
BUILD_NAME = '_build_datasetV1'
NOT_BINARY_COLS = ["X0", "X1", "X2", "X3", "X4", "X5", "X6", "X8"]
CONSTANT_COLS = ['X11', 'X93', 'X107', 'X233', 'X235', 'X268', 'X289', 'X290', 'X293', 'X297', 'X330', 'X347']
# https://www.kaggle.com/c/mercedes-benz-greener-manufacturing/discussion/34137 - Complementary cols
COMPLEMENTARY_COLS = ['X29', 'X52', 'X54', 'X76', 'X128', 'X142', 'X186', 'X204', 'X232', 'X263', 'X156']
# Read the base data
print('Loading data ...')
train_df = pd.read_csv('./data/raw/train.csv')
test_df = pd.read_csv('./data/raw/test.csv')
print('Loaded')
print("Remove Outliers in y")
train_df = train_df[train_df.y < 180]
print("Removed")
y_train = train_df['y'].values
id_train = train_df['ID'].values
id_test = test_df['ID'].values
print("Remove constant cols")
train_df = train_df.drop(['ID', 'y'] + CONSTANT_COLS, axis=1)
test_df = test_df.drop(['ID'] + CONSTANT_COLS, axis=1)
print("Removed")
ntrain = train_df.shape[0]
print("Joining train and test for row-wise transforms")
df = pd.concat([train_df, test_df], axis=0)
###########################################################
### Working on binaries Row-wise.
print("Building Binary counts values")
df['zero_counts'] = binary_counts(df[df.columns.difference(NOT_BINARY_COLS)], 0)
df['one_counts'] = binary_counts(df[df.columns.difference(NOT_BINARY_COLS)], 1)
df['binary_prop'] = df['one_counts'] / df['zero_counts']
df['binary_ones_pct'] = df['one_counts'] / (df['one_counts'] + df['zero_counts'])
df['binary_zeros_pct'] = df['zero_counts'] / (df['one_counts'] + df['zero_counts'])
df['binary_hashmap'] = binary_hashmap(df[df.columns.difference(NOT_BINARY_COLS)], low_count=4)
# For Tree based models only - Lets recode non-binary cols to be used in base models.
print("Recoding object values")
for col in NOT_BINARY_COLS + ['binary_hashmap']:
df[col] = pd.factorize(df[col])[0]
# Now split into train and test and save the output of the processed dataset.
xtrain = df[:ntrain].copy()
xtest = df[ntrain:].copy()
xtrain['ID'] = id_train
xtrain['y'] = y_train
xtest['ID'] = id_test
print('Writing Parquets')
# store
fastparquet.write('./data/processed/xtrain' + BUILD_NAME + '.parq', xtrain, write_index=False)
fastparquet.write('./data/processed/xtest' + BUILD_NAME + '.parq', xtest, write_index=False)
print('Finished')
| [
"[email protected]"
] | |
53c74fbda0f5822bb095ce083dfe2ca4b48cf755 | 6c375d17a1a0be1c8fda5305548694b5d0d4f1b6 | /deep_learning_computer_vision_python/Centroid/detect_faces_video.py | 9519edf0f77676c7add082d7d815f66c886a64da | [] | no_license | LinearPi/pycode | d566e7c2450e2ace2bb14ed09b4f310078b0c42d | b97582ba89b4f3d981eda35ea6db660db3ceaa05 | refs/heads/master | 2022-12-02T14:46:20.935006 | 2019-09-12T01:58:45 | 2019-09-12T01:58:45 | 171,231,401 | 1 | 0 | null | 2022-11-22T02:57:30 | 2019-02-18T06:58:27 | Jupyter Notebook | UTF-8 | Python | false | false | 2,505 | py | # import the necessary packages
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import time
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
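# Example launch (hypothetical file names for the Caffe prototxt and weights):
#   python detect_faces_video.py --prototxt deploy.prototxt.txt \
#       --model res10_300x300_ssd_iter_140000.caffemodel --confidence 0.5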
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=1).start()
time.sleep(2.0)
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
frame = vs.read()
frame = imutils.resize(frame, width=400)
# grab the frame dimensions and convert it to a blob
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
(300, 300), (104.0, 177.0, 123.0))
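# The (104.0, 177.0, 123.0) triple is the per-channel BGR mean subtracted from the 300x300 input
# before the forward pass (the values commonly paired with this Caffe face-detection model).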
# pass the blob through the network and obtain the detections and
# predictions
net.setInput(blob)
detections = net.forward()
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with the
# prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the `confidence` is
# greater than the minimum confidence
if confidence < args["confidence"]:
continue
# compute the (x, y)-coordinates of the bounding box for the
# object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# draw the bounding box of the face along with the associated
# probability
text = "{:.2f}%".format(confidence * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(frame, (startX, startY), (endX, endY),
(0, 0, 255), 2)
cv2.putText(frame, text, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
# show the output frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop() | [
"[email protected]"
] | |
9834494975b1d64b80c28e44a79b7587fe1c1701 | 22930121054cd6607a0888af4c9bc52a025fd6b0 | /models/item.py | d57c866de7d8c1721ad191d68ce05d01b3fc17e8 | [] | no_license | suruisunstat/REST-API | 6e0ae0f13bc154211fc6f84fc4a3111f9c5c53cf | 69fc91528b7dbc9bb8718f3be8a14f6dd22d7ed4 | refs/heads/master | 2020-04-27T02:23:55.892531 | 2019-03-06T18:23:17 | 2019-03-06T18:23:17 | 173,992,568 | 0 | 0 | null | 2019-03-06T09:46:45 | 2019-03-05T17:40:05 | Python | UTF-8 | Python | false | false | 764 | py | from db import db
class ItemModel(db.Model):
__tablename__ = "items"
id = db.Column(db.Integer,primary_key=True)
name = db.Column(db.String(80))
price = db.Column(db.Float(precision=2))
store_id = db.Column(db.Integer,db.ForeignKey('stores.id'))
store = db.relationship('StoreModel')
def __init__(self,name,price,store_id):
self.name = name
self.price = price
self.store_id = store_id
def json(self):
return {'name': self.name, 'price':self.price}
@classmethod
def find_by_name(cls, name):
return cls.query.filter_by(name=name).first() # SELECT * FROM items WHERE name = name limit 1
def save_to_db(self):
db.session.add(self)
db.session.commit()
def delete_from_db(self):
db.session.delete(self)
db.session.commit() | [
"[email protected]"
] | |
3ecbe576c1f90fba607b933299f498c2d5ab6aa5 | ef9f11a89a1629e15fd8c1afb55e6bee2dcaa33e | /Rest/Code/src/models/__init__.py | 562361d548c63b979eb5e54e78657c89ef06c923 | [] | no_license | learntogithub/Database | 7d42114bca40dde25ff1ff960499bb330fe6215e | 64c1870f2d6908980af3c833e2d858f6a5694bf8 | refs/heads/master | 2020-04-25T00:37:52.896684 | 2019-02-24T20:09:04 | 2019-02-24T20:09:04 | 172,384,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
# initialize the db
db = SQLAlchemy()
bcrypt = Bcrypt()
| [
"[email protected]"
] | |
cd136c6271b102f4cae77ae4226e47ba77b8eefd | c8930c7310d6a3a11cfac86501866b7d96891dfb | /gui/overlayplotter.py | e73f5612a0056e405ab97c5d8e94af18cf6282e3 | [] | no_license | JonasNorling/nowplot | 88f3703bdf0d17e1f89c91e167bc38674dcabfdd | 85a37bf8b3cb4942c29dc96abb16eed125575b90 | refs/heads/master | 2021-03-12T20:28:01.486592 | 2013-09-03T19:09:01 | 2013-09-03T19:09:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,173 | py | import cairo
class OverlayPlotter(object):
def __init__(self, surface, size, xAxis, yAxis):
self.surface = surface
self.size = size
self.xAxis = xAxis
self.yAxis = yAxis
self.ctx = cairo.Context(self.surface)
# Flip coordinate system upside-down
self.ctx.set_matrix(cairo.Matrix(1, 0, 0, -1, 0, self.size[1]))
self.axiscolor = (0, 0, 0, 0.5)
self.gridcolor = (0, 0, 0, 0.1)
self.textcolor = (1, 1, 1, 0.7)
def setXAxis(self, xAxis):
self.xAxis = xAxis
def setYAxis(self, yAxis):
self.yAxis = yAxis
def draw(self):
ctx = self.ctx
ctx.set_source_rgba(0, 0, 0, 0)
ctx.paint()
# Draw axes
ctx.set_source_rgba(*self.axiscolor)
ctx.set_line_width(2)
zeroy = self.yAxis.map(0.0) * self.size[1]
ctx.move_to(0, zeroy)
ctx.line_to(self.size[0], zeroy)
ctx.stroke()
zerox = self.xAxis.map(0.0) * self.size[0]
ctx.move_to(zerox, 0)
ctx.line_to(zerox, self.size[1])
ctx.stroke()
# Draw grid and labels
ctx.set_source_rgba(*self.gridcolor)
ctx.set_font_size(10)
x_tick = round(self.xAxis.min, self.xAxis.tickdecimals)
while x_tick <= self.xAxis.max:
x = (x_tick - self.xAxis.min) * (self.size[0] / float(self.xAxis.max - self.xAxis.min))
ctx.move_to(x, self.size[1])
ctx.line_to(x, 0)
ctx.stroke()
ctx.save()
ctx.set_source_rgba(*self.textcolor)
pattern = "%.0f"
if self.xAxis.tickdecimals > 0:
pattern = "%%.%df" % self.xAxis.tickdecimals
text = pattern % x_tick
extents = ctx.text_extents(text)
if zeroy > 50:
ctx.move_to(x - extents[2] / 2.0, zeroy - extents[3] - 2)
else:
ctx.move_to(x - extents[2] / 2.0, 3)
ctx.set_matrix(cairo.Matrix())
ctx.show_text(text)
ctx.stroke()
ctx.restore()
x_tick += self.xAxis.ticksize
y_tick = round(self.yAxis.min, self.yAxis.tickdecimals)
while y_tick <= self.yAxis.max:
y = (y_tick - self.yAxis.min) * (self.size[1] / float(self.yAxis.max - self.yAxis.min))
ctx.move_to(self.size[0], y)
ctx.line_to(0, y)
ctx.stroke()
ctx.save()
ctx.set_source_rgba(*self.textcolor)
pattern = "%.0f"
if self.yAxis.tickdecimals > 0:
pattern = "%%.%df" % self.yAxis.tickdecimals
text = pattern % y_tick
extents = ctx.text_extents(text)
if zerox > 50:
ctx.move_to(zerox - 5 - extents[2] - 3, y - extents[3] / 2.0)
else:
ctx.move_to(3, y - extents[3] / 2.0)
ctx.set_matrix(cairo.Matrix())
ctx.show_text(text)
ctx.stroke()
ctx.restore()
y_tick += self.yAxis.ticksize
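# Sketch of intended usage (assumes axis objects exposing min, max, ticksize, tickdecimals and map(),
# as referenced in draw() above):
#   surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 800, 600)
#   plotter = OverlayPlotter(surface, (800, 600), x_axis, y_axis)
#   plotter.draw()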
| [
"[email protected]"
] | |
04ec99c8d693cb82d323837fe0c3ab785223569f | ed671ae2bbcab3f9a60194b15042fb6fdd9fce73 | /week5/week5-bestR.py | 813b731087d8b23aeaa9744c922bb332bee6cc1c | [] | no_license | yved/python_lesson1 | 23305366685c8de51b68f1fd8d5077f47aeeea00 | 079b552376bb7560d02c99dce25a87236fa73782 | refs/heads/master | 2020-04-29T11:28:34.347581 | 2019-03-07T08:12:19 | 2019-03-07T08:12:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | #過去銷售
salesStr = "14,23,26,17,17,12,24,19,10,18,22,31,19,16,22,28,20,27,20,32"
sales = salesStr.split(',')
for i in range(len(sales)):
sales[i] = int(sales[i]) # convert the sales figures to integers
# given information
stgCost = 2 # shortage cost: the discount given to a customer when an item is out of stock
invCost = 1000*0.073/365 # 1000 is the cost of one unit of inventory; 1000 in the bank earns 7.3% a year, so this is the holding cost per unit per day
Q = 23 # order quantity for each purchase from the supplier
I = 20 # opening inventory
bestR = 0
costofbestR = 1000000000000000000000000000
for R in range(Q):
I = 20 # reset the opening inventory for each candidate reorder point R
totalcost = 0
for s in sales:
I-=s
if I<0:
totalcost+= (-I)*stgCost # if inventory drops below zero, a shortage cost is incurred
I+=Q
elif I<R: # if inventory falls below the reorder point R, place an order
I+=Q
if I >0:
totalcost+= I*invCost
print(R,totalcost)
if totalcost<costofbestR:
bestR = R
costofbestR = totalcost
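# The loop above brute-forces the reorder point R for the fixed order quantity Q, replaying the 20
# days of past sales and accumulating shortage cost (stgCost per unit short) and daily holding cost
# (invCost per unit held); the R with the lowest simulated total cost is printed below.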
print('bestR : ',bestR,'min cost',costofbestR) | [
"[email protected]"
] | |
06271ae173105379204ab264845d67f31664140d | fa4ff377f5ec0aa12b6db7e1bb2c24f1cd90542f | /qa-pairs-relation-classification/SANN/text_sann.py | 654ef723121f6bcc46d8e5cdfd7b4cb13935589c | [
"MIT"
] | permissive | NiceMartin/pynlp | 5e0d76103bf73da93e8d502d647e70a60257864d | 21b6d5177da04c90a626131bc83a3b5b767f50bd | refs/heads/master | 2020-04-24T10:08:51.443230 | 2019-02-17T12:27:47 | 2019-02-17T12:27:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,289 | py | # -*- coding:utf-8 -*-
__author__ = 'sliderSun'
import numpy as np
import tensorflow as tf
from tensorflow import sigmoid
from tensorflow import tanh
from tensorflow.contrib import rnn
from tensorflow.contrib.layers import batch_norm
from tensorflow.python.ops import array_ops
class BatchNormLSTMCell(rnn.RNNCell):
"""Batch normalized LSTM (cf. http://arxiv.org/abs/1603.09025)"""
def __init__(self, num_units, is_training=False, forget_bias=1.0,
activation=tanh, reuse=None):
"""Initialize the BNLSTM cell.
Args:
num_units: int, The number of units in the BNLSTM cell.
forget_bias: float, The bias added to forget gates (see above).
Must set to `0.0` manually when restoring from CudnnLSTM-trained
checkpoints.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
self._num_units = num_units
self._is_training = is_training
self._forget_bias = forget_bias
self._activation = activation
self._reuse = reuse
@property
def state_size(self):
return rnn.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
with tf.variable_scope(scope or type(self).__name__, reuse=self._reuse):
c, h = state
input_size = inputs.get_shape().as_list()[1]
W_xh = tf.get_variable('W_xh',
[input_size, 4 * self._num_units],
initializer=orthogonal_initializer())
W_hh = tf.get_variable('W_hh',
[self._num_units, 4 * self._num_units],
initializer=bn_lstm_identity_initializer(0.95))
bias = tf.get_variable('bias', [4 * self._num_units])
xh = tf.matmul(inputs, W_xh)
hh = tf.matmul(h, W_hh)
bn_xh = batch_norm(xh, is_training=self._is_training)
bn_hh = batch_norm(hh, is_training=self._is_training)
hidden = bn_xh + bn_hh + bias
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(value=hidden, num_or_size_splits=4, axis=1)
new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))
bn_new_c = batch_norm(new_c, is_training=self._is_training)
new_h = self._activation(bn_new_c) * sigmoid(o)
new_state = rnn.LSTMStateTuple(new_c, new_h)
return new_h, new_state
def orthogonal(shape):
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v
return q.reshape(shape)
def bn_lstm_identity_initializer(scale):
def _initializer(shape, dtype=tf.float32, partition_info=None):
"""
Ugly cause LSTM params calculated in one matrix multiply
"""
size = shape[0]
# gate (j) is identity
t = np.zeros(shape)
t[:, size:size * 2] = np.identity(size) * scale
t[:, :size] = orthogonal([size, size])
t[:, size * 2:size * 3] = orthogonal([size, size])
t[:, size * 3:] = orthogonal([size, size])
return tf.constant(t, dtype=dtype)
return _initializer
def orthogonal_initializer():
def _initializer(shape, dtype=tf.float32, partition_info=None):
return tf.constant(orthogonal(shape), dtype)
return _initializer
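# Illustrative use of the BatchNormLSTMCell defined above (hypothetical sizes; inputs is assumed to be
# a [batch, time, features] float32 tensor):
#   cell = BatchNormLSTMCell(256, is_training=True)
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)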
class TextSANN(object):
"""A SANN for text classification."""
def __init__(
self, sequence_length, num_classes, vocab_size, lstm_hidden_size, attention_unit_size,
attention_hops_size, fc_hidden_size, embedding_size, embedding_type, l2_reg_lambda=0.0,
pretrained_embedding=None):
# Placeholders for input, output, dropout_prob and training_tag
self.input_x_front = tf.placeholder(tf.int32, [None, sequence_length], name="input_x_front")
self.input_x_behind = tf.placeholder(tf.int32, [None, sequence_length], name="input_x_behind")
self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
self.is_training = tf.placeholder(tf.bool, name="is_training")
self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
def _linear(input_, output_size, scope="SimpleLinear"):
"""
Linear map: output[k] = sum_i(Matrix[k, i] * args[i] ) + Bias[k]
Args:
input_: a tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
scope: VariableScope for the created subgraph; defaults to "SimpleLinear".
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
shape = input_.get_shape().as_list()
if len(shape) != 2:
raise ValueError("Linear is expecting 2D arguments: {0}".format(str(shape)))
if not shape[1]:
raise ValueError("Linear expects shape[1] of arguments: {0}".format(str(shape)))
input_size = shape[1]
# Now the computation.
with tf.variable_scope(scope):
W = tf.get_variable("W", [input_size, output_size], dtype=input_.dtype)
b = tf.get_variable("b", [output_size], dtype=input_.dtype)
return tf.nn.xw_plus_b(input_, W, b)
def _highway_layer(input_, size, num_layers=1, bias=-2.0, f=tf.nn.relu):
"""
Highway Network (cf. http://arxiv.org/abs/1505.00387).
t = sigmoid(Wy + b)
z = t * g(Wy + b) + (1 - t) * y
where g is nonlinearity, t is transform gate, and (1 - t) is carry gate.
"""
for idx in range(num_layers):
g = f(_linear(input_, size, scope=("highway_lin_{0}".format(idx))))
t = tf.sigmoid(_linear(input_, size, scope=("highway_gate_{0}".format(idx))) + bias)
output = t * g + (1. - t) * input_
input_ = output
return output
# Embedding Layer
with tf.device("/cpu:0"), tf.name_scope("embedding"):
# Use random generated the word vector by default
# Can also be obtained through our own word vectors trained by our corpus
if pretrained_embedding is None:
self.embedding = tf.Variable(tf.random_uniform([vocab_size, embedding_size], minval=-1.0, maxval=1.0,
dtype=tf.float32), trainable=True, name="embedding")
else:
if embedding_type == 0:
self.embedding = tf.constant(pretrained_embedding, dtype=tf.float32, name="embedding")
if embedding_type == 1:
self.embedding = tf.Variable(pretrained_embedding, trainable=True,
dtype=tf.float32, name="embedding")
self.embedded_sentence_front = tf.nn.embedding_lookup(self.embedding, self.input_x_front)
self.embedded_sentence_behind = tf.nn.embedding_lookup(self.embedding, self.input_x_behind)
# Bi-LSTM Layer
with tf.name_scope("Bi-lstm"):
lstm_fw_cell = rnn.BasicLSTMCell(lstm_hidden_size) # forward direction cell
lstm_bw_cell = rnn.BasicLSTMCell(lstm_hidden_size) # backward direction cell
if self.dropout_keep_prob is not None:
lstm_fw_cell = rnn.DropoutWrapper(lstm_fw_cell, output_keep_prob=self.dropout_keep_prob)
lstm_bw_cell = rnn.DropoutWrapper(lstm_bw_cell, output_keep_prob=self.dropout_keep_prob)
# Creates a dynamic bidirectional recurrent neural network
# shape of `outputs`: tuple -> (outputs_fw, outputs_bw)
# shape of `outputs_fw`: [batch_size, sequence_length, hidden_size]
# shape of `state`: tuple -> (outputs_state_fw, output_state_bw)
# shape of `outputs_state_fw`: tuple -> (c, h) c: memory cell; h: hidden state
outputs_front, state_front = tf.nn.bidirectional_dynamic_rnn(
lstm_fw_cell, lstm_bw_cell, self.embedded_sentence_front, dtype=tf.float32)
outputs_behind, state_front = tf.nn.bidirectional_dynamic_rnn(
lstm_fw_cell, lstm_bw_cell, self.embedded_sentence_behind, dtype=tf.float32)
# Concat output
# [batch_size, sequence_length, lstm_hidden_size * 2]
self.lstm_out_front = tf.concat(outputs_front, axis=2)
self.lstm_out_behind = tf.concat(outputs_behind, axis=2)
# Add attention
with tf.name_scope("attention"):
W_s1 = tf.Variable(tf.truncated_normal(shape=[attention_unit_size, lstm_hidden_size * 2],
stddev=0.1, dtype=tf.float32), name="W_s1")
W_s2 = tf.Variable(tf.truncated_normal(shape=[attention_hops_size, attention_unit_size],
stddev=0.1, dtype=tf.float32), name="W_s2")
self.attention_front = tf.map_fn(
fn=lambda x: tf.matmul(W_s2, x),
elems=tf.tanh(
tf.map_fn(
fn=lambda x: tf.matmul(W_s1, tf.transpose(x)),
elems=self.lstm_out_front,
dtype=tf.float32
)
)
)
self.attention_behind = tf.map_fn(
fn=lambda x: tf.matmul(W_s2, x),
elems=tf.tanh(
tf.map_fn(
fn=lambda x: tf.matmul(W_s1, tf.transpose(x)),
elems=self.lstm_out_behind,
dtype=tf.float32
)
)
)
# [batch_size, attention_hops_size, sequence_length]
self.attention_out_front = tf.nn.softmax(self.attention_front)
self.attention_out_behind = tf.nn.softmax(self.attention_behind)
# [batch_size, attention_hops_size, lstm_hidden_size * 2]
self.M_front = tf.matmul(self.attention_out_front, self.lstm_out_front)
self.M_behind = tf.matmul(self.attention_out_behind, self.lstm_out_behind)
# shape of `M_flat`: [batch_size, attention_hops_size * lstm_hidden_size * 2]
self.M_flat_front = tf.reshape(self.M_front, shape=[-1, attention_hops_size * lstm_hidden_size * 2])
self.M_flat_behind = tf.reshape(self.M_behind, shape=[-1, attention_hops_size * lstm_hidden_size * 2])
# shape of `M_flat_combine`: [batch_size, attention_hops_size * lstm_hidden_size * 2 * 2]
self.M_flat_combine = tf.concat([self.M_flat_front, self.M_flat_behind], axis=1)
# Fully Connected Layer
with tf.name_scope("fc"):
W = tf.Variable(tf.truncated_normal(shape=[attention_hops_size * lstm_hidden_size * 2 * 2, fc_hidden_size],
stddev=0.1, dtype=tf.float32), name="W")
b = tf.Variable(tf.constant(value=0.1, shape=[fc_hidden_size], dtype=tf.float32), name="b")
self.fc = tf.nn.xw_plus_b(self.M_flat_combine, W, b)
# Batch Normalization Layer
self.fc_bn = batch_norm(self.fc, is_training=self.is_training, trainable=True, updates_collections=None)
# Apply nonlinearity
self.fc_out = tf.nn.relu(self.fc_bn, name="relu")
# Highway Layer
with tf.name_scope("highway"):
self.highway = _highway_layer(self.fc_out, self.fc_out.get_shape()[1], num_layers=1, bias=0)
# Add dropout
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.highway, self.dropout_keep_prob)
# Final scores and predictions
with tf.name_scope("output"):
W = tf.Variable(tf.truncated_normal(shape=[fc_hidden_size, num_classes],
stddev=0.1, dtype=tf.float32), name="W")
b = tf.Variable(tf.constant(value=0.1, shape=[num_classes], dtype=tf.float32), name="b")
self.logits = tf.nn.xw_plus_b(self.h_drop, W, b, name="logits")
self.softmax_scores = tf.nn.softmax(self.logits, name="softmax_scores")
self.predictions = tf.argmax(self.logits, 1, name="predictions")
self.topKPreds = tf.nn.top_k(self.softmax_scores, k=1, sorted=True, name="topKPreds")
# Calculate mean cross-entropy loss, L2 loss and attention penalization loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.input_y, logits=self.logits)
losses = tf.reduce_mean(losses, name="softmax_losses")
l2_losses = tf.add_n([tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()],
name="l2_losses") * l2_reg_lambda
self.loss = tf.add(losses, l2_losses, name="loss")
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
# TODO: Reconsider the metrics calculation
# Number of correct predictions
with tf.name_scope("num_correct"):
correct = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.num_correct = tf.reduce_sum(tf.cast(correct, "float"), name="num_correct")
# Calculate Fp
with tf.name_scope("fp"):
fp = tf.metrics.false_positives(labels=tf.argmax(self.input_y, 1), predictions=self.predictions)
self.fp = tf.reduce_sum(tf.cast(fp, "float"), name="fp")
# Calculate Fn
with tf.name_scope("fn"):
fn = tf.metrics.false_negatives(labels=tf.argmax(self.input_y, 1), predictions=self.predictions)
self.fn = tf.reduce_sum(tf.cast(fn, "float"), name="fn")
# Calculate Recall
with tf.name_scope("recall"):
self.recall = self.num_correct / (self.num_correct + self.fn)
# Calculate Precision
with tf.name_scope("precision"):
self.precision = self.num_correct / (self.num_correct + self.fp)
# Calculate F1
with tf.name_scope("F1"):
self.F1 = (2 * self.precision * self.recall) / (self.precision + self.recall)
# Calculate AUC
with tf.name_scope("AUC"):
self.AUC = tf.metrics.auc(self.softmax_scores, self.input_y, name="AUC")
| [
"[email protected]"
] | |
64404eac3284461a9319d05c77721c504a57cf3b | 7929295353a68037f0166b1ce90b73ec424f846a | /userbot/plugins/snake.py | 95297089760eb8000f45f95f0ac823ba3b639fae | [
"MIT"
] | permissive | Marshmellow098/AK-CRAZY-TECH-BOT | 4030c5b80fe0ced93e98a1d502387975826e2d09 | 66b6f57810447b31bb81d675bd1fd5a58740ceb6 | refs/heads/main | 2023-02-14T22:29:23.017523 | 2021-01-03T09:42:57 | 2021-01-03T09:42:57 | 326,139,350 | 0 | 0 | MIT | 2021-01-02T08:21:08 | 2021-01-02T08:21:07 | null | UTF-8 | Python | false | false | 5,220 | py |
from telethon import events
import asyncio
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern=r"snk"))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 27)
await event.edit("Snake")
animation_chars = [
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◼️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◼️◻️◼️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 27])
| [
"[email protected]"
] | |
1ad94d636b1c70bbc8bbbc5db86b98be3519b966 | 307e52d79c9068a2648ae82bbe11cd58733bba37 | /calib/CalcBase.py | adc7f797998490a249948cd460d275154adc74bd | [] | no_license | greatofdream/Recon1t | 0aa775c43dcfa5b3da7b5894e2567fbe8e7b2991 | 80e58ba3c2c23f1efa962d02fcb2205a95aa716f | refs/heads/master | 2022-11-09T14:12:55.747488 | 2020-06-09T02:43:24 | 2020-06-09T02:43:24 | 263,953,536 | 0 | 0 | null | 2020-05-14T15:31:27 | 2020-05-14T15:31:26 | null | UTF-8 | Python | false | false | 4,129 | py | import numpy as np
import h5py
import tables # readfile() below uses tables.open_file, so PyTables must be imported
def ReadPMT():
f = open(r"../PMT_1t.txt")
line = f.readline()
data_list = []
while line:
num = list(map(float,line.split()))
data_list.append(num)
line = f.readline()
f.close()
PMT_pos = np.array(data_list)
return PMT_pos
def readfile(filename):
h1 = tables.open_file(filename,'r')
print(filename)
truthtable = h1.root.GroundTruth
EventID = truthtable[:]['EventID']
ChannelID = truthtable[:]['ChannelID']
x = h1.root.TruthData[:]['x']
y = h1.root.TruthData[:]['y']
z = h1.root.TruthData[:]['z']
h1.close()
#print(x.shape, EventID.shape, np.unique(EventID).shape, np.std(y),np.sum(x**2+y**2+z**2>0.1))
dn = np.where((x==0) & (y==0) & (z==0))
dn_index = (x==0) & (y==0) & (z==0)
#print(np.sum(dn_index))
pin = dn[0] + np.min(EventID)
if(np.sum(x**2+y**2+z**2>0.1)>0):
cnt = 0
for ID in np.arange(np.min(EventID), np.max(EventID)+1):
if ID in pin:
cnt = cnt+1
#print('Trigger No:', EventID[EventID==ID])
#print('Fired PMT', ChannelID[EventID==ID])
ChannelID = ChannelID[~(EventID == ID)]
EventID = EventID[~(EventID == ID)]
#print(cnt, ID, EventID.shape,(np.unique(EventID)).shape)
x = x[~dn_index]
y = y[~dn_index]
z = z[~dn_index]
#print(x.shape, EventID.shape, np.unique(EventID).shape,np.std(y),np.sum(x**2+y**2+z**2>0.1))
return (EventID, ChannelID, x, y, z)
def readchain(radius, path, axis):
for i in np.arange(0, 3):
if(i == 0):
#filename = path + '1t_' + radius + '.h5'
filename = '%s1t_%s_%s.h5' % (path, radius, axis)
EventID, ChannelID, x, y, z = readfile(filename)
else:
try:
filename = '%s1t_%s_%s_%d.h5' % (path, radius, axis, i)
EventID1, ChannelID1, x1, y1, z1 = readfile(filename)
EventID = np.hstack((EventID, EventID1))
ChannelID = np.hstack((ChannelID, ChannelID1))
x = np.hstack((x, x1))
y = np.hstack((y, y1))
z = np.hstack((z, z1))
except:
pass
return EventID, ChannelID, x, y, z
def CalMean(axis):
data = []
PMT_pos = ReadPMT()
ra = -0.0001
EventID, ChannelID, x, y, z = readchain('%+.3f' % ra, '/mnt/stage/douwei/Simulation/1t_root/2.0MeV_dn/', axis)
size = np.size(np.unique(EventID))
total_pe = np.zeros(np.size(PMT_pos[:,0])*size)
for k_index, k in enumerate(np.unique(EventID)):
if not k_index % 1e4:
print('preprocessing %d-th event' % k_index)
hit = ChannelID[EventID == k]
tabulate = np.bincount(hit)
event_pe = np.zeros(np.size(PMT_pos[:,0]))
# tabulate begin with 0
event_pe[0:np.size(tabulate)] = tabulate
total_pe[(k_index) * np.size(PMT_pos[:,0]) : (k_index + 1) * np.size(PMT_pos[:,0])] = event_pe
# although it will be repeated, mainly for later EM:
# vertex[0,(k_index) * np.size(PMT_pos[:,0]) : (k_index + 1) * np.size(PMT_pos[:,0])] = x[k_index]
# vertex[1,(k_index) * np.size(PMT_pos[:,0]) : (k_index + 1) * np.size(PMT_pos[:,0])] = y[k_index]
# vertex[2,(k_index) * np.size(PMT_pos[:,0]) : (k_index + 1) * np.size(PMT_pos[:,0])] = z[k_index]
# total_pe[(k_index) * np.size(PMT_pos[:,0]) : (k_index + 1) * np.size(PMT_pos[:,0])] = event_pe
data = np.mean(np.reshape(total_pe, (30, -1), order='F'), axis=1) # return an array so the weighted average below works element-wise
print(total_pe.shape, np.size(np.unique(EventID)))
return size, data
size_x, ax0 = CalMean('x')
size_y, ay0 = CalMean('y')
size_z, az0 = CalMean('z')
mean = (size_x*ax0 + size_y*ay0 + size_z*az0)/(size_x+size_y+size_z)
base = np.exp(np.mean(np.log(mean)))
correct = mean/base
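# 'base' is the geometric mean of the per-PMT mean photoelectron counts (exp of the mean log),
# and 'correct' is the per-PMT multiplicative correction factor (mean / base) stored for later calibration.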
with h5py.File('base.h5','w') as out:
out.create_dataset('base', data = base)
out.create_dataset('correct', data = correct) | [
"[email protected]"
] | |
e6b1b161ddc04c71e8fe91ef84a0980df2bfd177 | caa460b627869c2f1e7bb3a024741d671e9986ff | /redlib/web/htmlparser.py | d5c02208ba2c08e2ced896f9013de8f57fb70bc0 | [
"MIT"
] | permissive | amol9/redlib | 8f920c5055b6bd2d167eb40201bd574f839bae7b | 25d2085925c41473424583bbfeb7ca6033937e62 | refs/heads/master | 2022-12-11T22:32:19.823857 | 2022-12-01T15:41:40 | 2022-12-01T15:41:40 | 32,393,559 | 1 | 1 | null | 2017-10-19T14:47:09 | 2015-03-17T13:02:37 | Python | UTF-8 | Python | false | false | 2,090 | py | import re
import sys
from xml.etree.ElementTree import XMLParser, Element, SubElement, ElementTree
from ..system.common import *
from six.moves.html_parser import HTMLParser
from .htmlparser_debugger import HtmlParserDebugger
__all__ = ['HtmlParser', 'HtmlStripper']
class HtmlParser(HTMLParser):
def __init__(self, skip_tags=[], debugger=None):
self._root = None
self._stack = []
self._skip_tags = skip_tags
self._skip = False, None
self._hpd = debugger if debugger is not None else HtmlParserDebugger(debug=False)
if is_py3():
HTMLParser.__init__(self, convert_charrefs=True)
else:
HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
if self._skip[0] == True:
return
if tag in self._skip_tags:
self._skip = True, tag
return
self._hpd.dump_tag(tag, attrs, level=len(self._stack))
attr_dict = dict((k, v) for (k, v) in attrs)
if self._root == None:
self._root = Element(tag, attr_dict)
self._stack.append(self._root)
else:
e = SubElement(self._stack[-1], tag, attr_dict)
self._stack.append(e)
def handle_endtag(self, tag):
if self._skip[0] == True:
if tag == self._skip[1]:
self._skip = False, None
return
if tag == self._stack[-1].tag or True: # note: 'or True' makes this branch always taken, so mismatched end tags are popped as well
self._stack.pop()
else:
if self._hpd.debugging:
m = self._stack[-1]
attrs = [(k, v) for (k, v) in list(m.attrib.items())]
self._hpd.dump_tag(m.tag, attrs=attrs, level=len(self._stack), msg='mismatch')
self._hpd.dump_tag(tag, end=True, level=len(self._stack))
def handle_data(self, data):
if self._skip[0]:
return
if self._stack:
if self._stack[-1].text:
self._stack[-1].text += data
else:
self._stack[-1].text = data
def get_element_tree(self):
return self._root
etree = property(get_element_tree)
class HtmlStripper(HTMLParser):
def __init__(self):
if is_py3():
HTMLParser.__init__(self, convert_charrefs=True)
else:
HTMLParser.__init__(self)
self._output = ''
def handle_data(self, data):
self._output += data + ' '
def get_output(self):
return self._output
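# Example: parse a snippet while skipping <script> blocks, then read the element tree:
#   p = HtmlParser(skip_tags=['script'])
#   p.feed('<html><body><p>hi</p><script>ignored()</script></body></html>')
#   root = p.etree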
| [
"[email protected]"
] | |
6c66e32cf836eef9c9e9d11ed5ba6c404bc97a0e | 1b319753b42d1c61c29eddd9fcf1be15a09efc34 | /tango_with_django_project/rango/migrations/0001_initial.py | 5665331801c0338b7fe6672e67c8cb3f9ba6a861 | [] | no_license | JubyThomas/Disneyfanclubs | 1eb2466933a1849271459aec8ec8494bc351d359 | e380f23e7649cad05ef425013e27b2fdc20e5df3 | refs/heads/main | 2023-07-02T21:32:30.946433 | 2021-08-06T07:56:08 | 2021-08-06T07:56:08 | 393,302,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | # Generated by Django 2.1.5 on 2021-07-30 23:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('views', models.IntegerField(default=0)),
('likes', models.IntegerField(default=0)),
('slug', models.SlugField(unique=True)),
],
options={
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128)),
('url', models.URLField()),
('views', models.IntegerField(default=0)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rango.Category')),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('age', models.IntegerField(default=12)),
('location', models.CharField(max_length=150)),
('picture', models.ImageField(blank=True, upload_to='profile_images')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] |