blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
45c2a04f57d7ecef273f6f64a6c580e235c26178
|
f72775ed28235c0f12beaac0b3462b870f06d178
|
/8-0-in-operator.py
|
2a675a76bb47da30e12adb638b86782924ab69a0
|
[] |
no_license
|
cheykoff/thinkpython2e
|
29b975ccbbf043372cc14ac4bddf3d342b4c3524
|
ed9dfe24fb2a03cfe125aa1b1b2867eb871d05e9
|
refs/heads/master
| 2020-07-31T16:04:25.426387 | 2019-11-16T07:08:33 | 2019-11-16T07:08:33 | 210,666,740 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 133 |
py
|
def in_both(word1, word2):
    """Print every letter of word1 (in order, including repeats) that also
    appears anywhere in word2, one letter per line."""
    shared = (ch for ch in word1 if ch in word2)
    for ch in shared:
        print(ch)


in_both('banana', 'anna')
|
[
"[email protected]"
] | |
d80010fadea778a900cc29285ed6d860ee871169
|
e7590291241d8b3a949558d097c0bd3ed405e47a
|
/Bidwork/Bidwork/urls.py
|
c425942200dc86fddcd5003187dd5b825f18b6ea
|
[] |
no_license
|
hooman130/team01
|
752c438f8349505605fac8d5d247fa3066929b97
|
3a62175b93a02f12b75ac0680e6fb90288b6e5ef
|
refs/heads/master
| 2022-12-26T11:11:23.012303 | 2020-10-11T06:26:04 | 2020-10-11T06:26:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 891 |
py
|
"""Bidwork URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from users import views as user_views
urlpatterns = [
path('admin/', admin.site.urls),
path('register/', user_views.register,name='register'),
path('', include('app1.urls')),
]
|
[
"[email protected]"
] | |
facb0b14a7261fba5e6e4c140856b20c543ebd84
|
144202935037b93dc49e637a6289ee30c776dfa7
|
/value.py
|
f6157755c7e8013be896b93febf97b2192976068
|
[] |
no_license
|
zack-norton/deckofcards
|
f7e491022746d4147fa93ad389bce257669823f1
|
9f07b0b12776637384fdef76808321547d198567
|
refs/heads/master
| 2022-12-08T05:12:09.688046 | 2020-09-01T16:36:02 | 2020-09-01T16:36:02 | 292,051,905 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 174 |
py
|
from enum import Enum

# Card face values for a standard deck, ranked low (TWO = 0) to high (ACE = 12).
_RANK_NAMES = (
    'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN',
    'EIGHT', 'NINE', 'TEN', 'JACK', 'QUEEN', 'KING', 'ACE',
)
Value = Enum('Value', {name: rank for rank, name in enumerate(_RANK_NAMES)})
|
[
"[email protected]"
] | |
4bc8d29a66d49cf58f2052c06433371ac5522828
|
a1b7ebe24a123b61efeef314e6b4d8c4f2dc7275
|
/UserInterface/phones.py
|
e3655f2addce23cee959c89081facbd15e1ef515
|
[] |
no_license
|
maleacristina/PythonApplications
|
3c06657ab389acfe9f9905f71a5f7e88640669e8
|
82c1996de9e3822fc3a332c8bc809501d8a8c9cc
|
refs/heads/master
| 2021-04-06T02:32:30.531084 | 2018-11-12T13:14:36 | 2018-11-12T13:14:36 | 125,254,196 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 287 |
py
|
# In-memory phone directory: each entry is a ['Last, First', 'number'] pair.
_DIRECTORY_ENTRIES = (
    ('Meyers, Chris', '343-4349'),
    ('Smith, Robert', '689-1234'),
    ('Jones, Janet', '483-5432'),
    ('Barnhart, Ralph', '683-2341'),
    ('Nelson, Eric', '485-2689'),
    ('Prefect, Ford', '987-6543'),
    ('Zigler, Mary', '567-8901'),
    ('Smith, Bob', '689-1234'),
)
phonelist = [list(entry) for entry in _DIRECTORY_ENTRIES]
|
[
"[email protected]"
] | |
4e7f9287f95f670dde7a6b932c73beb0db42f2db
|
0cf34274f469957da8b0db6806892e7f9d4ccfa8
|
/app/views.py
|
780bc428b8e57e23a6b65929d90de87dc42daa94
|
[] |
no_license
|
CO18308/Titanic-Disaster-ML
|
cdd1f1858f198b3236a93db29903aaeee78670fc
|
c49f55b92c4b20bf21f14ded07c48770d597e21e
|
refs/heads/master
| 2023-06-23T02:49:44.003269 | 2021-07-22T19:31:24 | 2021-07-22T19:31:24 | 388,558,991 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 881 |
py
|
from django.shortcuts import render
import os
from django.http import HttpResponse

# Base directory holding the ML helper scripts.
# NOTE(review): hard-coded absolute Windows path — consider deriving this
# from settings.BASE_DIR so the app is portable.
_SCRIPT_DIR = r"F:\project\titanicML\app"


def _run_script(request, script_name):
    """Run one ML helper script synchronously, then re-render the home page.

    The six public views below all followed the same copy-pasted pattern;
    the behavior (exact os.system command and rendered template) is
    unchanged.  NOTE(review): os.system blocks the worker for the whole
    script run; script names are fixed constants, so no user input reaches
    the shell.
    """
    os.system("python " + _SCRIPT_DIR + "\\" + script_name)
    return render(request, "ML_homepage.html")


def homepage(request):
    """Landing page; runs no script."""
    return render(request, 'ML_homepage.html')


def graph1(request):
    return _run_script(request, "graph1.py")


def graph2(request):
    return _run_script(request, "graph2.py")


def graph3(request):
    return _run_script(request, "graph3.py")


def histo(request):
    return _run_script(request, "histo.py")


def predict(request):
    return _run_script(request, "predict.py")


def genCSV(request):
    return _run_script(request, "genCSV.py")
|
[
"[email protected]"
] | |
2b0d3ac122481b1191cc4054a913030596296730
|
4bd0a699519d9b8b93eb0efc6ea9aef0f85245ed
|
/db_repository/versions/009_migration.py
|
1eb1a7acb2725171e504286f2d6c10e92611c2d6
|
[] |
no_license
|
cthtuf/apres-work.co
|
8dee257d8795e32b87b6710bca4070f325280b96
|
1c01e6ba69b335c75eabb713d0cb0cb49139f604
|
refs/heads/master
| 2021-01-20T18:10:22.616582 | 2015-06-12T13:00:16 | 2015-06-12T13:00:16 | 90,910,444 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 929 |
py
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
# Schema migration 009: drops (upgrade) / restores (downgrade) the
# webcamera.location_id column using sqlalchemy-migrate.
pre_meta = MetaData()
post_meta = MetaData()
# Reflection of the `webcamera` table as it exists BEFORE this migration.
webcamera = Table('webcamera', pre_meta,
    Column('id', INTEGER, primary_key=True, nullable=False),
    Column('location_id', INTEGER),
    Column('resort_id', INTEGER),
    Column('img_link', VARCHAR(length=255)),
    Column('img_na', VARCHAR(length=255)),
    Column('name', VARCHAR(length=50)),
)
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    # Remove the location_id column from webcamera.
    pre_meta.tables['webcamera'].columns['location_id'].drop()
def downgrade(migrate_engine):
    # Operations to reverse the above upgrade go here.
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    # Re-create the location_id column (data previously in it is lost).
    pre_meta.tables['webcamera'].columns['location_id'].create()
|
[
"[email protected]"
] | |
451f0111f0a96c661b464dbc17bd4158d273d417
|
6a1b2239e9115dc75e0947d00143a8a7005a317e
|
/ZBase_Framework/Utilities/Loghandler.py
|
e3020c96ff672e2b340c5e801fcc32a8831e78b4
|
[] |
no_license
|
vinaytalkin/Testing
|
2c68d1810a9455aa31d9eeebba6b8c67a4feb5b0
|
03854a4e0afd6065d48db623961330f1dae7da61
|
refs/heads/master
| 2020-07-12T00:43:44.022313 | 2020-03-04T11:42:50 | 2020-03-04T11:42:50 | 204,677,575 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 827 |
py
|
import logging
from datetime import datetime
import sys
import ZBase_Framework.Utilities.ConfigFile as drivepath
class Logging():
    # Thin wrapper that configures the root logger on construction.
    # NOTE(review): logging.basicConfig only takes effect once per process;
    # creating a second Logging() will not switch to a new file.
    newfilename = ''
    def __init__(self):
        # Log file name comes from the project config; strftime expands any
        # date/time placeholders embedded in it.
        LOG_FILENAME = datetime.now().strftime(drivepath.Logfilename)
        #LOG_date_for_file = datetime.now().strftime('E:\\Testing\\Logs\\Test_%H_%M_%S_%d_%m_%Y')
        print(LOG_FILENAME)
        logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)
        logging.info("Log File started" + drivepath.Logfilename)
        #newfilename = LOG_FILENAME
'''LEVELS = {'debug': logging.DEBUG,
              'info': logging.INFO,
              'warning': logging.WARNING,
              'error': logging.ERROR,
              'critical': logging.CRITICAL} '''
|
[
"[email protected]"
] | |
d2890dc2c182e19a172de32259e3c4a1bec88acf
|
351af1fb32ccc340bf8e7b2842fc57f25ad930fc
|
/crawlgui.py
|
5b30f1124a40c1990559cebf314fcf47cbf26e2a
|
[] |
no_license
|
jay24rajput/Song-Crawler-and-Player
|
0e6fd7211bd2b1e4f5059a2689e6cd42482f2b78
|
008b1f2bb87311ee8f302d3f0b9bc84a20ef462e
|
refs/heads/master
| 2020-03-14T15:34:21.554495 | 2019-02-10T15:26:06 | 2019-02-10T15:26:06 | 131,679,250 | 0 | 2 | null | 2018-05-25T18:02:32 | 2018-05-01T05:20:42 |
Python
|
UTF-8
|
Python
| false | false | 1,111 |
py
|
from tkinter import *
import webcrawler
# Minimal Tk front-end for the song crawler: type a song name, press the
# search button (or Right arrow) to crawl, then "View Links" loads the
# resulting <song>.txt file into the text area.
root = Tk()
root.geometry('896x504')
searchVar = StringVar(root)
searchsong = ""
songfileText = Text()
def openfile():
    # Resolve the stored song name without re-crawling (False) and open its
    # links file.
    f = open(webcrawler.getSongName(searchsong, False)+'.txt')
    global songfileText
    # NOTE(review): this inserts the file OBJECT (its repr), not the file's
    # contents — f.read() is probably intended; confirm against webcrawler.
    songfileText.insert('1.0', f)
def leftArrowPress(event = None):
    # Left arrow key moves focus to the search entry.
    searchEntry.focus_set()
def rightArrowPress(event = None):
    # Right arrow key moves focus to the search button.
    searchButton.focus_set()
def getSong():
    # Read the query and run the crawler (True = actually crawl).
    searchVar = searchEntry.get()  # NOTE(review): local shadows the global StringVar
    global searchsong
    searchsong = str(searchVar)
    webcrawler.getSongName(searchsong,True)
# Widget construction.
searchLabel = Label(root, text = "Name of the song :")
searchEntry = Entry(root, width = 75)
searchIcon = PhotoImage(file = "searchicon.png")
searchButton = Button(root, image = searchIcon, command = getSong)
openButton = Button(text="View Links", command = openfile)
searchEntry.focus_set()
# Keyboard navigation bindings.
root.bind('<Left>', leftArrowPress)
root.bind('<Right>', rightArrowPress)
# Grid layout: label / entry+button / open button / results text.
searchLabel.grid(row = 0, column = 0, sticky = W)
searchEntry.grid(row = 1, column = 0)
searchButton.grid(row = 1, column = 1)
openButton.grid(row=2,column=0)
songfileText.grid(row=3, column = 0)
root.mainloop()
|
[
"[email protected]"
] | |
f05332ee9816b4e26ef141933888aec46b3a5ed9
|
376da4fb6acff7adf195936c417a0ae2c5d435dc
|
/python/hands-on/flask-05-Handling-SQL-with-Flask-Web-Application/app-with-sqlite.py
|
f8ceedd59c0a7f0da3a911dcfbbe33d36d522e77
|
[] |
no_license
|
gulizarsubay/my-projects
|
7ed2e490d31cfa49e439283c2eecd976d6bce474
|
5cc0464706896d054aa8e6e80288afb9625c18c3
|
refs/heads/main
| 2023-09-02T23:49:57.413431 | 2021-11-03T18:40:44 | 2021-11-03T18:40:44 | 411,764,123 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,617 |
py
|
# import required libraries and define object
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# - configure required environmental variables for SQLite
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///./email.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# - drop users table if exists, create new users table and add some rows for sample
# NOTE(review): this reset runs at import time on every start, wiping any
# previously stored users.
drop_table = 'DROP TABLE IF EXISTS users;'
users_table = """
CREATE TABLE users(
username VARCHAR NOT NULL PRIMARY KEY,
email VARCHAR);
"""
data= """
INSERT INTO users
VALUES
("Stevie", "[email protected]"),
("Tugba", "[email protected]"),
("Murat", "[email protected]"),
("Zeynep", "[email protected]"),
("Tuncay", "[email protected]"),
("Selman", "[email protected]");
"""
# - Execute sql commands and commit them
db.session.execute(drop_table)
db.session.execute(users_table)
db.session.execute(data)
db.session.commit()
# - Write a function named `find_emails` which find emails using keyword from the user table in the db,
# - and returns result as tuples `(name, email)`.
def find_email(keyword):
    """Return [(username, email), ...] for users whose username contains *keyword*.

    Returns [("Not Found", "Not Found")] when no row matches.

    SECURITY FIX: the original interpolated the user-supplied keyword
    directly into the SQL string (f-string), allowing SQL injection; this
    version binds it as a parameter instead.
    """
    query = "SELECT * FROM users WHERE username like :pattern;"
    result = db.session.execute(query, {"pattern": f"%{keyword}%"})
    user_emails = [(row[0], row[1]) for row in result]
    if not user_emails:
        user_emails = [("Not Found", "Not Found")]
    return user_emails
# - Write a function named `insert_email` which adds new email to users table the db.
def insert_email(name,email):
    """Insert (name, email) into users unless the username already exists.

    Returns a human-readable status string in all cases (empty input,
    success, or duplicate username).

    SECURITY FIX: the original built both the lookup and the INSERT with
    f-string interpolation of user input (SQL injection); this version uses
    bound parameters.  Response strings are unchanged.
    """
    result = db.session.execute(
        "SELECT * FROM users WHERE username like :name", {"name": name})
    response = ''
    if name == None or email == None:
        response = 'Username or email can not be empty!!'
    elif not any(result):
        db.session.execute(
            "INSERT INTO users VALUES (:name, :email);",
            {"name": name, "email": email})
        db.session.commit()
        response = f"User {name} and {email} have been added successfully"
    else:
        response = f"User {name} already exist"
    return response
# - Write a function named `emails` which finds email addresses by keyword using `GET` and `POST` methods,
# - using template files named `emails.html` given under `templates` folder
# - and assign to the static route of ('/')
# Search page: POST looks up emails by keyword via find_email, GET just
# renders the empty form.
@app.route('/', methods = ['POST', 'GET'])
def emails():
    if request.method == 'POST':
        user_app_name = request.form['user_keyword']  # keyword typed by the user
        user_emails = find_email(user_app_name)
        return render_template('emails.html', show_result = True, keyword = user_app_name, name_emails = user_emails)
    else:
        return render_template('emails.html', show_result = False)
# - Write a function named `add_email` which inserts new email to the database using `GET` and `POST` methods,
# - using template files named `add-email.html` given under `templates` folder
# - and assign to the static route of ('/add')
# Add-user page: POST inserts a new (username, email) row via insert_email
# and shows the status message; GET renders the empty form.
@app.route('/add', methods=['GET', 'POST'])
def add_email():
    if request.method == 'POST':
        user_app_name = request.form['username']
        user_app_email = request.form['useremail']
        result_app = insert_email(user_app_name, user_app_email)
        return render_template('add-email.html', result_html=result_app, show_result=True)
    else:
        return render_template('add-email.html', show_result=False)
# - Add a statement to run the Flask application which can be reached from any host on port 80.
# Run the development server reachable from any host on port 80.
# NOTE(review): the Flask dev server on 0.0.0.0:80 is for demos only; use a
# production WSGI server for real deployments.
if __name__ == '__main__':
    # app.run(debug=True)
    app.run(host='0.0.0.0', port=80)
|
[
"[email protected]"
] | |
73331282f2b44eed8de8cb3952a571ea7e194a0d
|
7fc30deeb0e882b97c1604a1df2435ec0494179e
|
/makespan.py
|
74fdd51a630833b98a3feba7a0184b43cd83c112
|
[] |
no_license
|
yt100323/FJSSP-1
|
93b430f432d98778692c35b822760bb2946957b6
|
3600591293ef27c810b8be8c15e8aee893ddbaec
|
refs/heads/master
| 2021-09-16T12:31:53.929114 | 2018-06-20T16:34:11 | 2018-06-20T16:34:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,573 |
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri May 25 15:46:05 2018
L: is a large number
overview is a 'number-of-jobs' x 2 array consisting of a list of operations and a list of machines for every job=row i
data is a dictionary of the form {(job_number, process_number, machine_number) : processing_time}, which carries the
available data
prec is a list with entries of type ((job_num, proc_num, machine),(other job_num, other proc_num, same machine))
d is a list of job due dates
@author: marios
"""
print('this is makespan')
from pulp import *
import numpy as np
# NOTE(review): `overview`, `data`, `prec` and `avail_machines` are not
# defined in this file; per the docstring they are expected to exist in the
# namespace this script is exec'd/imported into. Confirm before running
# standalone.
L = 10**9  # big-M constant used to (de)activate constraints
#d = [1000]*10
jobs = range(1,len(overview)+1)
## -- problem
# Flexible job-shop scheduling: minimise the makespan `delta`.
prob = LpProblem("FJSSP", LpMinimize)
## -- (decision) variables
# routing[(i,j,k)] == 1 iff operation j of job i runs on machine k.
routing = LpVariable.dicts('routing', (i for i in data.keys()),0,1,LpBinary)
# preced[r] orders the two operations in pair r that share a machine.
preced = LpVariable.dicts('different job precedence', (i for i in prec),0, 1, LpBinary)
delta = LpVariable("delta",0,None, LpContinuous) # delta --> to be minimized
S = LpVariable.dicts("Starting Times",(i for i in data.keys()), 0,None,LpContinuous)
C = LpVariable.dicts("Completion time of process",(i for i in data.keys()),0,None,LpContinuous)
Comp = LpVariable.dicts("Job completion times",range(1,len(overview)+1), 0,None,LpContinuous)
## --- objective function ---
prob += delta
## --- constraints ---
# makespan bounds every job completion time
for i in jobs:
    prob += Comp[i] <= delta
# each operation is assigned to exactly one machine
for i in jobs:
    for j in overview[i-1][0]: # operations of job i
        prob += lpSum(routing[(i,j,k)] for k in avail_machines[i-1][j-1]) == 1
# if an operation is not assigned to a machine, then S and C for that operation are set to 0
for key in data.keys():
    prob += routing[key]*L >= S[key] + C[key]
# completion time of operation
for key in data.keys():
    prob += C[key] >= S[key] + data[key] - (1 - routing[key]) * L
# operation precedence
for i in jobs:
    for j in overview[i-1][0]: # operations of job i
        if j > 1:
            prob += lpSum(S[(i,j,k)] for k in avail_machines[i-1][j-1]) >= lpSum(C[(i,j-1,k)] for k in avail_machines[i-1][j-2])
# Compeletion time of job i definition
for i in jobs:
    j = overview[i-1][0][-1]
    prob += Comp[i] >= lpSum(C[(i,j,k)] for k in avail_machines[i-1][j-1])
# different-job precedence on same machine, condition
#for r in prec:
#    prob += L*preced[r] + (C[r[0]] - S[r[1]]) <= 2*L
#k = 0
# Disjunctive pair: exactly one of the two orderings is enforced per pair,
# selected by the binary preced[r].
for r in prec:
    prob += S[r[1]] >= C[r[0]] - (1 - preced[r])*L
for r in prec:
    prob += S[r[0]] >= C[r[1]] - preced[r]*L
|
[
"[email protected]"
] | |
51bf37336733ebb7290fae17e6bd80600e6c18a6
|
54d140dc8c88e7300c76b9cca48ed796bca5390e
|
/TensorFlow/t4.3.2.py
|
ac5897131500aa3b1e7554db87c94d4c17b9c00f
|
[] |
no_license
|
kulinbin/-
|
e051274a882c4a2476d3bf9cb4bd3eb1daee7e9d
|
4cd6b76bcff74a430e33ce2f6727f7ebbaebdb26
|
refs/heads/master
| 2020-03-22T13:05:03.548206 | 2018-11-13T11:36:42 | 2018-11-13T11:36:42 | 140,082,047 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,969 |
py
|
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets

# Linear regression on iris with an epsilon-insensitive (SVR-style) loss,
# using the TensorFlow 1.x graph API: predict sepal length (y = column 0)
# from petal width (x = column 3).
sess=tf.Session()
iris=datasets.load_iris()
x_vals=np.array([x[3] for x in iris.data])
y_vals=np.array([y[0] for y in iris.data])
# 80/20 train/test split.
train_indices=np.random.choice(len(x_vals),round(len(x_vals)*0.8),replace=False)
test_indices=np.array(list(set(range(len(x_vals)))-set(train_indices)))
x_vals_train=x_vals[train_indices]
x_vals_test=x_vals[test_indices]
y_vals_train=y_vals[train_indices]
y_vals_test=y_vals[test_indices]
# Declare batch size and build the linear model y = A*x + b.
batch_size=50
x_data=tf.placeholder(shape=[None,1],dtype=tf.float32)
y_target=tf.placeholder(shape=[None,1],dtype=tf.float32)
A=tf.Variable(tf.random_normal(shape=[1,1]))
b=tf.Variable(tf.random_normal(shape=[1,1]))
model_output=tf.add(tf.matmul(x_data,A),b)
# Epsilon-insensitive loss: absolute errors below epsilon cost nothing.
epsilon=tf.constant([0.5])
loss=tf.reduce_mean(tf.maximum(0.,tf.subtract(tf.abs(tf.subtract(model_output,y_target)),epsilon)))
# Optimizer.
my_opt=tf.train.GradientDescentOptimizer(0.075)
train_step=my_opt.minimize(loss)
init=tf.global_variables_initializer()
sess.run(init)
train_loss=[]
test_loss=[]
for i in range(200):
    rand_index=np.random.choice(len(x_vals_train),size=batch_size)
    rand_x=np.transpose([x_vals_train[rand_index]])
    rand_y = np.transpose([y_vals_train[rand_index]])
    sess.run(train_step,feed_dict={x_data:rand_x,y_target:rand_y})
    # BUG FIX: the original fed mismatched arrays for both evaluations
    # (x_vals_test paired with y_vals_train) and appended the test loss to
    # train_loss, leaving test_loss empty.  Train loss now uses the train
    # split, test loss uses the test split, and each goes to its own list.
    temp_train_loss=sess.run(loss,feed_dict={x_data:np.transpose([x_vals_train]),y_target:np.transpose([y_vals_train])})
    train_loss.append(temp_train_loss)
    temp_test_loss = sess.run(loss,feed_dict={x_data: np.transpose([x_vals_test]), y_target: np.transpose([y_vals_test])})
    test_loss.append(temp_test_loss)
    if(i+1)%50==0:
        print('-----------')
        print('generation:'+str(i))
        print('A='+str(sess.run(A))+'b='+str(sess.run(b)))
        print('train loss='+str(temp_train_loss))  # fixed 'trsin' typo
        print('test loss='+str(temp_test_loss))
|
[
"[email protected]"
] | |
168fbad224225dc65797910e6826cf85779b21de
|
e194b23d8b3acdc6faecc4ae3cdd1b90732877f7
|
/blogproject/blog/admin.py
|
345cd1b8f8a01ba05f40db3b4fe60f26218bdf5d
|
[
"Apache-2.0"
] |
permissive
|
D0m1no/blog
|
8f2234ad4b7f5d95b72f90e5032d7721dec65d18
|
71099da3671c586e9d4df51f3649f440dbed897d
|
refs/heads/master
| 2020-03-16T19:05:07.236278 | 2018-05-10T13:06:14 | 2018-05-10T13:06:14 | 132,900,193 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 350 |
py
|
from django.contrib import admin  # FIX: removed duplicated identical import
from .models import Post, Category, Tag

# Register your models here.
class PostAdmin(admin.ModelAdmin):
    """Admin options for Post: show key metadata columns in the change list."""
    list_display = ['title', 'created_time', 'modified_time', 'category', 'author']

admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Tag)
|
[
"[email protected]"
] | |
1895f71eae05aca65fce05b45fb13a3823efd665
|
a1ab8b6635fa331b3615e05b63795cad5c4c7d68
|
/userImages/tests/test_permissions.py
|
057840736508427dd8db870367e19f8962b8eb7e
|
[] |
no_license
|
tayadavison/Image-Repo
|
6d1f4143266ef799a91b7e421e081675d21078e7
|
ff7eea8305faa767873ee7ad6b635bf03e89e171
|
refs/heads/main
| 2023-04-18T01:17:04.652233 | 2021-05-06T19:28:42 | 2021-05-06T19:28:42 | 364,024,224 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,589 |
py
|
from django.test import TestCase
from django.contrib.auth.models import User
from userImages.models import Image
from userImages.api.permissions import IsOwnerOrReadOnly
from rest_framework.test import APIRequestFactory
# Tests the custom permissions for images where only owners have edit/delete permissions
class TestApiPermissions(TestCase):
    """IsOwnerOrReadOnly: any user may GET an image, but only its owner may
    PUT or DELETE it."""
    @classmethod
    def setUpTestData(cls):
        # One image owned by imageOwner; notImageOwner is a second user with
        # no relation to the image.
        cls.factory = APIRequestFactory()
        cls.imageOwner = User.objects.create(username='owner', password="test1234")
        cls.notImageOwner = User.objects.create(username='notOwner', password="test1234")
        Image.objects.create(title="testingKitten", image="pictures/CuteKittens.webp",price=57.49, discount=22.50, available=True, owner=cls.imageOwner )
        pass
    def testOwnerGetPermission(self):
        """Owner can read (GET is a safe method)."""
        image = Image.objects.get(title="testingKitten")
        request = self.factory.get('/')
        request.user = self.imageOwner
        permission = IsOwnerOrReadOnly()
        self.assertTrue(permission.has_object_permission(request, None, image))
    def testOwnerPutPermission(self):
        """Owner can update."""
        image = Image.objects.get(title="testingKitten")
        request = self.factory.put('/')
        request.user = self.imageOwner
        permission = IsOwnerOrReadOnly()
        self.assertTrue(permission.has_object_permission(request, None, image))
    def testOwnerDeletePermission(self):
        """Owner can delete."""
        image = Image.objects.get(title="testingKitten")
        request = self.factory.delete('/')
        request.user = self.imageOwner
        permission = IsOwnerOrReadOnly()
        self.assertTrue(permission.has_object_permission(request, None, image))
    def testNotOwnerGetPermission(self):
        """Non-owner can still read — safe methods are allowed for everyone."""
        image = Image.objects.get(title="testingKitten")
        request = self.factory.get('/')
        request.user = self.notImageOwner
        permission = IsOwnerOrReadOnly()
        self.assertTrue(permission.has_object_permission(request, None, image))
    def testNotOwnerPutPermission(self):
        """Non-owner must NOT be able to update."""
        image = Image.objects.get(title="testingKitten")
        request = self.factory.put('/')
        request.user = self.notImageOwner
        permission = IsOwnerOrReadOnly()
        self.assertFalse(permission.has_object_permission(request, None, image))
    def testNotOwnerDeletePermission(self):
        """Non-owner must NOT be able to delete."""
        image = Image.objects.get(title="testingKitten")
        request = self.factory.delete('/')
        request.user = self.notImageOwner
        permission = IsOwnerOrReadOnly()
        self.assertFalse(permission.has_object_permission(request, None, image))
|
[
"[email protected]"
] | |
0ac1d251becbfc30fab18f4aec7840bb6984a301
|
e2d756cdef92155373a2ad21c21c8882bb22fa6c
|
/pyexamples/examples/__init__.py
|
3e9e3fc9eb9df93930434b98e1742624f56ca467
|
[] |
no_license
|
fred-yu-2013/avatar
|
87d1e371f3ecb907cc1facbabc0163fdbf4ef321
|
65058dd243c401934054779ab31c46806507a67e
|
refs/heads/master
| 2020-03-30T20:59:10.771106 | 2016-02-21T05:50:06 | 2016-02-21T05:50:06 | 7,955,229 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 349 |
py
|
# -*- coding: utf-8 -*-
"""
Some real-world usage examples, organised by topic in the subpackages below.

(Python 2 syntax: this module uses print statements.)
"""
import linecache
# Read line 1 of input.txt via linecache (caches file contents across calls).
line_number = 0
line_number += 1
print linecache.getline("input.txt", line_number)
class Foo():
    # Minimal class used to demonstrate the introspection builtins below.
    def __init__(self):
        self.a = 3
foo = Foo()
# dir / getattr / hasattr introspection demos.
print dir(foo)
print getattr(foo, 'a')
# print getattr(foo, 'b')
print hasattr(foo, 'c')
|
[
"[email protected]"
] | |
d0b4eccc76eaf52e4c7b40751b8c425cce2ee625
|
e55fc37c93b38bce9b6d2205eac4c678745e2b9a
|
/TestingProject/venv/Scripts/easy_install-3.8-script.py
|
715013c42d9e32b41eb47b848fbfdafb41598642
|
[] |
no_license
|
Sbessonov/testlab4
|
61107dd1ea255badaaae83a79e83b42f1490c78f
|
b346c1dea63edd7dd20b0c66ba43c1a33edf6194
|
refs/heads/master
| 2020-11-30T02:12:48.292696 | 2020-01-09T07:07:24 | 2020-01-09T07:07:24 | 230,272,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 453 |
py
|
#!D:\GitHub\testlab4\TestingProject\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
# Auto-generated setuptools console-script shim: normalises argv[0] (strips
# the -script.py / .exe suffix) and dispatches to the entry point.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
    )
|
[
"[email protected]"
] | |
3c595de877657acc75859e6f2924f0c1ef1469da
|
5ddd0bfb6f3e9847ee34caff324e025f9b331644
|
/tokens.py
|
c169a48d4600581539994ed554cf070af3e6bb01
|
[
"Apache-2.0"
] |
permissive
|
olvitar/python_training
|
a980ed508a2419c7341e68f42d10fa4bc06ebdf9
|
4c2ca819b1781fc785dc634f4e13e88fc0b45fc2
|
refs/heads/master
| 2020-03-28T05:54:58.007287 | 2018-09-22T10:49:59 | 2018-09-22T10:49:59 | 147,803,053 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 125 |
py
|
# Test-account credentials used by the training exercises.
# NOTE(review): plaintext credentials committed to source control — move
# these to environment variables or a secrets store.
ADV_LOGIN = "[email protected]"
ADV_PASSWORD = "Vbhjy_2018"
PYT_LOGIN: str = "oliver"
PYT_PASSWORD: str = "Vbhjy_30"
|
[
"[email protected]"
] | |
cad88990fdb11975a67ff69d4bc9db7e5ee6a354
|
da2fcabb4873422722916b82b4fa5616555c49df
|
/tasks/migrations/0001_initial.py
|
6fa52c428260e569bc099facd1ab27eaf57e50c3
|
[] |
no_license
|
Stretchddt/Todo-App
|
2703b837fbe358b83edd6dded9a74bf0e18653b6
|
bc350da0e18b36844785bdd5289f0485bce51f34
|
refs/heads/main
| 2023-07-02T15:49:58.420339 | 2021-08-02T03:11:52 | 2021-08-02T03:11:52 | 391,799,025 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 578 |
py
|
# Generated by Django 3.2.5 on 2021-07-31 17:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Task table (title + completed flag)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=400)),
                ('completed', models.BooleanField(blank=True, default=False, null=True)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
4aff87513b6569e20448ffadc6e221fd99aa6626
|
cac1361422799eda841fa68821c7d3c03d448dd8
|
/src/envs/utils.py
|
c92ee36731b20376c97c357578d24c0f93dec8aa
|
[
"MIT"
] |
permissive
|
NinaMaz/eco-dqn
|
ee30525e667d24891cc81d8d9cb0dda2e821c668
|
d9ea164c59014e4209ae069005029af818372ade
|
refs/heads/master
| 2022-10-15T14:19:40.557533 | 2020-06-05T21:06:27 | 2020-06-05T21:06:27 | 269,739,516 | 1 | 0 |
MIT
| 2020-06-05T18:39:42 | 2020-06-05T18:39:41 | null |
UTF-8
|
Python
| false | false | 14,455 |
py
|
import random
from abc import ABC, abstractmethod
from enum import Enum
import networkx as nx
import numpy as np
class EdgeType(Enum):
    """How edge weights are drawn: all +1, random ±1, or uniform in [-1, 1]."""
    UNIFORM = 1
    DISCRETE = 2
    RANDOM = 3
class RewardSignal(Enum):
    """Shape of the environment's reward signal."""
    DENSE = 1
    BLS = 2
    SINGLE = 3
    CUSTOM_BLS = 4
class ExtraAction(Enum):
    """Optional extra action appended to the per-spin action set."""
    PASS = 1
    RANDOMISE = 2
    NONE = 3
class OptimisationTarget(Enum):
    """Quantity the agent optimises: graph cut value or Ising energy."""
    CUT = 1
    ENERGY = 2
class SpinBasis(Enum):
    """Representation of spin states: signed (±1) or binary (0/1)."""
    SIGNED = 1
    BINARY = 2
class Observable(Enum):
    """Features that can be included in the agent's observation vector."""
    # Local observations that differ between nodes.
    SPIN_STATE = 1
    IMMEDIATE_REWARD_AVAILABLE = 2
    TIME_SINCE_FLIP = 3
    # Global observations that are the same for all nodes.
    EPISODE_TIME = 4
    TERMINATION_IMMANENCY = 5
    NUMBER_OF_GREEDY_ACTIONS_AVAILABLE = 6
    DISTANCE_FROM_BEST_SCORE = 7
    DISTANCE_FROM_BEST_STATE = 8
# Default observation set used when the caller does not specify one.
DEFAULT_OBSERVABLES = [Observable.SPIN_STATE,
                       Observable.IMMEDIATE_REWARD_AVAILABLE,
                       Observable.TIME_SINCE_FLIP,
                       Observable.DISTANCE_FROM_BEST_SCORE,
                       Observable.DISTANCE_FROM_BEST_STATE,
                       Observable.NUMBER_OF_GREEDY_ACTIONS_AVAILABLE,
                       Observable.TERMINATION_IMMANENCY]
class GraphGenerator(ABC):
    """Base class for random adjacency-matrix generators.

    Subclasses implement get(), returning an (n_spins, n_spins) weight
    matrix (plus a bias vector when biased=True).
    """
    def __init__(self, n_spins, edge_type, biased=False):
        self.n_spins = n_spins
        self.edge_type = edge_type
        self.biased = biased

    def pad_matrix(self, matrix):
        """Return a copy of *matrix* padded with one trailing zero row/column.

        BUG FIX: the original computed the padded array `m` but returned the
        unpadded input, so with_padding=True had no effect on matrices.
        """
        dim = matrix.shape[0]
        m = np.zeros((dim+1,dim+1))
        m[:-1,:-1] = matrix
        return m

    def pad_bias(self, bias):
        """Return *bias* extended with a single trailing zero entry."""
        return np.concatenate((bias,[0]))

    @abstractmethod
    def get(self, with_padding=False):
        raise NotImplementedError
###################
# Unbiased graphs #
###################
class RandomGraphGenerator(GraphGenerator):
    """Fully random graphs: the edge density itself is drawn uniformly at
    random for every generated graph."""
    def __init__(self, n_spins=20, edge_type=EdgeType.DISCRETE, biased=False):
        super().__init__(n_spins, edge_type, biased)

        # Choose the edge-weight sampler once, based on the edge type.
        if self.edge_type == EdgeType.UNIFORM:
            self.get_w = lambda : 1
        elif self.edge_type == EdgeType.DISCRETE:
            self.get_w = lambda : np.random.choice([+1, -1])
        elif self.edge_type == EdgeType.RANDOM:
            self.get_w = lambda : np.random.uniform(-1, 1)
        else:
            raise NotImplementedError()

    def get(self, with_padding=False):
        """Sample a symmetric weight matrix (and a bias vector when biased).

        BUG FIX: removed unreachable statements that followed the return in
        the original and referenced non-existent attributes (self.matrix /
        self.bias).  Reachable behavior is unchanged.
        """
        g_size = self.n_spins
        density = np.random.uniform()
        matrix = np.zeros((g_size, g_size))
        # Fill the strict lower triangle and mirror it for symmetry;
        # the diagonal stays zero (no self-connections).
        for i in range(self.n_spins):
            for j in range(i):
                if np.random.uniform() < density:
                    w = self.get_w()
                    matrix[i, j] = w
                    matrix[j, i] = w
        matrix = self.pad_matrix(matrix) if with_padding else matrix

        if self.biased:
            bias = np.array([self.get_w() if np.random.uniform() < density else 0 for _ in range(self.n_spins)])
            bias = self.pad_bias(bias) if with_padding else bias
            return matrix, bias
        else:
            return matrix
class RandomErdosRenyiGraphGenerator(GraphGenerator):
    """Erdos-Renyi graphs: connection probability is re-sampled per graph
    from N(mean, std) given by p_connection, clipped to [0, 1]."""
    # NOTE(review): mutable default for p_connection is harmless here (it is
    # only read), but a tuple would be safer.
    def __init__(self, n_spins=20, p_connection=[0.1,0], edge_type=EdgeType.DISCRETE):
        super().__init__(n_spins, edge_type, False)

        # Normalise a scalar p into (mean, std=0) form.
        if type(p_connection) not in [list,tuple]:
            p_connection = [p_connection, 0]
        assert len(p_connection)==2, "p_connection must have length 2"
        self.p_connection = p_connection

        # The "connection mask" supplies symmetric edge WEIGHTS; the graph
        # topology comes from networkx below.
        if self.edge_type == EdgeType.UNIFORM:
            self.get_connection_mask = lambda : np.ones((self.n_spins,self.n_spins))
        elif self.edge_type == EdgeType.DISCRETE:
            def get_connection_mask():
                # Random ±1 entries, symmetrised via lower triangle + mirrored upper.
                mask = 2. * np.random.randint(2, size=(self.n_spins, self.n_spins)) - 1.
                mask = np.tril(mask) + np.triu(mask.T, 1)
                return mask
            self.get_connection_mask = get_connection_mask
        elif self.edge_type == EdgeType.RANDOM:
            def get_connection_mask():
                # Random uniform [-1, 1) entries, symmetrised the same way.
                mask = 2.*np.random.rand(self.n_spins,self.n_spins)-1
                mask = np.tril(mask) + np.triu(mask.T, 1)
                return mask
            self.get_connection_mask = get_connection_mask
        else:
            raise NotImplementedError()

    def get(self, with_padding=False):
        # Sample this graph's density, build the topology, then weight it.
        p = np.clip(np.random.normal(*self.p_connection),0,1)
        g = nx.erdos_renyi_graph(self.n_spins, p)
        adj = np.multiply(nx.to_numpy_array(g), self.get_connection_mask())

        # No self-connections (this modifies adj in-place).
        np.fill_diagonal(adj, 0)

        return self.pad_matrix(adj) if with_padding else adj
class RandomBarabasiAlbertGraphGenerator(GraphGenerator):
    """Barabasi-Albert (preferential-attachment) graphs with m_insertion_edges
    edges added per new node; edge weights come from the connection mask."""
    def __init__(self, n_spins=20, m_insertion_edges=4, edge_type=EdgeType.DISCRETE):
        super().__init__(n_spins, edge_type, False)

        self.m_insertion_edges = m_insertion_edges

        # Symmetric edge-weight mask, chosen once per edge type (see
        # RandomErdosRenyiGraphGenerator for the same pattern).
        if self.edge_type == EdgeType.UNIFORM:
            self.get_connection_mask = lambda : np.ones((self.n_spins,self.n_spins))
        elif self.edge_type == EdgeType.DISCRETE:
            def get_connection_mask():
                mask = 2. * np.random.randint(2, size=(self.n_spins, self.n_spins)) - 1.
                mask = np.tril(mask) + np.triu(mask.T, 1)
                return mask
            self.get_connection_mask = get_connection_mask
        elif self.edge_type == EdgeType.RANDOM:
            def get_connection_mask():
                mask = 2.*np.random.rand(self.n_spins,self.n_spins)-1
                mask = np.tril(mask) + np.triu(mask.T, 1)
                return mask
            self.get_connection_mask = get_connection_mask
        else:
            raise NotImplementedError()

    def get(self, with_padding=False):
        # Topology from networkx, weights from the mask.
        g = nx.barabasi_albert_graph(self.n_spins, self.m_insertion_edges)
        adj = np.multiply(nx.to_numpy_array(g), self.get_connection_mask())

        # No self-connections (this modifies adj in-place).
        np.fill_diagonal(adj, 0)

        return self.pad_matrix(adj) if with_padding else adj
class RandomRegularGraphGenerator(GraphGenerator):
    """Random d-regular graphs; node degree is re-sampled per graph from
    N(mean, std) given by d_node, clipped to [0, n_spins]."""
    def __init__(self, n_spins=20, d_node=[2,0], edge_type=EdgeType.DISCRETE, biased=False):
        super().__init__(n_spins, edge_type, biased)

        # Normalise a scalar degree into (mean, std=0) form.
        if type(d_node) not in [list,tuple]:
            d_node = [d_node, 0]
        assert len(d_node)==2, "k_neighbours must have length 2"
        self.d_node = d_node

        # Symmetric edge-weight mask, chosen once per edge type.
        if self.edge_type == EdgeType.UNIFORM:
            self.get_connection_mask = lambda : np.ones((self.n_spins,self.n_spins))
        elif self.edge_type == EdgeType.DISCRETE:
            def get_connection_mask():
                mask = 2. * np.random.randint(2, size=(self.n_spins, self.n_spins)) - 1.
                mask = np.tril(mask) + np.triu(mask.T, 1)
                return mask
            self.get_connection_mask = get_connection_mask
        elif self.edge_type == EdgeType.RANDOM:
            def get_connection_mask():
                mask = 2.*np.random.rand(self.n_spins,self.n_spins)-1
                mask = np.tril(mask) + np.triu(mask.T, 1)
                return mask
            self.get_connection_mask = get_connection_mask
        else:
            raise NotImplementedError()

    def get(self, with_padding=False):
        k = np.clip(int(np.random.normal(*self.d_node)),0,self.n_spins)
        g = nx.random_regular_graph(k, self.n_spins)
        adj = np.multiply(nx.to_numpy_array(g), self.get_connection_mask())

        # NOTE(review): the diagonal is only zeroed in the unbiased case —
        # unclear whether skipping it when biased is intentional; the other
        # generators always zero it. Confirm with the original authors.
        if not self.biased:
            # No self-connections (this modifies adj in-place).
            np.fill_diagonal(adj, 0)

        return self.pad_matrix(adj) if with_padding else adj
class RandomWattsStrogatzGraphGenerator(GraphGenerator):
    """Generates Watts-Strogatz ring lattices (rewiring probability 0) with
    randomly signed or weighted edges."""

    def __init__(self, n_spins=20, k_neighbours=[2, 0], edge_type=EdgeType.DISCRETE, biased=False):
        super().__init__(n_spins, edge_type, biased)

        if type(k_neighbours) not in [list, tuple]:
            k_neighbours = [k_neighbours, 0]
        assert len(k_neighbours) == 2, "k_neighbours must have length 2"
        self.k_neighbours = k_neighbours

        if self.edge_type == EdgeType.UNIFORM:
            def make_mask():
                return np.ones((self.n_spins, self.n_spins))
        elif self.edge_type == EdgeType.DISCRETE:
            def make_mask():
                # Symmetric random +/-1 weights (lower triangle mirrored up).
                weights = 2. * np.random.randint(2, size=(self.n_spins, self.n_spins)) - 1.
                return np.tril(weights) + np.triu(weights.T, 1)
        elif self.edge_type == EdgeType.RANDOM:
            def make_mask():
                # Symmetric random weights in (-1, 1).
                weights = 2. * np.random.rand(self.n_spins, self.n_spins) - 1
                return np.tril(weights) + np.triu(weights.T, 1)
        else:
            raise NotImplementedError()
        self.get_connection_mask = make_mask

    def get(self, with_padding=False):
        """Sample one ring lattice as a weighted adjacency matrix."""
        # Neighbour count drawn from N(mean, std), clipped to [0, n_spins].
        k = np.clip(int(np.random.normal(*self.k_neighbours)), 0, self.n_spins)
        lattice = nx.watts_strogatz_graph(self.n_spins, k, 0)
        adj = nx.to_numpy_array(lattice) * self.get_connection_mask()
        if not self.biased:
            # Zero the diagonal: no self-connections.
            np.fill_diagonal(adj, 0)
        return self.pad_matrix(adj) if with_padding else adj
################
# Known graphs #
################
class SingleGraphGenerator(GraphGenerator):
    """GraphGenerator that always serves one fixed graph (and optional bias)."""

    def __init__(self, matrix, bias=None):
        # Infer the edge-weight type from the values present in the matrix.
        if np.isin(matrix, [0, 1]).all():
            edge_type = EdgeType.UNIFORM
        elif np.isin(matrix, [0, -1, 1]).all():
            edge_type = EdgeType.DISCRETE
        else:
            edge_type = EdgeType.RANDOM

        super().__init__(matrix.shape[0], edge_type, bias is not None)
        self.matrix = matrix
        self.bias = bias

    def get(self, with_padding=False):
        """Return the stored matrix (and bias, if configured), optionally padded."""
        matrix = self.pad_matrix(self.matrix) if with_padding else self.matrix
        if not self.biased:
            return matrix
        bias = self.pad_bias(self.bias) if with_padding else self.bias
        return matrix, bias
class SetGraphGenerator(GraphGenerator):
    # Serves graphs from a fixed, pre-computed collection, either cyclically
    # (ordered=True) or by uniform random sampling per call.

    def __init__(self, matrices, biases=None, ordered=False):
        """
        Args:
            matrices: List of adjacency matrices, all of the same dimension.
            biases: Optional per-graph bias vectors (one element longer than
                n_spins -- see the assertion below).
            ordered: If True, serve graphs cyclically in list order.
        """
        # NOTE(review): the "-1" offset cancels out across graphs, so this
        # only enforces that all matrices share the same first dimension --
        # presumably a leftover from padded-size handling; confirm.
        if len(set([m.shape[0]-1 for m in matrices])) == 1:
            n_spins = matrices[0].shape[0]
        else:
            raise NotImplementedError("All graphs in SetGraphGenerator must have the same dimension.")

        # Infer the shared edge-weight type from the union of matrix values.
        if all([np.isin(m, [0, 1]).all() for m in matrices]):
            edge_type = EdgeType.UNIFORM
        elif all([np.isin(m, [0, -1, 1]).all() for m in matrices]):
            edge_type = EdgeType.DISCRETE
        else:
            edge_type = EdgeType.RANDOM

        super().__init__(n_spins, edge_type, biases is not None)

        if not self.biased:
            self.graphs = matrices
        else:
            assert len(matrices) == len(biases), "Must pass through the same number of matrices and biases."
            # NOTE(review): biases are required to be n_spins + 1 long --
            # TODO confirm this matches pad_bias / downstream expectations.
            assert all([len(b) == self.n_spins+1 for b in biases]), "All biases and must have the same dimension as the matrices."
            self.graphs = list(zip(matrices, biases))

        self.ordered = ordered
        if self.ordered:
            self.i = 0  # index of the next graph to serve

    def get(self, with_padding=False):
        """Return the next (ordered) or a uniformly random graph from the set."""
        if self.ordered:
            m = self.graphs[self.i]
            self.i = (self.i + 1) % len(self.graphs)
        else:
            m = random.sample(self.graphs, k=1)[0]
        # NOTE(review): when biased, m is a (matrix, bias) tuple, so
        # pad_matrix(m) looks incorrect -- confirm callers never request
        # padding on a biased set.
        return self.pad_matrix(m) if with_padding else m
class PerturbedGraphGenerator(GraphGenerator):
    # Serves graphs from a fixed collection, adding i.i.d. Gaussian noise to
    # the existing edges of the chosen graph on every call.

    def __init__(self, matrices, perturb_mean=0, perturb_std=0.01, biases=None, ordered=False):
        """
        Args:
            matrices: Base adjacency matrices (all the same dimension).
            perturb_mean: Mean of the Gaussian edge noise.
            perturb_std: Standard deviation of the Gaussian edge noise.
            biases: Not supported -- a non-None value raises NotImplementedError.
            ordered: If True, cycle through the base graphs in order.
        """
        if type(matrices) != list:
            matrices = list(matrices)
        if biases is not None:
            if type(biases) != list:
                biases = list(biases)
        # NOTE(review): as in SetGraphGenerator, the "-1" cancels out, so this
        # only enforces that all matrices share the same first dimension.
        if len(set([m.shape[0] - 1 for m in matrices])) == 1:
            n_spins = matrices[0].shape[0]
        else:
            raise NotImplementedError("All graphs passed to PerturbedGraphGenerator must have the same dimension.")

        # Perturbed weights are continuous, hence EdgeType.RANDOM.
        super().__init__(n_spins, EdgeType.RANDOM, biases is not None)

        self.perturb_mean = perturb_mean
        self.perturb_std = perturb_std

        if not self.biased:
            self.graphs = matrices
        else:
            raise NotImplementedError("Not implemented PerturbedGraphGenerator for biased graphs yet.")

        self.ordered = ordered
        if self.ordered:
            self.i = 0  # index of the next graph to serve

    def get(self, with_padding=False):
        """Return the next/random base graph with fresh Gaussian edge noise."""
        if self.ordered:
            m = self.graphs[self.i]
            self.i = (self.i + 1) % len(self.graphs)
            if self.biased:
                # NOTE(review): unreachable -- __init__ rejects biased inputs,
                # and the unpacked bias b is never returned below.
                m, b = m
        else:
            if not self.biased:
                m = random.sample(self.graphs, k=1)[0]
            else:
                m, b = random.sample(self.graphs, k=1)[0]

        # Sample noise for every entry.
        noise = np.random.normal(self.perturb_mean, self.perturb_std, size=m.shape)
        # Set noise to 0 for non-edges in the adjacency matrix.
        np.putmask(noise, m == 0, 0)
        # Ensure noise is symmetric by mirroring the lower triangle.
        noise = np.tril(noise) + np.triu(noise.T, 1)
        m = m + noise

        return self.pad_matrix(m) if with_padding else m
class HistoryBuffer():
    """Tracks the sets of actions visited so far and detects revisits.

    Previously seen states are bucketed by their size, so a membership
    check only scans states with the same number of toggled actions.
    """

    def __init__(self):
        self.buffer = {}  # maps state size -> list of previously seen states
        self.current_action_hist = set([])
        self.current_action_hist_len = 0

    def update(self, action):
        """Toggle `action` in the current state.

        Returns True if the resulting state has never been seen before,
        False if it is a revisit.
        """
        candidate = self.current_action_hist.copy()
        if action in candidate:
            candidate.remove(action)
            self.current_action_hist_len -= 1
        else:
            candidate.add(action)
            self.current_action_hist_len += 1

        seen_states = self.buffer.get(self.current_action_hist_len)
        if seen_states is not None and candidate in seen_states:
            # Revisiting a known state.
            self.current_action_hist = candidate
            return False

        if seen_states is None:
            seen_states = []
            self.buffer[self.current_action_hist_len] = seen_states
        seen_states.append(candidate)
        self.current_action_hist = candidate
        return True
|
[
"[email protected]"
] | |
362dea07929b2f462d1cd31ccaddf63ea6516831
|
ca909d90f466f4b2b8a637046692a07ff5084548
|
/tools/data_utils.py
|
43cbaefaf1a09f7236e947214de68959a691886b
|
[] |
no_license
|
simsimiSION/simVoice
|
da48d7ba9f48ddb257d67c14d3db55a35e34029a
|
eac905051a3794ce820b47487794a21aa2988d1a
|
refs/heads/master
| 2020-04-29T06:40:36.864920 | 2019-03-16T04:15:17 | 2019-03-16T04:15:17 | 175,924,977 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,913 |
py
|
#!/usr/bin/env python
# encoding: utf-8
# @author: simsimi
# @contact: dail:911
# @software: pycharm-anaconda3
# @file: data_utils.py
from tqdm import tqdm
from pprint import pprint
import pandas as pd
import numpy as np
import librosa
import pickle
# ----------------------csv 文件处理--------------------------#
# 整合csv文件
def csv_concat(csv_path_list, save_path):
    """Concatenate several CSV files (sharing one schema) into a single CSV.

    Bug fix: the original appended each file with mode='a+', which wrote a
    duplicate header row for every input file (and appended to any existing
    output). The merged file now has exactly one header.

    Args:
        csv_path_list: Paths of the CSV files to merge, in order.
        save_path: Path of the merged output CSV (overwritten).

    Raises:
        Exception: If csv_path_list is empty.
    """
    if len(csv_path_list) == 0:
        raise Exception(' csv地址列表不能为空 ')
    frames = [pd.read_csv(p, index_col=None) for p in csv_path_list]
    pd.concat(frames, ignore_index=True).to_csv(save_path, index=False)
# csv文件转化为pickle文件
def csv_to_pickle(csv_path, pickle_path):
    """Extract MFCC features for every (label, path) row of a CSV and pickle them.

    Bug fix: the original body referenced undefined names (voice_path, self,
    aishell_* accumulators, index), so it could never run. This version wires
    the intended pipeline together using the module-level voice_to_mfcc helper.

    Args:
        csv_path: CSV whose first column is an integer label and whose second
            column is a wav-file path.
        pickle_path: Output pickle; the mfcc list, label list and index list
            are dumped sequentially (readable via load_mfcc_pickle).
    """
    rows = pd.read_csv(csv_path).values
    labels = [int(i) for i in rows[:, 0]]
    paths = rows[:, 1]

    mfccs = []
    kept_labels = []
    indices = []
    index = 0
    for path, label in tqdm(zip(paths, labels)):
        complete, mfcc = voice_to_mfcc(path)
        if complete == False:
            # Unreadable audio file: skip the row entirely.
            continue
        mfccs.append(mfcc)
        kept_labels.append(label)
        indices.append(index)
        index += 1

    with open(pickle_path, 'wb') as mfcc_pickle:
        pickle.dump(mfccs, mfcc_pickle)
        pickle.dump(kept_labels, mfcc_pickle)
        pickle.dump(indices, mfcc_pickle)
# ----------------------pickle 文件处理--------------------------#
# 读取保存的pickle文件
def load_mfcc_pickle(pickle_path):
    """Load the (mfccs, labels, indices) triple dumped sequentially to one pickle file."""
    with open(pickle_path, 'rb') as fh:
        mfccs = pickle.load(fh)
        labels = pickle.load(fh)
        indices = pickle.load(fh)
    return mfccs, labels, indices
# 读取format pickle文件
def load_format_pickle(pickle_path):
    """Load the label -> mfcc-list dict written by pickle_format."""
    with open(pickle_path, 'rb') as fh:
        return pickle.load(fh)
# pickle format
def pickle_format(pickle_path, save_path):
    """Regroup the flat (mfcc, label) pickle into a dict keyed by label."""
    mfccs, labels, _indices = load_mfcc_pickle(pickle_path)

    grouped = {}
    for mfcc, label in tqdm(zip(mfccs, labels)):
        # Collect every mfcc under its speaker label.
        grouped.setdefault(label, []).append(mfcc)

    with open(save_path, 'wb') as fh:
        pickle.dump(grouped, fh)
# ----------------------dataset 处理--------------------------#
# 数据采样
def sample_voices(dataset, people_pre_batch, voices_pre_person):
    """Sample up to people_pre_batch * voices_pre_person preprocessed MFCC arrays.

    Classes (speakers) are visited in random order; from each class up to
    voices_pre_person voices are drawn, until the batch is full.

    :param dataset: list of classes, each a list of raw MFCC arrays.
    :param people_pre_batch: number of speakers contributing to a batch.
    :param voices_pre_person: maximum voices taken per speaker.
    :return: (preprocessed voice arrays, per-voice class indices,
              number of voices taken from each visited class)

    NOTE(review): the original docstring claimed the returned MFCCs were not
    pre-handled, but every sample passes through mfcc_prehandle below.
    NOTE(review): assumes the dataset holds enough voices to fill the batch;
    otherwise `i` overruns class_indices (IndexError) -- confirm callers.
    """
    nrof_voices = people_pre_batch * voices_pre_person

    # Visit the classes in random order.
    nrof_classes = len(dataset)
    class_indices = np.arange(nrof_classes)
    np.random.shuffle(class_indices)

    i = 0
    voices_arrays = []
    num_pre_class = []
    sampled_class_indices = []

    # Keep drawing from successive classes until the batch is full.
    while len(voices_arrays) < nrof_voices:
        # Class to sample from on this pass.
        class_index = class_indices[i]
        # Shuffle the indices of the voices within that class.
        nrof_voices_in_class = len(dataset[class_index])
        voices_indices = np.arange(nrof_voices_in_class)
        np.random.shuffle(voices_indices)
        # Take at most voices_pre_person samples, without overshooting the batch.
        nrof_voices_from_class = min(nrof_voices_in_class,
                                     voices_pre_person,
                                     nrof_voices-len(voices_arrays))
        idx = voices_indices[:nrof_voices_from_class]
        # Preprocess each selected voice (crop/pad + channel axis).
        voices_arrays_for_class = [mfcc_prehandle(dataset[class_index][j]) for j in idx]
        # Record the class label for every selected voice.
        sampled_class_indices += [class_index] * nrof_voices_from_class
        # Accumulate the batch.
        voices_arrays += voices_arrays_for_class
        num_pre_class.append(nrof_voices_from_class)
        i += 1

    return voices_arrays, sampled_class_indices, num_pre_class
def selcet_triplet(embeddings, nrof_voices_per_class, voice_arrays, people_per_batch, alpha):
    """Select (anchor, positive, negative) triplets for training.

    For every anchor/positive pair within a class, one negative violating the
    margin (neg_dist - pos_dist < alpha) is picked at random, if any exists.
    (The "selcet" typo in the name is kept for backward compatibility.)

    :param embeddings: embedding vectors, row-aligned with voice_arrays.
    :param nrof_voices_per_class: number of voices each of the
        people_per_batch classes contributed, in embedding order.
    :param voice_arrays: raw inputs corresponding to the embeddings.
    :param people_per_batch: number of classes represented in the batch.
    :param alpha: triplet margin.
    :return: (shuffled triplets, number of candidate pairs, len(triplets))
    """
    trip_idx = 0
    emb_start_idx = 0
    num_trips = 0
    triplets = []

    for i in range(people_per_batch):
        # Number of voices belonging to the current class.
        nrof_voices = int(nrof_voices_per_class[i])
        for j in range(1, nrof_voices):
            a_idx = emb_start_idx + j - 1
            # Squared distances from the anchor to every embedding.
            neg_dists_sqr = np.sum(np.square(embeddings[a_idx]-embeddings), 1)
            for pair in range(j, nrof_voices):
                p_idx = emb_start_idx + pair
                # Squared anchor-positive distance.
                pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))
                # Mask same-class entries (NaN comparisons are False below),
                # so they can never be selected as negatives.
                neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_voices] = np.NaN
                # Indices of negatives that violate the margin.
                all_neg = np.where(neg_dists_sqr-pos_dist_sqr<alpha)[0]
                nrof_random_negs = all_neg.shape[0]
                if nrof_random_negs > 0:
                    # Pick one violating negative at random.
                    rnd_idx = np.random.randint(nrof_random_negs)
                    n_idx = all_neg[rnd_idx]
                    triplets.append((voice_arrays[a_idx], voice_arrays[p_idx], voice_arrays[n_idx]))
                    trip_idx += 1
                num_trips += 1
        emb_start_idx += nrof_voices

    np.random.shuffle(triplets)
    return triplets, num_trips, len(triplets)
# ----------------------mfcc 处理--------------------------#
# 读取音频文件并通过mfcc转化
def voice_to_mfcc(wav):
    """Load an audio file and compute its MFCC features.

    Bug fixes: the bare `except:` also swallowed KeyboardInterrupt/SystemExit,
    and the loaded signal used to overwrite `wav`, so a failure after loading
    printed the raw array instead of the file path.

    :param wav: path of the audio file.
    :return: (True, mfcc) on success, with mfcc shaped (frames, n_mfcc);
             (False, None) if loading/feature extraction fails.
    """
    try:
        signal, sr = librosa.load(wav, mono=True)
        mfcc = np.transpose(librosa.feature.mfcc(signal, sr), [1, 0])
        return True, mfcc
    except Exception:
        print(str(wav) + ' 加载错误')
        return False, None
# mfcc 预处理
def mfcc_prehandle(mfcc, MFCC_STRIDE=256):
    """Crop or zero-pad an MFCC sequence to MFCC_STRIDE frames and add a channel axis.

    Sequences longer than MFCC_STRIDE are randomly cropped; shorter ones are
    left-padded with zeros so the content sits at the end.

    :param mfcc: ndarray of shape (frames, n_mfcc).
    :return: ndarray of shape (MFCC_STRIDE, n_mfcc, 1).
    """
    if not isinstance(mfcc, np.ndarray):
        raise Exception('mfcc文件格式错误')
    length = mfcc.shape[0]

    if length > MFCC_STRIDE:
        # Randomly crop a MFCC_STRIDE-frame window.
        start = np.random.randint(0, length - MFCC_STRIDE)
        window = mfcc[start:start + MFCC_STRIDE, :]
    else:
        # Left-pad with zeros.
        window = np.zeros((MFCC_STRIDE, mfcc.shape[1]))
        window[MFCC_STRIDE - length:, :] = mfcc

    return np.expand_dims(window, -1)
if __name__ == '__main__':
    pickle_path = '../resource/aishell_mfcc.pickle'
    save_path = '../resource/aishell_triplet_mfcc.pickle'

    # Load the label -> mfcc-list dict and draw a sampling batch.
    aishell = load_format_pickle(save_path)
    voices, labels, nums_per_class = sample_voices(aishell, 10, 20)

    # Bug fixes: np.random is a module, not a callable, and
    # selcet_triplet_with_label does not exist -- call selcet_triplet with
    # arguments matching its (embeddings, nrof_voices_per_class,
    # voice_arrays, people_per_batch, alpha) signature.
    embeddings = np.random.rand(len(voices), 128)
    triplets, num_trips, triplets_len = selcet_triplet(embeddings, nums_per_class, voices, 10, 0.2)
|
[
"[email protected]"
] | |
65c180e0455a95187b16075fcc7b7267cae1394e
|
06209bdd16094babef1fd05345f05082fc5517c8
|
/finance/migrations/0004_auto_20200330_1317.py
|
015bd0b4aa7d89511813d9136b9d0593a6712106
|
[] |
no_license
|
Bmawira/Bmawira
|
7a365c8b22787ca509485eb3992a3d6576eb7a8c
|
8799dac69782f3570860ab92abbccb66fe557fca
|
refs/heads/main
| 2023-04-01T13:03:08.394608 | 2021-04-15T10:31:45 | 2021-04-15T10:31:45 | 358,217,352 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,754 |
py
|
# Generated by Django 2.2.6 on 2020-03-30 13:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: drops the hour-tracking fields from Payslip
    # and introduces the PettyCash and Expenditure models.

    dependencies = [
        ('iamadmin', '0003_auto_20200312_1510'),
        ('pm', '0001_initial'),
        ('hr', '0027_auto_20200328_0702'),
        ('finance', '0003_auto_20200328_0451'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='payslip',
            name='extra_hours',
        ),
        migrations.RemoveField(
            model_name='payslip',
            name='normal_hours',
        ),
        # Cash float disbursed to a workstation, tracked against spending.
        migrations.CreateModel(
            name='PettyCash',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount', models.FloatField(default=0.0)),
                ('spent', models.FloatField(blank=True, default=0.0)),
                ('date_disbursed', models.DateTimeField()),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='petty_cash', to='iamadmin.Company')),
                ('recorded_by', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='cash_by', to='hr.Employee')),
                ('workstation', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='site_cash', to='pm.Workstation')),
            ],
        ),
        # Individual expense drawn from a PettyCash lot, with optional receipt scan.
        migrations.CreateModel(
            name='Expenditure',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount', models.FloatField(default=0.0)),
                ('change', models.FloatField(blank=True, default=0.0)),
                ('expense_date', models.DateTimeField()),
                ('description', models.TextField()),
                ('receipt_scan', models.ImageField(blank=True, upload_to='expense')),
                ('cash_lot', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='expense_cash', to='finance.PettyCash')),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='c_expenses', to='iamadmin.Company')),
                ('expense_by', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='expense_by', to='hr.Employee')),
                ('recorded_by', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='expense_rec_by', to='hr.Employee')),
                ('workstation', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='site_expenses', to='pm.Workstation')),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
2a7030555a4fca5f3679e7876b8ba8659e260d02
|
b6df4e1f96acb130b73e2fb9221d09c5dc18122f
|
/file.py
|
5c90e3c1e060ecf4c98c99762d2aba7eed08bbcf
|
[] |
no_license
|
ATHIRAP123/iedc
|
bd7924fa7f1f3af258d254a9a42e79cd13dccdd6
|
89bed9d8112d91d113db58284ed536623ceea8c1
|
refs/heads/master
| 2020-04-21T11:52:35.427565 | 2019-02-08T00:32:55 | 2019-02-08T00:32:55 | 169,542,472 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 79 |
py
|
# Write a short sentence to myfile.txt.
# Bug fix: the original ended with `myfile.close` (missing parentheses), so
# the file handle was never actually closed; a with-block closes it reliably.
with open("myfile.txt", "w") as myfile:
    myfile.write("india is my country")
|
[
"[email protected]"
] | |
5f0e9bb8690706e78de7f9f2e079d922a650337a
|
14d0e3f1498c8b30621cb2c1e8d5c9e883a3fdd4
|
/setup.py
|
b1e7fbe78bd648b48028638883d85256c33b0ff5
|
[
"BSD-2-Clause"
] |
permissive
|
tokibito/django-csvutils
|
964d7bd3ab31bfba74ff3a9ac676b2feae579220
|
85d7e8368e8744372a75572102961067152157c8
|
refs/heads/master
| 2021-01-25T10:00:41.828427 | 2010-10-10T15:08:17 | 2010-10-10T15:08:17 | 1,132,363 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 693 |
py
|
#!/usr/bin/env python
#:coding=utf-8:
from setuptools import setup, find_packages
# Packaging metadata for the django-csvutils distribution.
setup (
    name='django-csvutils',
    version='0.1',
    description='CSV utilities for Django',
    author='Shinya Okano',
    author_email='[email protected]',
    url='http://bitbucket.org/beproud/django-csvutils/',
    # Trove classifiers describing maturity, audience and license.
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Plugins',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    packages=["csvutils"],
    #test_suite='tests.main',
)
|
[
"[email protected]"
] | |
69039729b5b60b87a8fe2d3f1259187e85c16226
|
5bcc18af2ffbf60e570900e78b9ce4c3dcbc9eb7
|
/Async in Django - aiohttp/mymoviedb/views.py
|
06f0ab0c897379a3bebfc7eca326a91f6ffe3b5a
|
[] |
no_license
|
Mystique-orca/Async-operations-Django
|
4fa68f59efaeeda1ea8e2a97e9ae3f0404b2c1c1
|
c4c581df24e57b9dcd5147734df33f43f56410d4
|
refs/heads/main
| 2023-02-15T09:10:40.718414 | 2021-01-12T05:43:48 | 2021-01-12T05:43:48 | 328,884,148 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 273 |
py
|
from django.apps import apps
from .utils import database_sync_to_async
class ModelMixin:
    """View mixin that resolves `model` (an "app_label.ModelName" string via
    the Django app registry) and filters its queryset asynchronously."""

    model = None  # "app_label.ModelName" string, set by subclasses

    async def get_queryset(self, **kwargs):
        """Return the filtered queryset, running the ORM call off the event loop."""
        model_cls = apps.get_model(self.model)
        run_filter = database_sync_to_async(model_cls.objects.filter)
        return await run_filter(**kwargs)
|
[
"[email protected]"
] | |
5fcbb0aa1a9c1d5675f151f2e7c7d65353f0821f
|
b46935a9c04e3d675723cb429ca58eaccc4682e3
|
/PrimeChecker.py
|
4450babd65a4505154aaa10a934df1e35708e114
|
[] |
no_license
|
dhruvcheemakurti/PrimeChecker
|
b7fb3a2390b7afefa5ce413c2926a11b3e29001f
|
c2f2277cac508284acf006f96d4c1bd2f69020d7
|
refs/heads/master
| 2020-12-20T22:53:20.949624 | 2020-01-25T21:33:50 | 2020-01-25T21:33:50 | 236,232,608 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 176 |
py
|
# Read an integer from stdin and report whether it is prime.
number = int(input())

# Count the divisors of `number` that are strictly smaller than it.
factor = 0
for i in range(1, number):
    if number % i == 0:
        factor = factor + 1

# A prime has exactly one such divisor (1). Bug fixes: the original compared
# the undefined name `counter` (NameError at runtime), and its threshold
# (> 2) misclassified composites like 4 (divisors 1 and 2) as prime; numbers
# below 2 are also not prime.
if number < 2 or factor > 1:
    print("not prime")
else:
    print("prime")
|
[
"[email protected]"
] | |
328563c64eb4c15a16bd1e55a13109a1b9a461ea
|
dd116fe1e94191749ab7a9b00be25bfd88641d82
|
/cairis/cairis/data/AttackerDAO.py
|
01d297cfe2a83a44767b9013e87d4ce371e9615c
|
[
"Apache-2.0"
] |
permissive
|
RobinQuetin/CAIRIS-web
|
fbad99327707ea3b995bdfb4841a83695989e011
|
4a6822db654fecb05a09689c8ba59a4b1255c0fc
|
HEAD
| 2018-12-28T10:53:00.595152 | 2015-06-20T16:53:39 | 2015-06-20T16:53:39 | 33,935,403 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 16,226 |
py
|
import ARM
from AttackerEnvironmentProperties import AttackerEnvironmentProperties
from CairisHTTPError import ARMHTTPError, ObjectNotFoundHTTPError, MalformedJSONHTTPError, MissingParameterHTTPError, \
OverwriteNotAllowedHTTPError
from Attacker import Attacker
from AttackerParameters import AttackerParameters
from ValueType import ValueType
from ValueTypeParameters import ValueTypeParameters
from data.CairisDAO import CairisDAO
from tools.JsonConverter import json_serialize, json_deserialize
from tools.ModelDefinitions import AttackerModel, AttackerEnvironmentPropertiesModel
from tools.SessionValidator import check_required_keys
__author__ = 'Robin Quetin'
class AttackerDAO(CairisDAO):
    """Data-access object for Attacker objects and their associated
    'capability' and 'motivation' value types, scoped to one CAIRIS session.

    Every database error is translated into an ARMHTTPError, and the DAO
    closes its session before raising.
    """

    def __init__(self, session_id):
        """
        :param session_id: CAIRIS session to bind this DAO to.
        :raise CairisHTTPError:
        """
        CairisDAO.__init__(self, session_id)

    def get_attackers(self, constraint_id=-1, simplify=True):
        """Fetch all attackers, optionally simplified for serialization.

        :param constraint_id: optional constraint filter passed to the proxy.
        :param simplify: strip lookup dictionaries from each attacker.
        :rtype: dict[str,Attacker]
        :raise ARMHTTPError:
        """
        try:
            attackers = self.db_proxy.getAttackers(constraint_id)
        except ARM.DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARM.ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)

        if simplify:
            for key, value in attackers.items():
                attackers[key] = self.simplify(value)

        return attackers

    def get_attacker_by_name(self, name, simplify=True):
        """Fetch a single attacker by name.

        :rtype: Attacker
        :raise ObjectNotFoundHTTPError: if no attacker has that name.
        """
        attackers = self.get_attackers(simplify=simplify)
        found_attacker = attackers.get(name, None)

        if found_attacker is None:
            self.close()
            raise ObjectNotFoundHTTPError('The provided attacker name')

        return found_attacker

    def add_attacker(self, attacker):
        """Insert a new attacker, refusing to overwrite an existing one.

        :type attacker: Attacker
        :rtype: int
        :raise ARMHTTPError:
        :raise OverwriteNotAllowedHTTPError: if the name is already taken.
        """
        attacker_params = AttackerParameters(
            name=attacker.theName,
            desc=attacker.theDescription,
            image=attacker.theImage,
            tags=attacker.theTags,
            properties=attacker.theEnvironmentProperties
        )

        try:
            if not self.check_existing_attacker(attacker.theName):
                new_id = self.db_proxy.addAttacker(attacker_params)
                return new_id
            else:
                self.close()
                raise OverwriteNotAllowedHTTPError(obj_name=attacker.theName)
        except ARM.DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARM.ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)

    def update_attacker(self, attacker, name):
        """Replace the attacker currently stored under `name` with `attacker`."""
        # Resolve the existing record first so we can reuse its id.
        found_attacker = self.get_attacker_by_name(name, simplify=False)

        attacker_params = AttackerParameters(
            name=attacker.theName,
            desc=attacker.theDescription,
            image=attacker.theImage,
            tags=attacker.theTags,
            properties=attacker.theEnvironmentProperties
        )
        attacker_params.setId(found_attacker.theId)

        try:
            self.db_proxy.updateAttacker(attacker_params)
        except ARM.DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARM.ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)

    def delete_attacker(self, name):
        """Delete the attacker stored under `name`."""
        found_attacker = self.get_attacker_by_name(name, simplify=False)
        attacker_id = found_attacker.theId

        try:
            self.db_proxy.deleteAttacker(attacker_id)
        except ARM.DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARM.ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)

    def check_existing_attacker(self, name):
        """Return True when an attacker with this name already exists.

        Relies on nameCheck raising an 'already exists' error for duplicates.
        :rtype: bool
        :raise: ARMHTTPError
        """
        try:
            self.db_proxy.nameCheck(name, 'attacker')
            return False
        except ARM.DatabaseProxyException as ex:
            if str(ex.value).find('already exists') > -1:
                return True
            self.close()
            raise ARMHTTPError(ex)
        except ARM.ARMException as ex:
            if str(ex.value).find('already exists') > -1:
                return True
            self.close()
            raise ARMHTTPError(ex)

    # region Capabilities
    def get_attacker_capabilities(self, environment_name=''):
        """Fetch the 'capability' value types, optionally for one environment.

        :rtype: list[ValueType]
        :raise: ARMHTTPError
        """
        try:
            attacker_capabilities = self.db_proxy.getValueTypes('capability', environment_name)
            return attacker_capabilities
        except ARM.DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARM.ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)

    def get_attacker_capability_by_name(self, name, environment_name=''):
        """Fetch a single capability value type by name.

        :rtype: ValueType
        :raise ObjectNotFoundHTTPError:
        """
        found_capability = None
        attacker_capabilities = self.get_attacker_capabilities(environment_name=environment_name)

        if attacker_capabilities is None or len(attacker_capabilities) < 1:
            self.close()
            raise ObjectNotFoundHTTPError('Attacker capabilities')

        # Linear scan; capability lists are expected to be small.
        idx = 0
        while found_capability is None and idx < len(attacker_capabilities):
            if attacker_capabilities[idx].theName == name:
                found_capability = attacker_capabilities[idx]
            idx += 1

        if found_capability is None:
            self.close()
            raise ObjectNotFoundHTTPError('The provided attacker capability name')

        return found_capability

    def add_attacker_capability(self, attacker_capability, environment_name=''):
        """Insert a new capability value type.

        :rtype: int
        :raises ARMHTTPError:
        :raises OverwriteNotAllowedHTTPError:
        """
        assert isinstance(attacker_capability, ValueType)
        type_exists = self.check_existing_attacker_capability(attacker_capability.theName, environment_name=environment_name)

        if type_exists:
            self.close()
            raise OverwriteNotAllowedHTTPError(obj_name='The attacker capability')

        params = ValueTypeParameters(
            vtName=attacker_capability.theName,
            vtDesc=attacker_capability.theDescription,
            vType='capability',
            envName=environment_name,
            vtScore=attacker_capability.theScore,
            vtRat=attacker_capability.theRationale
        )

        try:
            return self.db_proxy.addValueType(params)
        except ARM.DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARM.ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)

    def update_attacker_capability(self, attacker_capability, name, environment_name=''):
        """Replace the capability stored under `name` with `attacker_capability`."""
        assert isinstance(attacker_capability, ValueType)

        found_capability = self.get_attacker_capability_by_name(name, environment_name)

        params = ValueTypeParameters(
            vtName=attacker_capability.theName,
            vtDesc=attacker_capability.theDescription,
            vType='capability',
            envName=environment_name,
            vtScore=attacker_capability.theScore,
            vtRat=attacker_capability.theRationale
        )
        params.setId(found_capability.theId)

        try:
            self.db_proxy.updateValueType(params)
        except ARM.DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARM.ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)

    def delete_attacker_capability(self, name, environment_name=''):
        """Delete the capability stored under `name`.

        :raise ARMHTTPError:
        """
        found_capability = self.get_attacker_capability_by_name(name, environment_name)

        try:
            # NOTE(review): deleteAssetType looks like a copy-paste from the
            # asset DAO -- presumably a value-type delete was intended; confirm
            # against the database proxy API.
            self.db_proxy.deleteAssetType(found_capability.theId)
        except ARM.DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARM.ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)

    def check_existing_attacker_capability(self, name, environment_name):
        """Return True when the capability already exists; reconnects after the miss."""
        try:
            self.get_attacker_capability_by_name(name, environment_name)
            return True
        except ObjectNotFoundHTTPError:
            # The lookup closed the session on a miss, so re-open it.
            self.db_proxy.reconnect(session_id=self.session_id)
            return False
    # endregion

    # region Motivations
    def get_attacker_motivations(self, environment_name=''):
        """Fetch the 'motivation' value types, optionally for one environment.

        :rtype: list[ValueType]
        :raise ARMHTTPError:
        """
        try:
            attacker_motivations = self.db_proxy.getValueTypes('motivation', environment_name)
            return attacker_motivations
        except ARM.DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARM.ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)

    def get_attacker_motivation_by_name(self, name, environment_name=''):
        """Fetch a single motivation value type by name.

        :rtype: ValueType
        :raise ObjectNotFoundHTTPError:
        """
        found_motivation = None
        attacker_motivations = self.get_attacker_motivations(environment_name=environment_name)

        if attacker_motivations is None or len(attacker_motivations) < 1:
            self.close()
            raise ObjectNotFoundHTTPError('Attacker motivations')

        # Linear scan; motivation lists are expected to be small.
        idx = 0
        while found_motivation is None and idx < len(attacker_motivations):
            if attacker_motivations[idx].theName == name:
                found_motivation = attacker_motivations[idx]
            idx += 1

        if found_motivation is None:
            self.close()
            raise ObjectNotFoundHTTPError('The provided attacker motivation name')

        return found_motivation

    def add_attacker_motivation(self, attacker_motivation, environment_name=''):
        """Insert a new motivation value type.

        :rtype: int
        :raises CairisHTTPError:
        :raises OverwriteNotAllowedHTTPError:
        """
        assert isinstance(attacker_motivation, ValueType)
        type_exists = self.check_existing_attacker_motivation(attacker_motivation.theName, environment_name=environment_name)

        if type_exists:
            self.close()
            raise OverwriteNotAllowedHTTPError(obj_name='The attacker motivation')

        params = ValueTypeParameters(
            vtName=attacker_motivation.theName,
            vtDesc=attacker_motivation.theDescription,
            vType='motivation',
            envName=environment_name,
            vtScore=attacker_motivation.theScore,
            vtRat=attacker_motivation.theRationale
        )

        try:
            return self.db_proxy.addValueType(params)
        except ARM.DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARM.ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)

    def update_attacker_motivation(self, attacker_motivation, name, environment_name=''):
        """Replace the motivation stored under `name` with `attacker_motivation`.

        :raise: ARMHTTPError:
        """
        assert isinstance(attacker_motivation, ValueType)

        found_motivation = self.get_attacker_motivation_by_name(name, environment_name)

        params = ValueTypeParameters(
            vtName=attacker_motivation.theName,
            vtDesc=attacker_motivation.theDescription,
            vType='motivation',
            envName=environment_name,
            vtScore=attacker_motivation.theScore,
            vtRat=attacker_motivation.theRationale
        )
        params.setId(found_motivation.theId)

        try:
            self.db_proxy.updateValueType(params)
        except ARM.DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARM.ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)

    def delete_attacker_motivation(self, name, environment_name=''):
        """Delete the motivation stored under `name`.

        :raise: ARMHTTPError:
        """
        found_motivation = self.get_attacker_motivation_by_name(name, environment_name)

        try:
            # NOTE(review): deleteAssetType again -- same suspected copy-paste
            # as delete_attacker_capability; confirm the intended proxy call.
            self.db_proxy.deleteAssetType(found_motivation.theId)
        except ARM.DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARM.ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)

    def check_existing_attacker_motivation(self, name, environment_name):
        """Return True when the motivation already exists; reconnects after the miss.

        :rtype: bool
        :raise: ARMHTTPError:
        """
        try:
            self.get_attacker_motivation_by_name(name, environment_name)
            return True
        except ObjectNotFoundHTTPError:
            # The lookup closed the session on a miss, so re-open it.
            self.db_proxy.reconnect(session_id=self.session_id)
            return False
    # endregion

    def from_json(self, request):
        """Deserialize an Attacker from the request's JSON body.

        :rtype: Attacker
        :raise MalformedJSONHTTPError:
        """
        json = request.get_json(silent=True)
        if json is False or json is None:
            self.close()
            raise MalformedJSONHTTPError(data=request.get_data())

        json_dict = json['object']
        check_required_keys(json_dict, AttackerModel.required)
        # Tag the dict so json_deserialize instantiates an Attacker.
        json_dict['__python_obj__'] = Attacker.__module__ + '.' + Attacker.__name__

        # Environment properties are converted separately and re-attached,
        # since the generic deserializer cannot rebuild them.
        attacker_props = self.convert_props(fake_props=json_dict['theEnvironmentProperties'])
        json_dict['theEnvironmentProperties'] = []

        attacker = json_serialize(json_dict)
        attacker = json_deserialize(attacker)
        attacker.theEnvironmentProperties = attacker_props
        if not isinstance(attacker, Attacker):
            self.close()
            raise MalformedJSONHTTPError(data=request.get_data())
        else:
            return attacker

    def simplify(self, obj):
        """Strip lookup dictionaries and flatten the environment properties
        so the attacker serializes cleanly to JSON."""
        assert isinstance(obj, Attacker)
        obj.theEnvironmentDictionary = {}
        obj.likelihoodLookup = {}
        obj.theAttackerPropertyDictionary = {}

        delattr(obj, 'theEnvironmentDictionary')
        delattr(obj, 'likelihoodLookup')
        delattr(obj, 'theAttackerPropertyDictionary')
        obj.theEnvironmentProperties = self.convert_props(real_props=obj.theEnvironmentProperties)

        return obj

    def convert_props(self, real_props=None, fake_props=None):
        """Convert environment properties between the domain representation
        (AttackerEnvironmentProperties, capabilities as (name, value) tuples)
        and the JSON-friendly one (capabilities as name/value dicts).

        Exactly one of real_props / fake_props must be supplied.
        """
        new_props = []
        if real_props is not None:
            if len(real_props) > 0:
                for real_prop in real_props:
                    assert isinstance(real_prop, AttackerEnvironmentProperties)
                    # Flatten the (name, value) capability tuples into dicts.
                    capabilities = []
                    for capability in real_prop.theCapabilities:
                        if len(capability) == 2:
                            capabilities.append({
                                'name': capability[0],
                                'value': capability[1]
                            })
                    real_prop.theCapabilities = capabilities
                    new_props.append(real_prop)
        elif fake_props is not None:
            if len(fake_props) > 0:
                for fake_prop in fake_props:
                    check_required_keys(fake_prop, AttackerEnvironmentPropertiesModel.required)
                    # Rebuild the (name, value) capability tuples.
                    cap_list = []
                    assert isinstance(cap_list, list)
                    for cap in fake_prop['theCapabilities']:
                        cap_list.append((cap['name'], cap['value']))
                    new_prop = AttackerEnvironmentProperties(
                        environmentName=fake_prop['theEnvironmentName'],
                        roles=fake_prop['theRoles'],
                        motives=fake_prop['theMotives'],
                        capabilities=cap_list
                    )
                    new_props.append(new_prop)
        else:
            self.close()
            raise MissingParameterHTTPError(param_names=['real_props', 'fake_props'])

        return new_props
|
[
"[email protected]"
] | |
a6055a3c1a584f66e8f90f1cbee22eb42c315b03
|
c356b0f0d380b3dee5bf38334e6f162875ea23d9
|
/05_scrapy/python_01_ernesto/python_01_ernesto/settings.py
|
39df3f6086736d512a0f3147dd494f0d157bcbae
|
[
"MIT"
] |
permissive
|
AlexaVane/Velasco-Yepez-Andres-David-Python
|
d29bd7f22a1dc69e3e106fd75811aecd129fe23a
|
0c017d6e5f169f31207ddec5ceffc8dd82d327eb
|
refs/heads/master
| 2020-08-23T15:16:54.268707 | 2019-02-09T08:52:23 | 2019-02-09T08:52:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,187 |
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for python_01_ernesto project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# Project identity used by Scrapy (logging and the default User-Agent).
BOT_NAME = 'python_01_ernesto'

# Where Scrapy discovers existing spiders / places newly generated ones.
SPIDER_MODULES = ['python_01_ernesto.spiders']
NEWSPIDER_MODULE = 'python_01_ernesto.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'python_01_ernesto (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'python_01_ernesto.middlewares.Python01ErnestoSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'python_01_ernesto.middlewares.Python01ErnestoDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'python_01_ernesto.pipelines.Python01ErnestoPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"[email protected]"
] | |
4583dbc64958c40d7357f148adb9f69f16cf7c8e
|
bdc4a92558539abb18196886f9a56f6086f62d8e
|
/HW-2/practical_SGD.py
|
ad208d7d102186b420810956a2ebfd4e928efbf8
|
[] |
no_license
|
caglademirhan/FIZ437E-Homework-2
|
8987709f6d8b84e29778a3792ea074be7cc5dd4c
|
d29f972a371ac9992e4d054731064d840f7a382e
|
refs/heads/main
| 2023-09-05T04:53:11.730176 | 2021-11-14T19:32:37 | 2021-11-14T19:32:37 | 425,187,922 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,503 |
py
|
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
# Download (first run) / load MNIST; images are converted to float tensors.
train_data = dsets.MNIST(root='./Data', train=True, transform=transforms.ToTensor(), download=True)
test_data = dsets.MNIST(root='./Data', train=False, transform=transforms.ToTensor())
# Training schedule: run enough epochs to perform ~3000 optimizer steps
# at a mini-batch size of 100 (60000/100 = 600 steps per epoch -> 5 epochs).
batch = 100
iters = 3000
epochs = int(iters / (len(train_data) / batch))
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=batch, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_data, batch_size=batch, shuffle=False)
class LRModel(nn.Module):
    """Multinomial logistic-regression model: a single linear layer.

    Args:
        input_size: number of input features (e.g. 28*28 for flattened MNIST).
        num_classes: number of output classes (one logit per class).
    """

    def __init__(self, input_size, num_classes):
        super(LRModel, self).__init__()
        # BUG FIX: the original ignored both constructor arguments and read the
        # module-level globals ``input_dim``/``output_dim`` instead, so the
        # class only worked if those globals happened to exist.  Call sites
        # pass the same values, so behavior is unchanged.
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        # Returns raw logits; nn.CrossEntropyLoss applies log-softmax itself.
        return self.linear(x)
# Model setup: flattened 28x28 image in, one logit per digit class out.
input_dim = 28**2 ; output_dim = 10
model = LRModel(input_dim, output_dim)
# Run on GPU when available, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
criterio = nn.CrossEntropyLoss()  # expects raw logits + integer class labels
L_R = 1e-3  # learning rate
optimizer_regu = torch.optim.SGD(model.parameters(), lr=L_R)
print("Standart SGD: ")
# NOTE(review): ``iter`` shadows the built-in of the same name.
iter = 0
for epoch in range(epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Flatten each 28x28 image into a 784-vector.
        images = images.view(-1, 28*28).requires_grad_().to(device)
        labels = labels.to(device)
        optimizer_regu.zero_grad()
        outputs = model(images)
        loss = criterio(outputs, labels)
        loss.backward()
        optimizer_regu.step()
        iter = iter + 1
        # Every 500 steps: compute accuracy over the full test set.
        # NOTE(review): evaluation runs without torch.no_grad(); correct but
        # wastes memory on autograd bookkeeping.
        if iter % 500 == 0:
            correct = 0
            total = 0
            for images, labels in test_loader:
                images = images.view(-1, 28*28).to(device)
                outputs = model(images)
                _, predicted = torch.max(outputs.data, 1)
                total = total + labels.size(0)
                if torch.cuda.is_available():
                    correct = correct + (predicted.cpu() == labels.cpu()).sum()
                else:
                    correct = correct + (predicted == labels).sum()
            accuracy = 100 * correct.item() / total
            print('Iter: {}. L: {}. Ac: {}'.format(iter, loss.item(), accuracy)) # info
# Second run: identical loop but with L2 regularization via SGD weight_decay.
# NOTE(review): this continues training the SAME model instance, so the two
# runs are not an independent comparison.
print("SGD with weight decay : ")
optimizer_w_regu = torch.optim.SGD(model.parameters(), lr=L_R, weight_decay=1e-5)
iter = 0
for epoch in range(epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.view(-1, 28*28).requires_grad_().to(device)
        labels = labels.to(device)
        optimizer_w_regu.zero_grad()
        outputs = model(images)
        loss = criterio(outputs, labels)
        loss.backward()
        optimizer_w_regu.step()
        iter = iter + 1
        if iter % 500 == 0:
            correct = 0
            total = 0
            for images, labels in test_loader:
                images = images.view(-1, 28*28).to(device)
                outputs = model(images)
                _, predicted = torch.max(outputs.data, 1)
                total = total + labels.size(0)
                if torch.cuda.is_available():
                    correct = correct + (predicted.cpu() == labels.cpu()).sum()
                else:
                    correct = correct + (predicted == labels).sum()
            accuracy = 100 * correct.item() / total
            print('Iter: {}. L: {}. Ac: {}'.format(iter, loss.item(), accuracy)) # info
|
[
"[email protected]"
] | |
44595c484df47c4d12aacab65f4a7b48a3c05a69
|
40e37e4e0754eaa7877603801f1caa9bec4942a5
|
/app/core/controller/ControllerEstatus.py
|
d9e36056715d1b75573d4d10e9f62de3137fa48c
|
[
"MIT"
] |
permissive
|
DesarrolloActivoEAM/activo-django-docker-postgres
|
c96b12739ed341d914eb9ee2ec19bed642ac6e48
|
1041fd74fffd92a2c136eef3eb96ed269292f7c6
|
refs/heads/main
| 2023-06-27T17:08:17.421249 | 2021-08-04T22:12:39 | 2021-08-04T22:12:39 | 392,806,139 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 984 |
py
|
from ..serializers import EstatusSerializer
from ..models import EstatusUsuario
class ControllerEstatus:
    """Thin controller around the EstatusUsuario model.

    Note: the methods take no ``self`` and are invoked directly on the
    class (``ControllerEstatus.crearestatus(request)``), matching callers.
    """

    def crearestatus(request):
        """Create an EstatusUsuario row from ``request.data``.

        Returns ``{"estatus": "Error"}`` when the payload lacks 'activo';
        otherwise saves the row and echoes the stored value back.
        """
        payload = request.data
        nuevo = EstatusUsuario()
        try:
            nuevo.activo = payload['activo']
        except Exception:
            # Payload did not carry the required 'activo' key.
            return {"estatus": "Error"}
        nuevo.save()
        return {"estatus": "Ok", 'scope': nuevo.activo}

    def listarestatus(id_estatus=None):
        """Serialize one estatus (by id) or all of them when no id is given."""
        if not id_estatus:
            todos = EstatusUsuario.objects.all()
            return EstatusSerializer(todos, many=True).data
        try:
            fila = EstatusUsuario.objects.get(id_estatus=id_estatus)
        except EstatusUsuario.DoesNotExist:
            return ({'result': 'No se encontró el estatus deseado'})
        return EstatusSerializer(fila).data
|
[
"[email protected]"
] | |
c5690d2fd14d01f08bf8f251da604588fd5202d0
|
faaf8af02a069919c4d553ff8e944bee8ccad806
|
/Python para Zumbis/1.py
|
f861885a654189f1139b857c2ef6ec4d35faed3c
|
[] |
no_license
|
marcioandrews/Estudos_Python
|
5fe10993fc0ebc5a75d8a04687bc55cae7bd9fda
|
07bf0683114132c726ed22c5d1c1be8ed7fc7fb2
|
refs/heads/master
| 2022-11-20T21:18:57.529507 | 2020-07-13T16:29:03 | 2020-07-13T16:29:03 | 279,171,877 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 78 |
py
|
# Read two integers from the user and print their sum.
n1 = int (input ('numero 1 '))
n2 = int (input ('numero 2 '))
print (n1 + n2)
|
[
"[email protected]"
] | |
829f15c337c350ad9fceadcd1f44706ff07b4a37
|
5a2c02dda6e4f79140996cd96d10c2c5d07c777a
|
/venv/lib/python2.7/site-packages/botocore/__init__.py
|
bae03e376975fc7376269486d73a117f822b3799
|
[] |
no_license
|
cclaassen3/service_learn_sustain
|
655dea9c216314de68c898b4c1652370aa4b5afa
|
96c47b93bc2b83915aae6131116f441c1d19d7f5
|
refs/heads/master
| 2021-01-19T09:15:43.070241 | 2017-04-25T16:00:33 | 2017-04-25T16:00:33 | 87,743,357 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,593 |
py
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import os
import re

__version__ = '1.5.2'


class NullHandler(logging.Handler):
    """Logging handler that silently discards every record."""

    def emit(self, record):
        pass


# By default the library logger swallows everything; applications opt in
# by attaching their own handlers.
log = logging.getLogger('botocore')
log.addHandler(NullHandler())

# Regex building blocks for the CamelCase -> separator-delimited transform.
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_number_cap_regex = re.compile('([a-z])([0-9]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# Trailing pluralized acronyms, e.g. GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{3,}s$')

# Seed the memoization cache with names whose mechanical transformation
# would come out wrong.
_xform_cache = {
    ('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
    ('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
    ('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
    ('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
    ('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
    ('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
    ('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
    ('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
}

# Partial renames applied globally to every transformed name, so a common
# mis-translation is fixed once rather than per occurrence.
_partial_renames = {
    'ipv-6': 'ipv6',
    'ipv_6': 'ipv6',
}

ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')

BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))

# Sentinel used to request an anonymous (unsigned) request signature.
UNSIGNED = object()


def xform_name(name, sep='_', _xform_cache=_xform_cache,
               partial_renames=_partial_renames):
    """Convert a CamelCase name to its "pythonic" equivalent.

    Names that already contain ``sep`` are returned unchanged.  Results are
    memoized in ``_xform_cache`` keyed on ``(name, sep)``.
    """
    if sep in name:
        # Already transformed; hand it back untouched.
        return name
    cache_key = (name, sep)
    try:
        return _xform_cache[cache_key]
    except KeyError:
        pass
    acronym_match = _special_case_transform.search(name)
    if acronym_match is not None:
        # Detach a trailing pluralized acronym (ARNs -> _arns, ACLs -> _acls)
        # before the generic regexes run, so it lowercases as one unit.
        acronym = acronym_match.group()
        name = name[:-len(acronym)] + sep + acronym.lower()
    step = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
    step = _number_cap_regex.sub(r'\1' + sep + r'\2', step)
    result = _end_cap_regex.sub(r'\1' + sep + r'\2', step).lower()
    for old, new in partial_renames.items():
        # str.replace is a no-op when ``old`` is absent.
        result = result.replace(old, new)
    _xform_cache[cache_key] = result
    return result
|
[
"[email protected]"
] | |
1477574a6be24653b60d9f9b21fd6656ade4f4d9
|
0484331138f73d05f9f173805bba122d246952a4
|
/src/manage.py
|
75568c911d3054c0a62fe705a2400dcbc67588e2
|
[] |
no_license
|
sl3760/EasyVC
|
805c6f19c0b8e9e5bfee4495fe1d0a2e0380db84
|
3ee63475be29ccc715296badc7dfed02e31672c5
|
refs/heads/master
| 2021-01-13T01:37:25.703371 | 2014-05-15T04:38:25 | 2014-05-15T04:38:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 249 |
py
|
#!/usr/bin/env python
# Django management entry point: points Django at this project's settings
# module, then delegates to the standard command-line dispatcher.
import os
import sys
if __name__ == "__main__":
    # setdefault keeps an explicitly exported DJANGO_SETTINGS_MODULE intact.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "easyvc.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
3dfe193901cf9aa28f6a4d72adcdaf72d7bf3727
|
d27af2880f61e8e4b1848559dd06155a456874fe
|
/dev_reference_lines/ourmatplotlib.py
|
d93c5bd1a724a796269d21222c281372ce67e6d7
|
[] |
no_license
|
CINF/cinfdata
|
6c60371cdca69409be139dd1f0eab9288da422c0
|
d91b0e292095ee2ba748ebd803b794c00be37d43
|
refs/heads/master
| 2023-08-17T11:35:01.321323 | 2023-08-11T06:54:37 | 2023-08-11T06:54:37 | 58,733,415 | 0 | 3 | null | 2023-08-11T06:54:38 | 2016-05-13T11:23:33 |
Python
|
UTF-8
|
Python
| false | false | 16,407 |
py
|
#!/usr/bin/python
"""
This file is part of the CINF Data Presentation Website
Copyright (C) 2012 Robert Jensen, Thomas Andersen and Kenneth Nielsen
The CINF Data Presentation Website is free software: you can
redistribute it and/or modify it under the terms of the GNU
General Public License as published by the Free Software
Foundation, either version 3 of the License, or
(at your option) any later version.
The CINF Data Presentation Website is distributed in the hope
that it will be useful, but WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License
along with The CINF Data Presentation Website. If not, see
<http://www.gnu.org/licenses/>.
"""
from optparse import OptionParser
import sys
import hashlib
# set HOME environment variable to a directory the httpd server can write to
# (matplotlib writes its font cache under $HOME)
import os
os.environ[ 'HOME' ] = '/var/www/cinfdata/figures'
# System-wide ctypes cannot be run by apache... strange...
sys.path.insert(1, '/var/www/cinfdata')
from pytz import timezone
import numpy as np
# Matplotlib must be imported before MySQLdb (in dataBaseBackend), otherwise we
# get an ugly error
import matplotlib
# NOTE(review): the Agg backend selection is commented out; presumably the
# default backend works under this server setup — confirm before changing.
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import matplotlib.dates as mdates
# Import our own classes
#from databasebackend import dataBaseBackend
from common import Color
class Plot():
"""This class is used to generate the figures for the plots."""
    def __init__(self, options, ggs):
        """Store options/graph settings and push figure defaults to pyplot.

        Args:
            options: dict of per-request plot options (read via self.o).
            ggs: dict of 'general graph settings', including an optional
                'matplotlib_settings' sub-dict and 'image_format'.
        """
        self.o = options
        self.ggs = ggs
        # Set the image format to standard, overwite with ggs value and again
        # options value if it exits
        if self.o['image_format'] == '':
            self.image_format = self.ggs['image_format']
        else:
            self.image_format = self.o['image_format']
        # Default values for matplotlib plots (names correspond to ggs names)
        mpl_settings = {'width': 900,
                        'height': 600,
                        'title_size': '24',
                        'xtick_labelsize': '12',
                        'ytick_labelsize': '12',
                        'legend_fontsize': '10',
                        'label_fontsize': '16',
                        'linewidth': 1.0,
                        'grid': False}
        # Owerwrite defaults with gs values and convert to appropriate types
        # NOTE(review): type(value)(...) coercion is wrong for bools — e.g.
        # bool('False') is True — so a ggs 'grid' string always enables the
        # grid; confirm whether ggs ever carries string booleans.
        for key, value in mpl_settings.items():
            try:
                mpl_settings[key] = type(value)(self.ggs['matplotlib_settings'][key])
            except KeyError:
                pass
        # Write some settings to pyplot (figsize is in inches at 100 dpi)
        rc_temp = {'figure.figsize': [float(mpl_settings['width'])/100,
                                      float(mpl_settings['height'])/100],
                   'axes.titlesize': mpl_settings['title_size'],
                   'xtick.labelsize': mpl_settings['xtick_labelsize'],
                   'ytick.labelsize': mpl_settings['ytick_labelsize'],
                   'legend.fontsize': mpl_settings['legend_fontsize'],
                   'axes.labelsize': mpl_settings['label_fontsize'],
                   'lines.linewidth': mpl_settings['linewidth'],
                   'axes.grid': mpl_settings['grid']
                   }
        plt.rcParams.update(rc_temp)
        # Plotting options
        self.maxticks=15
        self.tz = timezone('Europe/Copenhagen')
        self.right_yaxis = None
        self.measurement_count = None
        # Colors object, will be filled in at new_plot
        self.c = None
    def new_plot(self, data, plot_info, measurement_count):
        """Build and save a complete plot from the given data.

        Orchestrates the pipeline: color assignment, axis setup, plotting,
        zoom/flip, titles/labels, reference lines, and finally saving.
        The figure is only finished and saved when _plot reports success.
        """
        self.c = Color(data, self.ggs)
        self.measurement_count = sum(measurement_count)
        self._init_plot(data)
        # _plot returns True or False to indicate whether the plot is good
        if self._plot(data):
            self._zoom_and_flip(data)
            self._title_and_labels(plot_info)
            self._add_reference_lines(data)
            self._save(plot_info)
    def _init_plot(self, data):
        """Create the figure and axes, enabling the right y-axis and log
        scales only where the data/options call for them."""
        self.fig = plt.figure(1)
        self.ax1 = self.fig.add_subplot(111)
        # We only activate the right y-axis, if there there points to put on it
        self.right_yaxis = sum([len(dat['data']) for dat in data['right']]) > 0
        if self.right_yaxis:
            self.ax2 = self.ax1.twinx()
        # Log scales are opt-in per axis via options.
        if self.o['left_logscale']:
            self.ax1.set_yscale('log')
        if self.right_yaxis and self.o['right_logscale']:
            self.ax2.set_yscale('log')
def _plot(self, data):
""" Determine the type of the plot and make the appropriate plot by use
of the functions:
_plot_dateplot
_plot_xyplot
"""
if self.ggs['default_xscale'] == 'dat':
return self._plot_dateplot(data)
else:
return self._plot_xyplot(data)
    def _plot_dateplot(self, data):
        """Plot time-series data against a date x-axis.

        Returns False (after drawing an error message on the axes) when the
        configuration is unusable — no data at all, or nothing on the left
        y-axis — and True when the plot was drawn.
        NOTE(review): dict.has_key is Python 2 only; this module is py2 code.
        """
        # Rotate datemarks on xaxis
        self.ax1.set_xticklabels([], rotation=25, horizontalalignment='right')
        # Test for un-workable plot configurations
        error_msg = None
        # Test if there is data on the left axis
        if sum([len(dat['data']) for dat in data['left']]) == 0:
            error_msg = 'There must\nbe data on\nthe left y-axis'
        # Test if there is any data at all
        if self.measurement_count == 0:
            error_msg = 'No data'
        # No data: draw the message centered (y adjusted for log scale)
        if error_msg:
            y = 0.00032 if self.o['left_logscale'] is True else 0.5
            self.ax1.text(0.5, y, error_msg, horizontalalignment='center',
                          verticalalignment='center', color='red', size=60)
            return False
        # Left axis
        for dat in data['left']:
            # Form legend
            if dat['lgs'].has_key('legend'):
                legend = dat['lgs']['legend']
            else:
                legend = None
            # Plot (column 0 is a unix timestamp, column 1 the value)
            if len(dat['data']) > 0:
                self.ax1.plot_date(mdates.epoch2num(dat['data'][:,0]),
                                   dat['data'][:,1],
                                   label=legend,
                                   xdate=True,
                                   color=self.c.get_color(),
                                   tz=self.tz,
                                   fmt='-')
        # Right axis
        if self.right_yaxis:
            for dat in data['right']:
                # Form legend
                if dat['lgs'].has_key('legend'):
                    legend = dat['lgs']['legend']
                else:
                    legend = None
                # Plot
                if len(dat['data']) > 0:
                    self.ax2.plot_date(mdates.epoch2num(dat['data'][:,0]),
                                       dat['data'][:,1],
                                       label=legend,
                                       xdate=True,
                                       color=self.c.get_color(),
                                       tz=self.tz,
                                       fmt='-')
        # Set xtick formatter (only if we have points)
        if self.measurement_count > 0:
            xlim = self.ax1.set_xlim()
            diff = max(xlim) - min(xlim)  # in days
            format_out = '%H:%M:%S'  # Default
            # Diff limit to date format translation, will pick the format
            # format of the largest limit the diff is larger than. Limits
            # are in minutes.
            formats = [
                [1.0, '%a %H:%M'],   # Larger than 1 day
                [7.0, '%Y-%m-%d'],   # Larger than 7 day
                [7*30., '%Y-%m'],    # Larger than 3 months
            ]
            for limit, format in formats:
                if diff > limit:
                    format_out = format
            fm = mdates.DateFormatter(format_out, tz=self.tz)
            self.ax1.xaxis.set_major_formatter(fm)
        # Indicate that the plot is good
        return True
    def _plot_xyplot(self, data):
        """Plot generic x-y data on the left (and, when active, right) axis.

        Always returns True; with no data at all an error message is drawn
        on the axes instead of curves.
        """
        # Left axis
        for dat in data['left']:
            # Form legend (has_key is Python 2 only)
            if dat['lgs'].has_key('legend'):
                legend = dat['lgs']['legend']
            else:
                legend = None
            # Plot; colors are keyed on the data set id for consistency
            if len(dat['data']) > 0:
                self.ax1.plot(dat['data'][:,0],
                              dat['data'][:,1],
                              '-',
                              label=legend,
                              color=self.c.get_color(dat['lgs']['id']),
                              )
        # Right axis
        for dat in data['right']:
            # Form legend
            if dat['lgs'].has_key('legend'):
                legend = dat['lgs']['legend']
            else:
                legend = None
            # Plot
            if len(dat['data']) > 0:
                self.ax2.plot(dat['data'][:,0],
                              dat['data'][:,1],
                              '-',
                              label=legend,
                              color=self.c.get_color(dat['lgs']['id'])
                              )
        # No data: draw a centered message (y adjusted for log scale)
        if self.measurement_count == 0:
            y = 0.00032 if self.o['left_logscale'] is True else 0.5
            self.ax1.text(0.5, y, 'No data', horizontalalignment='center',
                          verticalalignment='center', color='red', size=60)
        # Indicate that the plot is good
        return True
    def _zoom_and_flip(self, data):
        """ Apply the y zooms.
        Explicit y bounds from options win; otherwise, when an x zoom is
        active, y bounds are inferred from the points inside the x window.
        NOTE: self.ax1.axis() return a list of bounds [xmin,xmax,ymin,ymax] and
        we reuse x and replace y)
        """
        left_yscale_inferred = self.o['left_yscale_bounding']
        right_yscale_inferred = self.o['right_yscale_bounding']
        # X-axis zoom and infer y-axis zoom implications
        if self.o['xscale_bounding'] is not None and\
                self.o['xscale_bounding'][1] > self.o['xscale_bounding'][0]:
            # Set the x axis scaling, unsure if we should do it for ax2 as well
            self.ax1.set_xlim(self.o['xscale_bounding'])
            # With no specific left y-axis zoom, infer it from x-axis zoom
            if left_yscale_inferred is None:
                left_yscale_inferred = self._infer_y_on_x_zoom(
                    data['left'], self.o['left_logscale'])
            # With no specific right y-axis zoom, infer it from x-axis zoom
            if right_yscale_inferred is None and self.right_yaxis:
                right_yscale_inferred = self._infer_y_on_x_zoom(
                    data['right'])
        # Left axis
        if left_yscale_inferred is not None:
            self.ax1.set_ylim(left_yscale_inferred)
        # Right axis
        if self.right_yaxis and right_yscale_inferred is not None:
            self.ax2.set_ylim(right_yscale_inferred)
        # Mirror the x axis when requested (set_xlim() with no args reads it)
        if self.o['flip_x']:
            self.ax1.set_xlim((self.ax1.set_xlim()[1],self.ax1.set_xlim()[0]))
def _infer_y_on_x_zoom(self, list_of_data_sets, log=None):
"""Infer the implied Y axis zoom with an X axis zoom, for one y axis"""
yscale_inferred = None
min_candidates = []
max_candidates = []
for dat in list_of_data_sets:
# Make mask that gets index for points where x is within bounds
mask = (dat['data'][:, 0] > self.o['xscale_bounding'][0]) &\
(dat['data'][:, 0] < self.o['xscale_bounding'][1])
# Gets all the y values from that mask
reduced = dat['data'][mask, 1]
# Add min/max candidates
if len(reduced) > 0:
min_candidates.append(np.min(reduced))
max_candidates.append(np.max(reduced))
# If there are min/max candidates, set the inferred left y bounding
if len(min_candidates) > 0 and len(max_candidates) > 0:
min_, max_ = np.min(min_candidates), np.max(max_candidates)
height = max_ - min_
yscale_inferred = (min_ - height*0.05, max_ + height*0.05)
return yscale_inferred
    def _title_and_labels(self, plot_info):
        """Apply axis labels, title and legends.

        For each of xlabel / left ylabel / right ylabel / title: the value
        from plot_info (plus its optional addition line) is set first, and a
        non-empty value in options then overrides it.
        NOTE(review): dict.has_key is Python 2 only; this module is py2 code.
        """
        # xlabel
        if plot_info.has_key('xlabel'):
            label = plot_info['xlabel']
            if plot_info['xlabel_addition'] != '':
                label += '\n' + plot_info['xlabel_addition']
            self.ax1.set_xlabel(label)
        if self.o['xlabel'] != '':  # Manual override
            self.ax1.set_xlabel(r'{0}'.format(self.o['xlabel']))
        # Left ylabel
        if plot_info.has_key('left_ylabel'):
            label = plot_info['left_ylabel']
            if plot_info['y_left_label_addition'] != '':
                label += '\n' + plot_info['y_left_label_addition']
            self.ax1.set_ylabel(label, multialignment='center')
        if self.o['left_ylabel'] != '':  # Manual override
            self.ax1.set_ylabel(self.o['left_ylabel'], multialignment='center')
        # Right ylabel
        if self.right_yaxis and plot_info.has_key('right_ylabel'):
            label = plot_info['right_ylabel']
            if plot_info['y_right_label_addition'] != '':
                label += '\n' + plot_info['y_right_label_addition']
            self.ax2.set_ylabel(label, multialignment='center', rotation=270)
        if self.o['right_ylabel'] != '':  # Manual override
            self.ax2.set_ylabel(self.o['right_ylabel'],
                                multialignment='center', rotation=270)
        # Title
        if plot_info.has_key('title'):
            self.ax1.set_title(plot_info['title'], y=1.03)
        if self.o['title'] != '':
            # experiment with 'r{0}'.form .. at some time
            self.ax1.set_title('{0}'.format(self.o['title']), y=1.03)
        # Legends: merge right-axis entries into the left-axis legend box
        if self.measurement_count > 0:
            ax1_legends = self.ax1.get_legend_handles_labels()
            if self.right_yaxis:
                ax2_legends = self.ax2.get_legend_handles_labels()
                for color, text in zip(ax2_legends[0], ax2_legends[1]):
                    ax1_legends[0].append(color)
                    ax1_legends[1].append(text)
            # loc for locations, 0 means 'best'. Why that isn't deafult I
            # have no idea
            legends = self.ax1.legend(ax1_legends[0], ax1_legends[1], loc=0)
            # Make legend lines thicker
            for legend_handle in legends.legendHandles:
                legend_handle.set_linewidth(6)
def _add_reference_lines(self, data):
"""Add reference lines to the plot"""
for value in data['reference_line_info']:
plt.axvline(x=value[0], ymax=( value[1] / 10000 )
# KARL TODO. In this method, actually draw the reference lines
# onto the graph. Remember, the data argument is the dict you
# wrote information to in databasebackend and the lef axes
# (which I think is the one you want to use is called self.ax1
    def _save(self, plot_info):
        """Write the finished figure to stdout in the requested image format.

        NOTE(review): uses the Python 2 StringIO module; py2 code.
        """
        # The tight method only works if there is a title (it caps of parts of
        # the axis numbers, therefore this hack, this may also become a problem
        # for the other edges of the figure if there are no labels)
        tight = ''
        if plot_info.has_key('title'):
            tight = 'tight'
        # For some wierd reason we cannot write directly to sys.stdout when it
        # is a pdf file, so therefore we use a the StringIO object workaround
        if self.o['image_format'] == 'pdf':
            import StringIO
            out = StringIO.StringIO()
            self.fig.savefig(out, bbox_inches=tight, pad_inches=0.03,
                             format=self.o['image_format'])
            sys.stdout.write(out.getvalue())
        else:
            self.fig.savefig(sys.stdout, bbox_inches=tight, pad_inches=0.03,
                             format=self.o['image_format'])
|
[
"[email protected]"
] | |
b8212889d6b20712d6dc7e09b2af346ddbf3babd
|
13a954fed4bced90c325e5508900b0f8665d0f08
|
/day_2/list_iterate.py
|
80d62d57c8fcf03d73a7af678bee6a0920c0d396
|
[
"MIT"
] |
permissive
|
anishLearnsToCode/ml-workshop-wac-2
|
64b84589fa0b45057bf36bd1f073f12a17a8eba2
|
9992acd30f4b74ce2debf0d5ff3d8a1b78b1163f
|
refs/heads/main
| 2023-02-21T02:36:06.542630 | 2021-01-19T17:38:48 | 2021-01-19T17:38:48 | 327,333,663 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 81 |
py
|
# Print each number in the list on its own line.
numbers = [2, 3, 5, 7, 11, 13, 19, 23]
for number in numbers:
    print(number)
|
[
"[email protected]"
] | |
c170aaa2633ce3220690a016a2aefaf01660220e
|
c72b6b2e7b7421505b88489d6026f8c5226fbf31
|
/Chapter8/ex8.6.py
|
26ea69e57888cb4004b40d33b910e1c7343b4608
|
[] |
no_license
|
amadjarov/thinkPython
|
1185fe8ee5d47ecba56c251f0c5d537fe82b5b18
|
7e8fb3a96fa6607587efac07abff04a342c15f96
|
refs/heads/master
| 2021-01-22T16:25:18.790251 | 2014-05-29T09:54:11 | 2014-05-29T09:54:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 630 |
py
|
def find(word, letter, index):
    """Return the index of the first occurrence of letter in word at or
    after position index, or -1 when there is none.

    BUG FIX: the original printed the index / -1 and implicitly returned
    None, which broke count_v2's ``index == -1`` test (it compared None).
    Also converted from Python 2 print statements to Python 3.
    """
    while index < len(word):
        if word[index] == letter:
            return index
        index += 1
    return -1


def count_v2(word, letter, start):
    """Count occurrences of letter in word from position start onwards;
    print the count (preserving the original script's output) and return it."""
    how_many = 0
    index = find(word, letter, start)
    while index != -1:
        how_many += 1
        # Resume the search just past the previous hit.
        index = find(word, letter, index + 1)
    print(how_many)
    return how_many


count_v2('banana', 'x', 0)
count_v2('banana', 'a', 4)
count_v2('banana', 'a', 2)
count_v2('banana', 'a', 0)
|
[
"[email protected]"
] | |
84620c7a9c9a4148a5440e5110ee1abccb676b0a
|
ffa1cc65fea1d2e1998f41684e687610179f9b27
|
/pinyin/pinyin.py
|
e9bc5ad36dc3df7936ae2b5c3556e89cf1496d63
|
[] |
no_license
|
geodge831012/learning_example
|
e9b419e6e7a3cbb31c36a01786e3f15bd2a7b54f
|
40061b52f4c3c62486d8e0575088bd561c78171e
|
refs/heads/master
| 2021-07-22T07:09:53.264766 | 2020-09-17T02:46:24 | 2020-09-17T02:46:24 | 215,983,279 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 249 |
py
|
# -*- coding: utf-8 -*-
# Demo of the third-party xpinyin package: convert Chinese text to pinyin.
from xpinyin import Pinyin
p = Pinyin()
# Default conversion (syllables joined with '-').
a = p.get_pinyin(u"上海")
print(a)
print(type(a))
# Conversion with tone marks.
b = p.get_pinyin(u"上海", tone_marks='marks')
print(b)
print(type(b))
# Conversion with an empty separator (syllables concatenated).
c = p.get_pinyin(u"上海", '')
print(c)
print(type(c))
|
[
"[email protected]"
] | |
c477ff81c9b1feba08d0ef6621a1c2c2e4a1acac
|
b5c5c27d71348937322b77b24fe9e581cdd3a6c4
|
/tests/pyutils/test_is_invalid.py
|
d39c12e2c935eb05fc776988bbe838d3d98d9059
|
[
"MIT"
] |
permissive
|
dfee/graphql-core-next
|
92bc6b4e5a39bd43def8397bbb2d5b924d5436d9
|
1ada7146bd0510171ae931b68f6c77dbdf5d5c63
|
refs/heads/master
| 2020-03-27T10:30:43.486607 | 2018-08-30T20:26:42 | 2018-08-30T20:26:42 | 146,425,198 | 0 | 0 |
MIT
| 2018-08-28T09:40:09 | 2018-08-28T09:40:09 | null |
UTF-8
|
Python
| false | false | 865 |
py
|
from math import inf, nan
from graphql.error import INVALID
from graphql.pyutils import is_invalid
def describe_is_invalid():
    """Checks for graphql.pyutils.is_invalid (pytest-describe style group).

    Only the INVALID sentinel and NaN count as invalid; every ordinary
    value — including None and falsy objects — does not.
    """
    def null_is_not_invalid():
        # None is a legitimate (null) value, distinct from INVALID.
        assert is_invalid(None) is False

    def falsy_objects_are_not_invalid():
        assert is_invalid('') is False
        assert is_invalid(0) is False
        assert is_invalid([]) is False
        assert is_invalid({}) is False

    def truthy_objects_are_not_invalid():
        assert is_invalid('str') is False
        assert is_invalid(1) is False
        assert is_invalid([0]) is False
        assert is_invalid({None: None}) is False

    def inf_is_not_invalid():
        # Infinities are valid floats (unlike NaN below).
        assert is_invalid(inf) is False
        assert is_invalid(-inf) is False

    def undefined_is_invalid():
        assert is_invalid(INVALID) is True

    def nan_is_invalid():
        assert is_invalid(nan) is True
|
[
"[email protected]"
] | |
2f816bb890383cc7f178bf5be4d2290e2fbdfa61
|
4e81512b34223788559ea1c84acb2ef0aa4d899d
|
/booktracker/settings.py
|
6831cbc70ea526b979e29eb6fc1e105511cae832
|
[] |
no_license
|
arsummers/book-tracker-django
|
75a2e559c5dd05be67287a40514533a699889368
|
012fa821288ee99f45665e017bc8b7ab4db54a1f
|
refs/heads/master
| 2022-12-11T16:28:27.393199 | 2019-10-15T21:28:37 | 2019-10-15T21:28:37 | 209,672,800 | 0 | 0 | null | 2022-12-08T06:38:29 | 2019-09-20T00:37:11 |
Python
|
UTF-8
|
Python
| false | false | 3,428 |
py
|
"""
Django settings for booktracker project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'v9j%)jsi$x1sp8oqfgln@m0a^1*0%z&4defyjpd#0ld@=^5vdx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'books.apps.BooksConfig',
]
MIDDLEWARE = [
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'booktracker.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'booktracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
# Let django-heroku patch DATABASES/static settings from the Heroku env.
django_heroku.settings(locals())
|
[
"[email protected]"
] | |
1c48b42dfc347ccae1fca77e710bbdde6f730db7
|
47b6ae75b73b6ae5f29532e4d0edf2eae5a45428
|
/manage.py
|
0ff1f58ae4235767d7c39f80fdd2692283fc6665
|
[] |
no_license
|
smartwy/blog
|
f2844972fe7135b5e752713b1b4065ead1f5a7c2
|
83bd4aeb94c90a5e61d3d7f7fbf11db78a3f6a15
|
refs/heads/master
| 2020-04-08T07:06:24.128586 | 2018-11-26T07:18:29 | 2018-11-26T07:18:29 | 159,126,937 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 541 |
py
|
#!/usr/bin/env python
# Django management entry point for the blog_cmdb project.
import os
import sys
if __name__ == '__main__':
    # setdefault keeps an explicitly exported DJANGO_SETTINGS_MODULE intact.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blog_cmdb.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
2d6556b603b8f16cc9085de0d0c0a7fd73b0eb82
|
fcbad89386611fe3b0d49fa5276d3759dac69edc
|
/Assigment05.py
|
e005f9745c4621996708aeeb28a122905b47b730
|
[] |
no_license
|
killmerjason/IntroToProg-Python
|
2f0da3149229d11e6f538e6a29bd54cb9586b2e9
|
c80cfc5551359e71bc6c2a5beafa08662624663b
|
refs/heads/main
| 2023-03-14T08:38:27.551461 | 2021-02-18T02:33:31 | 2021-02-18T02:33:31 | 339,881,240 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,176 |
py
|
# ------------------------------------------------------------------------ #
# Title: Assignment 05
# Description: Working with Dictionaries and Files
#              When the program starts, load each "row" of data
#              in "ToDoList.txt" into a python Dictionary.
#              Add each dictionary "row" to a python list "table"
# ChangeLog (Who,When,What):
# RRoot,1.1.2030,Created started script
# Jason Killmer,2/13/2021,Added code to complete assignment 5
# ------------------------------------------------------------------------ #

# -- Data -- #
# declare variables and constants
objFile = "ToDoList.txt"  # An object that represents a file
strData = ""  # A row of text data from the file
dicRow = {}  # A row of data separated into elements of a dictionary {Task,Priority}
lstTable = []  # A list that acts as a 'table' of rows (each row is a (task, priority) tuple)
strMenu = ""  # A menu of user options
strChoice = ""  # A capture of the user option selection
strTask = ""  # A capture of the user's task addition/deletion
strPriority = ""  # A capture of the user's priority addition
strInstances = ""  # A capture of the user's choice whether to remove multiple instances

# -- Processing -- #
# Step 1 - When the program starts, load any data you have
# in a text file called ToDoList.txt into a python list of (task, priority) rows.
# NOTE(review): the path is hard-coded; the script fails if the file is absent.
objFile = open("C:\\_PythonClass\\Assignment05\\ToDoList.txt", "r")
for strData in objFile.readlines():
    # Each saved row looks like "task, priority"; dict.items() yields tuples,
    # so lstTable ends up holding (task, priority) pairs.
    dicRow = {strData.split(", ")[0]: strData.split(",")[1].strip()}
    lstTable += dicRow.items()
objFile.close()

# -- Input/Output -- #
# Step 2 - Display a menu of choices to the user
while (True):
    strMenu = """
    Menu of Options
    1) Show current data
    2) Add a new item.
    3) Remove an existing item.
    4) Save Data to File
    5) Exit Program
    """
    print(strMenu)
    strChoice = str(input("Which option would you like to perform? [1 to 5] - "))
    print()  # adding a new line for looks

    # Step 3 - Show the current items in the table
    if (strChoice.strip() == '1'):
        for strData in lstTable:
            print(strData[0] + ", ", strData[1])
        continue

    # Step 4 - Add a new item to the list/Table
    elif (strChoice.strip() == '2'):
        strTask = input("Task? ")
        strPriority = input("Priority? ")
        dicRow = {strTask: strPriority}
        lstTable += dicRow.items()
        print("Added: " + strTask + ", " + strPriority)
        continue

    # Step 5 - Remove an item from the list/Table
    elif (strChoice.strip() == '3'):
        strTask = input("Which task would you like to remove? ").lower().strip()
        strInstances = input("""Would you like to remove all instances of this task?
        If not, only the first instance will be deleted. """)
        if strInstances.lower().strip().startswith("y"):
            # Iterate in reverse so removing the current element never skips
            # the next one the iterator will visit.
            for strData in reversed(lstTable):
                dicRow = {strData[0].lower().strip(): strData[1]}
                if strTask in dicRow:
                    lstTable.remove(strData)
                    print("Removed: " + strData[0] + ", " + strData[1])
        else:
            for strData in lstTable:
                dicRow = {strData[0].lower().strip(): strData[1]}
                if strTask in dicRow:
                    lstTable.remove(strData)
                    # BUG FIX: strData is a (task, priority) tuple; the original
                    # "Removed: " + strData raised TypeError (str + tuple).
                    print("Removed: " + strData[0] + ", " + strData[1])
                    break
        continue

    # Step 6 - Save tasks to the ToDoList.txt file
    elif (strChoice.strip() == '4'):
        if input("Would you like to save data to the file? ").lower().strip().startswith("y"):
            objFile = open("C:\\_PythonClass\\Assignment05\\ToDoList.txt", "w")
            for strData in lstTable:
                objFile.write((strData[0] + ", " + strData[1] + "\n"))
            objFile.close()
            print("Saved!")
        else:
            print("Not Saved!")
        continue

    # Step 7 - Exit program
    elif (strChoice.strip() == '5'):
        print("Program exited.")
        objFile.close()
        break  # and Exit the program
|
[
"[email protected]"
] | |
5628b540ad53bf7290b179cb3f6de1f245706da2
|
bd3528cc321dc37f8c47ac63e57561fd6432c7cc
|
/transformer/tensor2tensor/models/xception.py
|
2452a7d4ff23d06b687e61f5eea6106e13c22930
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
oskopek/cil
|
92bbf52f130a1ed89bbe93b74eef74027bb2b37e
|
4c1fd464b5af52aff7a0509f56e21a2671fb8ce8
|
refs/heads/master
| 2023-04-15T10:23:57.056162 | 2021-01-31T14:51:51 | 2021-01-31T14:51:51 | 139,629,560 | 2 | 5 |
MIT
| 2023-03-24T22:34:39 | 2018-07-03T19:35:24 |
Python
|
UTF-8
|
Python
| false | false | 5,857 |
py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Xception."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
def residual_block(x, hparams):
  """Three subseparable conv blocks with a residual add, layernorm, dropout."""
  kernel = (hparams.kernel_height, hparams.kernel_width)
  # Three identical (dilation, kernel) specs for the stacked conv block.
  specs = [((1, 1), kernel)] * 3
  conv_out = common_layers.subseparable_conv_block(
      x,
      hparams.hidden_size,
      specs,
      padding="SAME",
      separability=0,
      name="residual_block")
  normed = common_layers.layer_norm(x + conv_out, hparams.hidden_size,
                                    name="lnorm")
  return tf.nn.dropout(normed, 1.0 - hparams.dropout)
def xception_internal(inputs, hparams):
  """Xception body: entry flow (or small-image conv), residual stack, exit."""
  with tf.variable_scope("xception"):
    cur = inputs
    # Heuristic: static height > 200 is treated as a "large" image.
    # NOTE(review): assumes the height is statically known; a dynamic (None)
    # dimension would raise TypeError on this comparison — confirm callers.
    if cur.get_shape().as_list()[1] > 200:
      # Large image, Xception entry flow
      cur = xception_entry(cur, hparams.hidden_size)
    else:
      # Small image, conv
      cur = common_layers.conv_block(
          cur,
          hparams.hidden_size, [((1, 1), (3, 3))],
          first_relu=False,
          padding="SAME",
          force2d=True,
          name="small_image_conv")
    # Stack of residual conv blocks, each in its own variable scope.
    for i in range(hparams.num_hidden_layers):
      with tf.variable_scope("layer_%d" % i):
        cur = residual_block(cur, hparams)
    return xception_exit(cur)
def xception_entry(inputs, hidden_dim):
  """Xception entry flow.

  Two stem convs (first strided) followed by three separable-conv residual
  blocks that downsample and widen channels up to `hidden_dim`.
  """
  with tf.variable_scope("xception_entry"):
    def xnet_resblock(x, filters, res_relu, name):
      """Resblock: two separable convs + max-pool, plus a strided 1x1 shortcut."""
      with tf.variable_scope(name):
        y = common_layers.separable_conv_block(
            x,
            filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
            first_relu=True,
            padding="SAME",
            force2d=True,
            name="sep_conv_block")
        # Stride-2 max pool matches the stride-2 shortcut conv below.
        y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2))
        return y + common_layers.conv_block(
            x,
            filters, [((1, 1), (1, 1))],
            padding="SAME",
            strides=(2, 2),
            first_relu=res_relu,
            force2d=True,
            name="res_conv0")
    # Log a couple of input images for TensorBoard.
    tf.summary.image("inputs", inputs, max_outputs=2)
    x = common_layers.conv_block(
        inputs,
        32, [((1, 1), (3, 3))],
        first_relu=False,
        padding="SAME",
        strides=(2, 2),
        force2d=True,
        name="conv0")
    x = common_layers.conv_block(
        x, 64, [((1, 1), (3, 3))], padding="SAME", force2d=True, name="conv1")
    # Channel widths are capped at hidden_dim for small models.
    x = xnet_resblock(x, min(128, hidden_dim), True, "block0")
    x = xnet_resblock(x, min(256, hidden_dim), False, "block1")
    return xnet_resblock(x, hidden_dim, False, "block2")
def xception_exit(inputs):
  """Xception exit flow.

  Reshapes dynamically-shaped or non-square spatial inputs into a square
  spatial grid, then applies a strided downsampling conv block and a ReLU.

  Args:
    inputs: a 4-D NHWC tensor with statically-known channel depth.

  Returns:
    A 4-D tensor after downsampling and ReLU.

  Raises:
    ValueError: if the static spatial dimensions cannot be rearranged into
      a square grid.
  """
  with tf.variable_scope("xception_exit"):
    x = inputs
    x_shape = x.get_shape().as_list()
    # BUG FIX: x_depth was previously assigned only inside the first branch,
    # so the `elif` (non-square static shape) branch raised NameError.
    # Hoist it so both reshape branches can use it.
    x_depth = x_shape[3]
    if x_shape[1] is None or x_shape[2] is None:
      # Spatial dims unknown at graph-build time: compute them dynamically.
      length_float = tf.to_float(tf.shape(x)[1])
      length_float *= tf.to_float(tf.shape(x)[2])
      spatial_dim_float = tf.sqrt(length_float)
      spatial_dim = tf.to_int32(spatial_dim_float)
      x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
    elif x_shape[1] != x_shape[2]:
      # Static but non-square: fold H*W into a square grid if possible.
      spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2])))
      if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]:
        raise ValueError("Assumed inputs were square-able but they were "
                         "not. Shape: %s" % x_shape)
      x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
    x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME")
    return tf.nn.relu(x)
@registry.register_model
class Xception(t2t_model.T2TModel):
  """T2T model whose body is the Xception convolutional stack."""

  def body(self, features):
    inputs = features["inputs"]
    return xception_internal(inputs, self._hparams)
@registry.register_hparams
def xception_base():
  """Base set of Xception hyperparameters."""
  hp = common_hparams.basic_params1()
  # Model size.
  hp.hidden_size = 768
  hp.num_hidden_layers = 8
  hp.kernel_height = 3
  hp.kernel_width = 3
  # Regularization.
  hp.dropout = 0.2
  hp.symbol_dropout = 0.2
  hp.label_smoothing = 0.1
  hp.weight_decay = 3.0
  hp.clip_grad_norm = 2.0
  # Optimization and schedule.
  hp.batch_size = 128
  hp.learning_rate_decay_scheme = "exp"
  hp.learning_rate = 0.05
  hp.learning_rate_warmup_steps = 3000
  hp.initializer_gain = 1.0
  hp.optimizer_adam_epsilon = 1e-6
  hp.optimizer_adam_beta1 = 0.85
  hp.optimizer_adam_beta2 = 0.997
  # Sampling.
  hp.num_sampled_classes = 0
  hp.sampling_method = "argmax"
  return hp
@registry.register_hparams
def xception_tiny():
  """Tiny Xception configuration for quick local runs."""
  hp = xception_base()
  hp.num_hidden_layers = 2
  hp.hidden_size = 64
  hp.batch_size = 2
  hp.learning_rate_decay_scheme = "none"
  return hp
@registry.register_hparams
def xception_tiny_tpu():
  """Tiny Xception configuration for TPU smoke tests."""
  hp = xception_base()
  hp.num_hidden_layers = 2
  hp.hidden_size = 128
  hp.batch_size = 2
  hp.optimizer = "TrueAdam"
  return hp
|
[
"[email protected]"
] | |
2dc35c3d2292ced1188d4f111a89b631a2105445
|
2b8d9e0b27f69a05e4b1e0bbdf5b392d4d41a806
|
/taskapp/models.py
|
daaaf1b7bdab2d7fcbb1b50c739d5da2cfe2e23f
|
[] |
no_license
|
michaellamarnorman/Websites
|
bcfb60caceb87189ccffb6b7825cb8045242ca2e
|
e0952dad74795d9fb6ce2db5217499d693ec3bba
|
refs/heads/master
| 2021-01-18T14:18:45.156095 | 2014-11-02T20:50:44 | 2014-11-02T20:50:44 | 24,086,271 | 0 | 1 | null | 2020-10-16T14:26:22 | 2014-09-16T05:10:51 |
Python
|
UTF-8
|
Python
| false | false | 390 |
py
|
__author__ = 'admin'
from views import db
class Task(db.Model):
    """A single to-do item persisted in the ``tasks`` table."""

    __tablename__ = 'tasks'

    # Surrogate primary key.
    task_id = db.Column(db.Integer, primary_key=True)
    # Free-form task description; required.
    task = db.Column(db.String, nullable=False)
    # Status flag; the meaning of each value is defined by the callers.
    status = db.Column(db.Integer)

    def __init__(self, task, status):
        self.task = task
        self.status = status

    def __repr__(self):
        # BUG FIX: the original `return '<name %r>', self.task` returned a
        # (str, value) tuple, which raises TypeError whenever repr() is
        # called. Interpolate the task into the format string instead.
        return '<name %r>' % self.task
|
[
"[email protected]"
] | |
f4d448ee717a71c1efa39105c22ee21a96515b5d
|
8ef6e8aa3377d99c6dd4114856dc8cad5f806eb0
|
/server.py
|
c7de2feb94e46a8fdf0db63c9baad649f88308f3
|
[
"MIT"
] |
permissive
|
kevinastock/iot-sensor
|
c1f9810845da984c5fde4ee95bff54d991922e1a
|
5869bb7ce24aad0a7c7144ccfe6106ae6eaa09cb
|
refs/heads/master
| 2020-03-27T09:45:28.977153 | 2018-08-28T00:35:00 | 2018-08-28T00:35:00 | 146,369,756 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 953 |
py
|
#!/usr/bin/env python3
from collections import namedtuple
import psycopg2
import socketserver
import struct
Reading = namedtuple('Reading', ['lux', 'pressure', 'temperature', 'humidity', 'tvoc', 'eco2', 'full', 'ir', 'gain', 'timing', 'mic_min', 'mic_max', 'x_min', 'x_max', 'y_min', 'y_max', 'z_min', 'z_max'])
DB = None
def init_db():
    # Lazily (re)open the module-level Postgres connection. `closed != 0`
    # means the previous connection was closed or broken, so reconnect.
    global DB
    if not DB or DB.closed != 0:
        # NOTE(review): database credentials are hard-coded in source;
        # consider moving them to environment variables or a config file.
        DB = psycopg2.connect("dbname='roommetrics' user='kevin' host='localhost' password='Lb289zWj'")
        # Autocommit so each INSERT is visible without an explicit commit.
        DB.set_session(autocommit=True)
init_db()
class HandlePacket(socketserver.BaseRequestHandler):
    """Handle one UDP datagram: unpack a sensor Reading and insert it."""

    def handle(self):
        payload = self.request[0].strip()
        # Packet layout: 4 little-endian floats then 14 unsigned shorts,
        # matching the Reading namedtuple's 18 fields.
        reading = Reading(*struct.unpack("<4f14H", payload))
        # Parameterized insert: let the driver convert/quote every value
        # instead of building the SQL text by string concatenation.
        placeholders = ", ".join(["%s"] * len(reading))
        DB.cursor().execute(
            "INSERT INTO data VALUES (NOW(), " + placeholders + ");",
            tuple(reading),
        )
if __name__ == "__main__":
    # Bind UDP port 9942 on all interfaces and process packets forever.
    with socketserver.UDPServer(("0.0.0.0", 9942), HandlePacket) as server:
        server.serve_forever()
|
[
"[email protected]"
] | |
edbd64f05bc9add46fb8a202427244573c403ad7
|
4b95ced83232d8edbd15faa9f48162fc973c3e86
|
/mmdet/models/roi_heads/dynamic_roi_head.py
|
e7c2ad0018bed70c7c97155b0bad60472b9a26e8
|
[
"Apache-2.0"
] |
permissive
|
DongshuoYin/garbage_dump_detection
|
7849b0071429b27fe0afd7050821f77023a4a7f0
|
0b78e855e579dee5234c2a672b95ce350b3b3489
|
refs/heads/main
| 2023-04-11T05:11:25.411396 | 2022-09-17T06:56:46 | 2022-09-17T06:56:46 | 473,835,255 | 5 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,621 |
py
|
import numpy as np
import torch
from mmdet.core import bbox2roi
from mmdet.models.losses import SmoothL1Loss
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead
EPS = 1e-15
@HEADS.register_module()
class DynamicRoIHead(StandardRoIHead):
    """RoI head for `Dynamic R-CNN <https://arxiv.org/abs/2004.06002>`_.

    Collects IoU and regression-target statistics during training and
    periodically retunes the assigner IoU thresholds and the SmoothL1
    ``beta`` accordingly (see ``update_hyperparameters``).
    """

    def __init__(self, **kwargs):
        super(DynamicRoIHead, self).__init__(**kwargs)
        # The dynamic beta update below assumes a SmoothL1 regression loss.
        assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss)
        # the IoU history of the past `update_iter_interval` iterations
        self.iou_history = []
        # the beta history of the past `update_iter_interval` iterations
        self.beta_history = []

    def forward_train(self,
                      x,
                      img_metas,
                      proposal_list,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None):
        """Forward function for training.

        Args:
            x (list[Tensor]): list of multi-level img features.
            img_metas (list[dict]): list of image info dicts where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            proposal_list (list[Tensor]): list of region proposals.
            gt_bboxes (list[Tensor]): ground-truth boxes for each image in
                [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (None | Tensor) : true segmentation masks for each box
                used if the architecture supports a segmentation task.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        # assign gts and sample proposals
        if self.with_bbox or self.with_mask:
            num_imgs = len(img_metas)
            if gt_bboxes_ignore is None:
                gt_bboxes_ignore = [None for _ in range(num_imgs)]
            sampling_results = []
            cur_iou = []
            for i in range(num_imgs):
                assign_result = self.bbox_assigner.assign(
                    proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
                    gt_labels[i])
                sampling_result = self.bbox_sampler.sample(
                    assign_result,
                    proposal_list[i],
                    gt_bboxes[i],
                    gt_labels[i],
                    feats=[lvl_feat[i][None] for lvl_feat in x])
                # record the `iou_topk`-th largest IoU in an image
                iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk,
                               len(assign_result.max_overlaps))
                ious, _ = torch.topk(assign_result.max_overlaps, iou_topk)
                cur_iou.append(ious[-1].item())
                sampling_results.append(sampling_result)
            # average the current IoUs over images
            cur_iou = np.mean(cur_iou)
            self.iou_history.append(cur_iou)
        losses = dict()
        # bbox head forward and loss
        if self.with_bbox:
            bbox_results = self._bbox_forward_train(x, sampling_results,
                                                    gt_bboxes, gt_labels,
                                                    img_metas)
            losses.update(bbox_results['loss_bbox'])
        # mask head forward and loss
        if self.with_mask:
            mask_results = self._mask_forward_train(x, sampling_results,
                                                    bbox_results['bbox_feats'],
                                                    gt_masks, img_metas)
            losses.update(mask_results['loss_mask'])
        # update IoU threshold and SmoothL1 beta
        update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval
        if len(self.iou_history) % update_iter_interval == 0:
            # update_hyperparameters() mutates the assigner thresholds and
            # the loss beta in place; its return value was previously bound
            # to unused locals and is intentionally discarded here.
            self.update_hyperparameters()
        return losses

    def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
                            img_metas):
        """Run the bbox head, record beta statistics and compute bbox losses."""
        num_imgs = len(img_metas)
        rois = bbox2roi([res.bboxes for res in sampling_results])
        bbox_results = self._bbox_forward(x, rois)
        bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
                                                  gt_labels, self.train_cfg)
        # record the `beta_topk`-th smallest target
        # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets
        # and bbox_weights, respectively
        pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)
        num_pos = len(pos_inds)
        cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)
        beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,
                        num_pos)
        cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()
        self.beta_history.append(cur_target)
        loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
                                        bbox_results['bbox_pred'], rois,
                                        *bbox_targets)
        bbox_results.update(loss_bbox=loss_bbox)
        return bbox_results

    def update_hyperparameters(self):
        """Update hyperparameters like IoU thresholds for assigner and beta for
        SmoothL1 loss based on the training statistics.

        Returns:
            tuple[float]: the updated ``iou_thr`` and ``beta``.
        """
        new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,
                          np.mean(self.iou_history))
        self.iou_history = []
        self.bbox_assigner.pos_iou_thr = new_iou_thr
        self.bbox_assigner.neg_iou_thr = new_iou_thr
        self.bbox_assigner.min_pos_iou = new_iou_thr
        if (np.median(self.beta_history) < EPS):
            # avoid 0 or too small value for new_beta
            new_beta = self.bbox_head.loss_bbox.beta
        else:
            new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,
                           np.median(self.beta_history))
        self.beta_history = []
        self.bbox_head.loss_bbox.beta = new_beta
        return new_iou_thr, new_beta
|
[
"[email protected]"
] | |
11f06725aa8b271658da33356bbe96e61d525ff9
|
27e6a1ef07d8a52763b6e8f66460fa277a297232
|
/Python HW Link/output_config.py
|
e86517ac6c1bc5ed5ba848ddd261888d4db9598a
|
[] |
no_license
|
bnepethomas/bne-arduino-flight-simulator-interfaces
|
0354c02de1bc822db7f668880d2079aaaa2bfaae
|
ba784eea1bc920d13a9a6cb4ec484726fbf421ce
|
refs/heads/master
| 2023-08-31T14:09:03.964669 | 2023-08-20T06:16:05 | 2023-08-20T06:16:05 | 57,860,939 | 15 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 38 |
py
|
# Module-level configuration flags.
# NOTE(review): the consumers of these names are outside this file —
# confirm semantics before renaming either one.
learning = True  # presumably toggles a learning/training mode
AircraftType = 'A10C'  # aircraft profile the interface is configured for
|
[
"[email protected]"
] | |
11836edf6c90ff6bf879c37b7be53abadf8674b5
|
27aa730834cb731f4cd9fdfd1e12a443fa717c5b
|
/blog/migrations/0001_initial.py
|
a85bc42edc973d4b7f946d37420cef3827089637
|
[] |
no_license
|
vishwakarmad1999/my-blog
|
1570b0d8c5ec47242ed5380cf0dd3dd59974f168
|
d8210c9624a7094ab7dc7bdec943b8feef67e957
|
refs/heads/master
| 2020-04-14T23:31:12.315102 | 2019-12-03T10:46:33 | 2019-12-03T10:46:33 | 164,205,103 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 932 |
py
|
# Generated by Django 2.1.4 on 2019-01-05 10:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # First migration for the blog app: creates the Post model/table.
    initial = True
    dependencies = [
        # Post.author references the configured (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30)),
                ('text', models.TextField()),
                # Set once when the row is created.
                ('created_date', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): auto_now refreshes on every save, so this
                # behaves as a "last modified" timestamp, not a publish date.
                ('published_date', models.DateTimeField(auto_now=True)),
                # Deleting the user cascades to their posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
efc47731f179a6c98925d1f31746cfcda3f8de07
|
ada9440f62fd46b81233e9faa50f4d6f109c10b8
|
/Maths-Pro-NEA-TESTING/NEA Programming/Stage 1 Creating GUI/Registeration (Sqlite3).py
|
3188fe04317e88fa51da161e7ebb9ffc2888adfc
|
[] |
no_license
|
WolfAuto/Maths-Pro
|
a8df2e70196baf69fb134e2b5d23ad024027b3b6
|
2560633a1a6d38615345b9ee072a9063a8369f83
|
refs/heads/master
| 2020-04-15T13:56:29.265589 | 2020-02-03T14:37:13 | 2020-02-03T14:37:13 | 164,737,979 | 0 | 0 | null | 2020-02-03T14:38:35 | 2019-01-08T21:42:46 |
Python
|
UTF-8
|
Python
| false | false | 6,754 |
py
|
import sqlite3
import tkinter as tk
from tkinter import ttk
class MathsPro(tk.Tk):  # root application window, inherits from tk.Tk
    """Root window that stacks all page frames and raises one at a time."""

    def __init__(self, *args, **kwargs):  # intialises the object
        """Build the shared container and instantiate every page frame."""
        tk.Tk.__init__(self, *args, **kwargs)  # intialise as a tkinter root
        # tk.Tk.iconbitmap(self, default="")
        tk.Tk.wm_title(self, "Maths Pro")  # window title shared by all pages
        container = tk.Frame(self)  # single container holding every page
        # All pages are stacked in the same grid cell of this container.
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        # Weight 1 lets the single row/column stretch with the window.
        container.grid_columnconfigure(0, weight=1)
        self.frames = {}  # maps page class -> instantiated frame
        for F in (Register, Register2):  # every page class used by the app
            # Instantiate the page with the container as parent and self as controller.
            frame = F(container, self)
            self.frames[F] = frame  # register the frame under its class
            # Same cell for every page; tkraise() decides which is visible.
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame(Register)  # first page shown is the registration page

    def show_frame(self, cont):  # cont is the page class to display
        """Raise the frame registered under page class `cont` to the top."""
        frame = self.frames[cont]  # look up the instantiated frame
        frame.tkraise()  # bring it above the other stacked pages
class Register(tk.Frame):
    """First registration page: personal details, persisted to SQLite."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        tk.Frame.config(self)
        label = tk.Label(self, text="Registration 1", font=("Times New Roman", 50))
        label.pack(pady=10, padx=10)
        label_1 = tk.Label(self, text="First Name", width=20, font=("bold", 10))
        label_1.place(x=80, y=130)
        label_a = tk.Label(self, text="Surname", width=20, font=("bold", 10))
        label_a.place(x=80, y=180)
        label_b = tk.Label(self, text="Age", width=20, font=("bold", 10))
        label_b.place(x=80, y=300)
        label_c = tk.Label(self, text="Class", width=20, font=("bold", 10))
        label_c.place(x=60, y=360)
        school_class = tk.StringVar()
        entry_c = tk.Entry(self, textvariable=school_class)  # Class
        entry_c.place(x=240, y=360)
        age = tk.StringVar()
        entry_b = tk.Entry(self, textvariable=age)  # Age
        entry_b.place(x=240, y=300)
        surname = tk.StringVar()
        entry_2 = tk.Entry(self, textvariable=surname)  # Surname
        entry_2.place(x=240, y=180)
        firstname = tk.StringVar()
        entry_1 = tk.Entry(self, textvariable=firstname)  # First Name
        entry_1.place(x=240, y=130)
        label_2 = tk.Label(self, text="Gender", width=20, font=("bold", 10))
        label_2.place(x=70, y=230)
        var = tk.IntVar()
        tk.Radiobutton(self, text="Male", padx=5, variable=var, value=1).place(x=205, y=230)
        tk.Radiobutton(self, text="Female", padx=20, variable=var, value=2).place(x=290, y=230)
        label_3 = tk.Label(self, text="School", width=20, font=("bold", 10))
        label_3.place(x=70, y=420)
        var1 = tk.IntVar()
        tk.Radiobutton(self, text="Student", padx=5, variable=var1, value=1).place(x=205, y=420)
        tk.Radiobutton(self, text="Teacher", padx=20, variable=var1, value=2).place(x=290, y=420)
        # NOTE(review): this button navigates to page 2 WITHOUT saving;
        # only the "Check details" button below persists the form.
        button1 = tk.Button(self, text="Enter details",
                            command=lambda: controller.show_frame(Register2))
        button1.place(x=470, y=470)

        def datacheck():
            """Read the form, insert a row into SQLite, then go to page 2."""
            First_name = firstname.get()
            Surname = surname.get()
            # NOTE(review): int() raises ValueError for non-numeric input;
            # consider validating the age field before converting.
            Age = int(age.get())
            Class = school_class.get()
            # BUG FIX: gender/School were only assigned inside the if/elif
            # branches, so submitting with no radio button selected raised
            # UnboundLocalError. Default both to the empty string.
            gender = ""
            School = ""
            if var.get() == 1:
                gender = "Male"
            elif var.get() == 2:
                gender = "Female"
            if var1.get() == 1:
                School = "Student"
            elif var1.get() == 2:
                School = "Teacher"
            conn = sqlite3.connect("datacheck.db")
            with conn:
                cursor = conn.cursor()
                cursor.execute("CREATE TABLE IF NOT EXISTS personal_details (userID INTEGER PRIMARY KEY NOT NULL, firstname VARCHAR(30) NOT NULL, surname VARCHAR(30) NOT NULL, age INTEGER NOT NULL, class VARCHAR (3) NOT NULL, gender VARCHAR (30) NOT NULL, school VARCHAR (30) NOT NULL)")
                # Parameterized insert keeps user input out of the SQL text.
                cursor.execute("INSERT INTO personal_details (firstname, surname, age , class, gender , school) VALUES (?, ?, ?, ?, ?, ?)", (First_name, Surname, Age, Class, gender, School))
                conn.commit()
            conn.close()
            controller.show_frame(Register2)

        buttoncheck = tk.Button(self, text="Check details", command=datacheck)
        buttoncheck.place(x=600, y=600)
class Register2(tk.Frame):
    """Second registration page: account credentials (username/password/email)."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        tk.Frame.config(self, bg="grey")
        label = tk.Label(self, text="Register 2", font=("Times New Roman", 50), bg="grey")
        label.pack(pady=10, padx=10)
        label_0 = tk.Label(self, text="Username", font=("Times New Roman", 20), bg="grey")
        label_0.pack()
        username_entry = tk.StringVar()
        entry_0 = tk.Entry(self, textvariable=username_entry)
        entry_0.pack()
        label_1 = tk.Label(self, text="Password", bg="grey")
        label_1.pack()
        password_entry = tk.StringVar()
        entry_1 = tk.Entry(self, textvariable=password_entry, show="*")
        entry_1.pack()
        label_2 = tk.Label(self, text="Email", bg="grey")
        label_2.pack()
        email_entry = tk.StringVar()
        entry_2 = tk.Entry(self, textvariable=email_entry)
        # BUG FIX: the email entry was created but never packed, so the
        # field was invisible on the page.
        entry_2.pack()
        button1 = ttk.Button(self, text="Back to home",
                             command=lambda: controller.show_frame(Register))
        button1.pack()

        def data_details():
            """Collect the credential fields (persistence not yet implemented)."""
            username = username_entry.get()
            password = password_entry.get()
            email = email_entry.get()

        # BUG FIX: ttk.Button does not accept a `bg` option — the original
        # ttk.Button(..., bg="grey") raised TclError while the page was being
        # built, crashing the app at startup. Use a plain tk.Button (which
        # supports bg) and wire it to data_details, which was previously dead.
        button2 = tk.Button(self, text="Create account", bg="grey", command=data_details)
        button2.pack()
# Instantiate the application (a tk.Tk subclass) and start the event loop.
root = MathsPro()
root.geometry("1280x800")  # initial window size: width x height in pixels
root.mainloop()  # blocks here until the window is closed
|
[
"[email protected]"
] | |
81e26930666b720bd531d102ba34160f29b59198
|
be5fa730da0aa1b45c748decf695c47ab536f806
|
/indicator.py
|
c87a3ebea72bcf523e0e09b65e527a1726950585
|
[] |
no_license
|
academo/gp-okta-linux
|
6e502a54d214c9d3630fa0dc79eb610d0fc0f117
|
63f236bab8f559cdfaeb06e16cd01aaa27be9304
|
refs/heads/master
| 2022-10-02T00:50:17.991981 | 2020-06-05T15:21:45 | 2020-06-05T15:21:45 | 269,683,241 | 2 | 0 | null | 2020-06-05T15:24:55 | 2020-06-05T15:24:54 | null |
UTF-8
|
Python
| false | false | 4,884 |
py
|
#!/usr/bin/env python3
"""gp-okta-indicator.py: A GlobalProtect VPN with Okta 2FA indicator applet"""
__author__ = "Jeff Channell"
__copyright__ = "Copyright 2019, Jeff Channell"
__credits__ = ["Jeff Channell"]
__license__ = "GPL"
__version__ = "0.0.1"
__maintainer__ = "Jeff Channell"
__email__ = "[email protected]"
__status__ = "Prototype"
import gi
import os
import shlex
import signal
import subprocess
import sys
gi.require_version("Gtk", "3.0")
gi.require_version("AppIndicator3", "0.1")
from gi.repository import Gtk
from gi.repository import AppIndicator3 as appindicator
from gi.repository import GLib
class GpOktaLinuxIndicator:
    """System-tray indicator showing and toggling the gp-okta VPN state."""

    def __init__(self):
        # Read configuration: source /etc/gp-okta.conf in a clean bash
        # environment and copy every resulting VAR=value into os.environ.
        command = shlex.split("env -i bash -c 'source /etc/gp-okta.conf && env'")
        proc = subprocess.Popen(command, stdout=subprocess.PIPE)
        for line in proc.stdout:
            (key, _, value) = line.decode('utf-8').partition("=")
            os.environ[key.strip()] = value.strip()
        proc.communicate()
        self.vpn = os.environ.get("VPN_SERVER", "")
        # Idiom fix: truthiness check instead of `"" == self.vpn`.
        if not self.vpn:
            print("VPN_SERVER not found in environment. Exiting.", file=sys.stderr)
            sys.exit(1)
        self.about = None
        self.indicator = appindicator.Indicator.new(
            "GpOktaLinuxIndicator",
            os.path.abspath("icons/disconnected.svg"),
            appindicator.IndicatorCategory.HARDWARE
        )
        self.indicator.set_status(appindicator.IndicatorStatus.ACTIVE)
        self.menu = Gtk.Menu()
        # Connection toggle menu item; its label/icon are refreshed by run_loop.
        self.toggle = Gtk.MenuItem()
        self.toggle.set_label("Connect to %s" % self.vpn)
        self.toggle.connect("activate", self.toggle_connection)
        self.menu.append(self.toggle)
        # Separator.
        item = Gtk.SeparatorMenuItem()
        self.menu.append(item)
        # About dialog entry.
        item = Gtk.MenuItem()
        item.set_label("About")
        item.connect("activate", self.show_about)
        self.menu.append(item)
        # Quit entry.
        item = Gtk.MenuItem()
        item.set_label("Quit")
        item.connect("activate", self.quit)
        self.menu.append(item)
        self.menu.show_all()
        self.indicator.set_menu(self.menu)

    def add_about_window_contents(self):
        """Fill the About window with the formatted description label."""
        text = Gtk.Label()
        text.set_markup(
            "<b>GlobalProtect with Okta 2FA Indicator</b>\n\n{}\n\n"
            "A GlobalProtect VPN with Okta 2FA indicator applet\n\n"
            "<a href=\"https://github.com/jeffchannell/gp-okta-linux\">"
            "https://github.com/jeffchannell/gp-okta-linux</a>\n\n"
            "<small>"
            "© 2019 Jeff Channell\n\n"
            "This program comes with absolutely no warranty.\n"
            "See the GNU General Public License, version 3 or later for details."
            "</small>".format(__version__)
        )
        text.set_line_wrap(True)
        text.set_justify(Gtk.Justification.CENTER)
        self.about.add(text)

    def destroy_about(self, widget, something):
        """Forget the About window when it is closed; allow the default handler."""
        self.about = None
        return False

    def is_running(self):
        """Return True when the gp-okta PID file exists (VPN considered up)."""
        return os.path.isfile("/var/run/gp-okta.pid")

    def main(self):
        """Start the periodic status refresh and enter the GTK main loop."""
        self.run_loop()
        Gtk.main()

    def quit(self, widget):
        """Stop the VPN and exit the GTK main loop."""
        subprocess.call([os.path.abspath("stop.sh")])
        Gtk.main_quit()

    def run_loop(self):
        """Refresh icon/labels from the current state; reschedules itself every second."""
        if self.is_running():
            icon = "connected"
            icon_desc = "Connected to %s"
            label = "Disconnect from %s"
        else:
            icon = "disconnected"
            icon_desc = "Disconnected from %s"
            label = "Connect to %s"
        self.indicator.set_icon_full(os.path.abspath("icons/%s.svg" % icon), icon_desc % self.vpn)
        self.toggle.set_label(label % self.vpn)
        GLib.timeout_add_seconds(1, self.run_loop)

    def show_about(self, widget):
        """Create (if needed) and present the About window."""
        # Idiom fix: identity comparison with None instead of `None == ...`.
        if self.about is None:
            self.about = Gtk.Window()
            self.about.set_title("About GpOktaLinuxIndicator")
            self.about.set_keep_above(True)
            self.about.connect("delete-event", self.destroy_about)
            self.add_about_window_contents()
        self.about.set_position(Gtk.WindowPosition.CENTER)
        self.about.set_size_request(400, 200)
        self.about.show_all()

    def toggle_connection(self, widget):
        """Start the VPN when stopped, stop it when running (non-blocking)."""
        if self.is_running():
            script = "stop.sh"
        else:
            script = "start.sh"
        subprocess.Popen([os.path.abspath(script)])
def main():
    """Entry point: restore default Ctrl+C handling, then run the applet."""
    # Allow the app to be killed with Ctrl+C despite the GTK main loop.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    GpOktaLinuxIndicator().main()


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
3981a101ac67bb48c3abaf31d8209d0d40274ca4
|
29a7b7cf040366358beeb8e7116791d0aa31e99a
|
/indexer.py
|
17f1a0af0d9ef492c70518a85549036b0cdaf5f8
|
[] |
no_license
|
gale2307/Jarvis
|
acf82cd3062fe6fb813d5fb23808392d4021cbd8
|
1a43d865cd1ac0180377e0088d55a631d69ad3cf
|
refs/heads/master
| 2023-03-23T23:17:42.279377 | 2021-03-22T04:43:11 | 2021-03-22T04:43:11 | 330,080,718 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,499 |
py
|
import os.path
import whoosh.index as index
from whoosh.analysis import StemmingAnalyzer
from whoosh.fields import *

# List of stopwords for the analyzer applied to page content
stops = frozenset(['and', 'is', 'it', 'an', 'as', 'at', 'have', 'in', 'yet', 'if', 'from', 'for', 'when', 'by', 'to', 'you', 'be', 'we', 'that', 'may',
                   'not', 'with', 'tbd', 'a', 'on', 'your', 'this', 'of', 'us', 'will', 'can', 'the', 'or', 'are', 'what', 'how', 'why'])

# Index schema: only the title is stored; title terms get triple weight.
schema = Schema(fullTitle=ID(stored=True), content=TEXT(analyzer=StemmingAnalyzer(stoplist=stops)), title=TEXT(analyzer=StemmingAnalyzer(stoplist=stops), field_boost=3.0))

# Create the on-disk index directory (create_in replaces any existing index).
if not os.path.exists("index"):
    os.mkdir("index")
ix = index.create_in("index", schema)

writer = ix.writer()

# Walk the corpus directory and add every page to the index.
for subdir, dirs, files in os.walk(r'MinecraftWiki/'):
    for filename in files:
        filepath = subdir + os.sep + filename
        # BUG FIX: the original used filename[30:].rstrip(".txt"), but
        # str.rstrip strips trailing *characters* ('.', 't', 'x') rather than
        # the suffix — e.g. a title ending in "...cart.txt" would lose its
        # final 't'. Remove the literal ".txt" extension instead.
        short_title = filename[30:]
        if short_title.endswith(".txt"):
            short_title = short_title[:-len(".txt")]
        short_title = short_title.replace("_", " ")
        print("Processing: {} (full title: {})\n".format(short_title, filename))
        with open(filepath, 'r', encoding="utf-8") as f:
            f_str = f.read()
        # The with-block closes the file; the original's explicit f.close()
        # inside the block was redundant.
        writer.add_document(fullTitle=u"{}".format(filename), content=u"{}".format(f_str), title=u"{}".format(short_title))
writer.commit()
|
[
"[email protected]"
] | |
a74660e8b225a828ac81ad3fe78a75e878478a32
|
ddf5332b1f2e5f485e2ee4b8307ecf1766e3ac54
|
/productivityTools/asgi.py
|
deb35cb4ef3c151c0510cd5b7d81d328293c9d50
|
[] |
no_license
|
Muzammiluddin-Syed-ECE/productivityTools
|
3743965b3ff2187f4fcc9667aa0c63b376ec9c41
|
8f52c058f46721e6b6cb01123b4729fc61c0526d
|
refs/heads/master
| 2022-12-01T11:50:36.995017 | 2020-08-18T22:14:13 | 2020-08-18T22:14:13 | 288,567,987 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 411 |
py
|
"""
ASGI config for productivityTools project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'productivityTools.settings')
application = get_asgi_application()
|
[
"[email protected]"
] | |
5ac388b69a2ab3c163d4dd86e79293977f264fc7
|
aa1e637de90f69f9ae742d42d5b777421617d10c
|
/nitro/resource/config/cs/csvserver_cspolicy_binding.py
|
e6035fde5b4e240edfc80048627a63609ec4ab92
|
[
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
km0420j/nitro-python
|
db7fcb49fcad3e7a1ae0a99e4fc8675665da29ba
|
d03eb11f492a35a2a8b2a140322fbce22d25a8f7
|
refs/heads/master
| 2021-10-21T18:12:50.218465 | 2019-03-05T14:00:15 | 2019-03-05T15:35:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,684 |
py
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class csvserver_cspolicy_binding(base_resource) :
    """Binding class showing the cspolicy that can be bound to csvserver.

    Each instance mirrors one row of the NITRO ``csvserver_cspolicy_binding``
    API resource.  Fields are exposed as properties over underscored
    attributes that the NITRO payload formatter serializes/deserializes.

    NOTE: the original generated code wrapped every accessor in a redundant
    ``try: ... except Exception as e: raise e`` block; the wrappers are
    removed here — they changed nothing for callers.
    """

    def __init__(self) :
        # Writable binding fields.
        self._policyname = ""
        self._targetlbvserver = ""
        self._priority = 0
        self._gotopriorityexpression = ""
        self._bindpoint = ""
        self._invoke = False
        self._labeltype = ""
        self._labelname = ""
        # Read-only statistics reported by the appliance.
        self._hits = 0
        self._pipolicyhits = 0
        self._rule = ""
        self._name = ""
        self.___count = 0

    # ----- read/write properties -----------------------------------------

    @property
    def priority(self) :
        """Priority for the policy."""
        return self._priority

    @priority.setter
    def priority(self, priority) :
        self._priority = priority

    @property
    def bindpoint(self) :
        """The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE."""
        return self._bindpoint

    @bindpoint.setter
    def bindpoint(self, bindpoint) :
        self._bindpoint = bindpoint

    @property
    def policyname(self) :
        """Policies bound to this vserver."""
        return self._policyname

    @policyname.setter
    def policyname(self, policyname) :
        self._policyname = policyname

    @property
    def labelname(self) :
        """Name of the label invoked."""
        return self._labelname

    @labelname.setter
    def labelname(self, labelname) :
        self._labelname = labelname

    @property
    def name(self) :
        """Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1."""
        return self._name

    @name.setter
    def name(self, name) :
        self._name = name

    @property
    def gotopriorityexpression(self) :
        """Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE."""
        return self._gotopriorityexpression

    @gotopriorityexpression.setter
    def gotopriorityexpression(self, gotopriorityexpression) :
        self._gotopriorityexpression = gotopriorityexpression

    @property
    def targetlbvserver(self) :
        """target vserver name."""
        return self._targetlbvserver

    @targetlbvserver.setter
    def targetlbvserver(self, targetlbvserver) :
        self._targetlbvserver = targetlbvserver

    @property
    def invoke(self) :
        """Invoke flag."""
        return self._invoke

    @invoke.setter
    def invoke(self, invoke) :
        self._invoke = invoke

    @property
    def labeltype(self) :
        """The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel."""
        return self._labeltype

    @labeltype.setter
    def labeltype(self, labeltype) :
        self._labeltype = labeltype

    # ----- read-only properties -------------------------------------------

    @property
    def rule(self) :
        """Rule."""
        return self._rule

    @property
    def hits(self) :
        """Number of hits."""
        return self._hits

    @property
    def pipolicyhits(self) :
        """Number of hits."""
        return self._pipolicyhits

    # ----- NITRO plumbing --------------------------------------------------

    def _get_nitro_response(self, service, response) :
        """Convert a raw NITRO response into resource objects.

        Returns the list of bindings on success.  Raises nitro_exception on
        API errors; error code 444 additionally clears the session first.
        """
        result = service.payload_formatter.string_to_resource(csvserver_cspolicy_binding_response, response, self.__class__.__name__)
        if(result.errorcode != 0) :
            if (result.errorcode == 444) :
                # Session expired/invalid: drop it before reporting the error.
                service.clear_session(self)
            if result.severity :
                if (result.severity == "ERROR") :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else :
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.csvserver_cspolicy_binding

    def _get_object_name(self) :
        """Return the value of the object identifier argument (the vserver name)."""
        if self.name is not None :
            return str(self.name)
        return None

    @classmethod
    def add(cls, client, resource) :
        """Bind a policy (or a list of policies) to its CS vserver.

        :param client: nitro_service session to use.
        :param resource: a single csvserver_cspolicy_binding or a list of them.
        """
        if resource and type(resource) is not list :
            updateresource = csvserver_cspolicy_binding()
            updateresource.name = resource.name
            updateresource.policyname = resource.policyname
            updateresource.targetlbvserver = resource.targetlbvserver
            updateresource.priority = resource.priority
            updateresource.gotopriorityexpression = resource.gotopriorityexpression
            updateresource.bindpoint = resource.bindpoint
            updateresource.invoke = resource.invoke
            updateresource.labeltype = resource.labeltype
            updateresource.labelname = resource.labelname
            return updateresource.update_resource(client)
        else :
            if resource and len(resource) > 0 :
                updateresources = [csvserver_cspolicy_binding() for _ in range(len(resource))]
                for i in range(len(resource)) :
                    updateresources[i].name = resource[i].name
                    updateresources[i].policyname = resource[i].policyname
                    updateresources[i].targetlbvserver = resource[i].targetlbvserver
                    updateresources[i].priority = resource[i].priority
                    updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
                    updateresources[i].bindpoint = resource[i].bindpoint
                    updateresources[i].invoke = resource[i].invoke
                    updateresources[i].labeltype = resource[i].labeltype
                    updateresources[i].labelname = resource[i].labelname
                return cls.update_bulk_request(client, updateresources)

    @classmethod
    def delete(cls, client, resource) :
        """Unbind a policy (or a list of them) from its CS vserver.

        A binding is identified by name, policyname, bindpoint and priority.
        """
        if resource and type(resource) is not list :
            deleteresource = csvserver_cspolicy_binding()
            deleteresource.name = resource.name
            deleteresource.policyname = resource.policyname
            deleteresource.bindpoint = resource.bindpoint
            deleteresource.priority = resource.priority
            return deleteresource.delete_resource(client)
        else :
            if resource and len(resource) > 0 :
                deleteresources = [csvserver_cspolicy_binding() for _ in range(len(resource))]
                for i in range(len(resource)) :
                    deleteresources[i].name = resource[i].name
                    deleteresources[i].policyname = resource[i].policyname
                    deleteresources[i].bindpoint = resource[i].bindpoint
                    deleteresources[i].priority = resource[i].priority
                return cls.delete_bulk_request(client, deleteresources)

    @classmethod
    def get(cls, service, name) :
        """Fetch all csvserver_cspolicy_binding resources for vserver *name*."""
        obj = csvserver_cspolicy_binding()
        obj.name = name
        return obj.get_resources(service)

    @classmethod
    def get_filtered(cls, service, name, filter_) :
        """Fetch the filtered set of csvserver_cspolicy_binding resources.

        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
        """
        obj = csvserver_cspolicy_binding()
        obj.name = name
        option_ = options()
        option_.filter = filter_
        return obj.getfiltered(service, option_)

    @classmethod
    def count(cls, service, name) :
        """Count the csvserver_cspolicy_binding resources configured on the appliance."""
        obj = csvserver_cspolicy_binding()
        obj.name = name
        option_ = options()
        option_.count = True
        response = obj.get_resources(service, option_)
        if response :
            # '___count' is populated directly in __dict__ by the payload
            # formatter (presumably via setattr with the literal name, which
            # bypasses Python name mangling) — hence the dict access.
            return response[0].__dict__['___count']
        return 0

    @classmethod
    def count_filtered(cls, service, name, filter_) :
        """Count the csvserver_cspolicy_binding resources matching *filter_*.

        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
        """
        obj = csvserver_cspolicy_binding()
        obj.name = name
        option_ = options()
        option_.count = True
        option_.filter = filter_
        response = obj.getfiltered(service, option_)
        if response :
            return response[0].__dict__['___count']
        return 0

    class Bindpoint:
        """Allowed values for ``bindpoint``."""
        REQUEST = "REQUEST"
        RESPONSE = "RESPONSE"

    class Labeltype:
        """Allowed values for ``labeltype``."""
        reqvserver = "reqvserver"
        resvserver = "resvserver"
        policylabel = "policylabel"
class csvserver_cspolicy_binding_response(base_response) :
    """Response envelope deserialized from csvserver_cspolicy_binding API calls."""

    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate *length* empty binding objects for the deserializer to
        # fill in.  (The original assigned an empty list first and immediately
        # overwrote it; the dead assignment is removed.)
        self.csvserver_cspolicy_binding = [csvserver_cspolicy_binding() for _ in range(length)]
|
[
"[email protected]"
] | |
db07bde569060c63017b9cc87cbbd1dba54730f0
|
63bfbc2cdfcf45c868401b7e83d6065ffe6a14f9
|
/math2/2581_find_prime_number2.py
|
2ccf956b7c067841e703ec204f13ea1e7bf5ad9b
|
[] |
no_license
|
minseop-kim/Beakjoon
|
c1011a712ae00655a5380140ff051e8c8d092377
|
07ccfb73bf9c07feb2fb35a64f9126082d5d22fa
|
refs/heads/master
| 2020-09-03T09:12:02.965834 | 2019-11-18T10:28:59 | 2019-11-18T10:28:59 | 219,433,092 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 469 |
py
|
# Read the inclusive range [m, n] in which to look for primes.
m = int(input())
n = int(input())
# sosu[i] will be set to 1 if (i + m) is prime, 0 otherwise.
sosu = [0 for i in range(n-m+1)]
def check_sosu(n):
    """Return 1 if *n* is prime, otherwise 0 (keeps the script's 0/1 flags).

    Improvements over the original: numbers below 2 (including 0) are now
    correctly non-prime, and trial division stops at sqrt(n) instead of n.
    """
    if n < 2:
        return 0
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return 0
    return 1
# Flag each number in [m, n] as prime (1) or not (0).
for i in range(len(sosu)):
    sosu[i] = check_sosu(i+m)

if 1 not in sosu:
    # No prime exists in the range.
    print('-1')
else:
    # Sum of all primes in the range.  Renamed from ``sum`` so the built-in
    # of the same name is no longer shadowed.
    total = 0
    for i in range(len(sosu)):
        if sosu[i] == 1:
            total += (i+m)
    print(total)
    # The smallest prime is the first flagged entry, offset back by m.
    print(sosu.index(1)+m, end='')
|
[
"[email protected]"
] | |
fef62cf65b04092023dfb0175155eaecc766cf9d
|
a8e3abd78187690a757a3387059adae4566e3a5e
|
/day02/solution.py
|
f135efd5ea0f78de4749439872eb8e3e8072bdb0
|
[] |
no_license
|
beatrizwang/advent_of_code_2020
|
f149305438d2113b13cc0a9dc3ef59a90e10143a
|
8b019bfd901a069a2e0dadf6109e109ffb96f37b
|
refs/heads/master
| 2023-02-12T10:31:24.674013 | 2021-01-09T22:07:52 | 2021-01-09T22:07:52 | 326,070,948 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,488 |
py
|
import re
import pprint
def get_lines():
    """Read the puzzle input; each element is a ``[policy, password]`` pair split on ':'."""
    # Raw string: '\i' is an invalid escape sequence and the original literal
    # only worked because Python currently leaves unknown escapes intact.
    with open(r'day02\input.txt', 'r') as input_file:
        lines = [line.rstrip().split(':') for line in input_file]
    return lines
def count_valid_passwords_by_letter_count(lines):
    """Part 1: count passwords whose required letter occurs between min and max times.

    :param lines: list of ``[policy, password]`` pairs, e.g. ``['1-3 a', ' abcde']``.
    :return: number of valid passwords.
    """
    count_valid_pass = 0
    for line in lines:
        required_letter = line[0][-1]
        # Raw regex string; convert both bounds to int once, up front.
        min_rep, max_rep = (int(x) for x in re.findall(r'\d+', line[0]))
        occurrences = line[1].count(required_letter)
        if min_rep <= occurrences <= max_rep:
            count_valid_pass += 1
    return count_valid_pass
def count_valid_passwords_by_letter_position(lines):
    """Part 2: count passwords where exactly one of the two 1-based positions holds the letter.

    :param lines: list of ``[policy, password]`` pairs, e.g. ``['1-3 a', ' abcde']``.
    :return: number of valid passwords.
    """
    count_valid_pass = 0
    for line in lines:
        required_letter = line[0][-1]
        first_position, second_position = re.findall(r'\d+', line[0])
        password = line[1].strip()
        first_match = password[int(first_position) - 1] == required_letter
        second_match = password[int(second_position) - 1] == required_letter
        # "Exactly one matches" is boolean inequality (XOR).
        if first_match != second_match:
            count_valid_pass += 1
    return count_valid_pass
def main():
    """Print the answers to both parts of the puzzle."""
    lines = get_lines()
    part_one = count_valid_passwords_by_letter_count(lines)
    part_two = count_valid_passwords_by_letter_position(lines)
    print('Solution to first puzzle: ' + str(part_one))
    print('Solution to second puzzle: ' + str(part_two))


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
83bd0237bba7855fe2a35fb36e2c4190f014c5f1
|
5a6716ac1c7c5745b25781c7dbfb192e593c5ae4
|
/client/staas/extend_exist_storage.py
|
6433d6918d687f27e458c29302843e2fe97f8b60
|
[] |
no_license
|
tej1996/molecularC
|
9eecfe3a48ffa5e2803117cafbab958fe366980b
|
17d4c305de7bd642c0e1ee388f0597408fbdd212
|
refs/heads/master
| 2020-01-19T21:08:40.800301 | 2017-06-23T10:00:03 | 2017-06-23T10:00:03 | 94,209,611 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 745 |
py
|
#!/usr/bin/python
# Python 2 client: asks the STaaS server over UDP to extend an existing
# virtual drive by a user-specified number of megabytes.
import getpass, socket,time, sys,os,commands
sock= socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
# NOTE(review): server endpoint is hard-coded -- confirm it matches deployment.
SERVER_IP="192.168.122.152"
SERVER_PORT=8000
drive_name=raw_input("Enter the drive name: ")
drive_size=raw_input("Enter the drive size to be extended(in MB : eg. 1000,500, any size):")
# The request is sent as two datagrams: drive name first, then size.
sock.sendto(drive_name,(SERVER_IP,SERVER_PORT))
sock.sendto(drive_size,(SERVER_IP,SERVER_PORT))
# Block until the server replies with a status word (max 15 bytes).
while True:
    response = sock.recvfrom(15)
    result = response[0]
    if result == "success" or result =="error":
        break
    elif result == "insufficient":
        print "Sorry, unable to extend due to insufficient storage!"
        break
if result == "success" :
    print "Drive named "+drive_name+" is resized!"
else :
    print "Error in resizing drive!"
|
[
"[email protected]"
] | |
fae8926483b5d96a39e1fe038a84851bf29c2e75
|
b3e3c2437f44f234ca5a00826557ba637d50c55a
|
/mascota/views.py
|
548201e8af69bd8495e53922d84512ff79627648
|
[] |
no_license
|
fedebelve/mascotas
|
92dc343ad65348ddaa75dc69a67a02002515e99a
|
a76458d0acce82ea49e5314f0f1531ea3929df8f
|
refs/heads/master
| 2023-09-01T16:29:22.411608 | 2020-05-10T21:51:14 | 2020-05-10T21:51:14 | 262,845,470 | 0 | 0 | null | 2021-09-22T18:59:43 | 2020-05-10T18:01:02 |
Python
|
UTF-8
|
Python
| false | false | 2,223 |
py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
# Create your views here.
from django.urls import reverse_lazy
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from mascota.forms import MascotaForm
from mascota.models import Mascota
# def mascota_add(request):
# if request.method == 'POST':
# form = MascotaForm(request.POST)
# if form.is_valid():
# form.save()
# return redirect('mascota:mascota_listar')
#
# else:
# form = MascotaForm()
# return render(request, 'mascota/mascotaForm.html', {'form': form})
#
#
# def mascota_list(request):
# mascotas = Mascota.objects.all()
# contexto = {'mascotas': mascotas}
#
# return render(request, 'mascota/mascotaList.html', contexto)
#
#
# def mascota_edit(request, codigo_mascota):
# mascota = Mascota.objects.get(codigo=codigo_mascota)
# if request.method == 'GET':
# form = MascotaForm(instance=mascota)
# else:
# form = MascotaForm(request.POST, instance=mascota)
# if form.is_valid():
# form.save()
# return redirect('mascota:mascota_listar')
#
# return render(request, 'mascota/mascotaForm.html', {'form': form})
#
#
# def mascota_delete(request, codigo_mascota):
# mascota = Mascota.objects.get(codigo=codigo_mascota)
# if request.method == 'POST':
# mascota.delete()
# return redirect('mascota:mascota_listar')
#
# return render(request, 'mascota/mascotaDelete.html', {'mascota': mascota})
class MascotaList(ListView):
    # Read-only listing of every Mascota record.
    model = Mascota
    template_name = 'mascota/mascotaList.html'
class MascotaCreate(CreateView):
    # Creation form; redirects back to the listing on success.
    model = Mascota
    form_class = MascotaForm
    template_name = 'mascota/mascotaForm.html'
    success_url = reverse_lazy('mascota:mascota_listar')
class MascotaUpdate(UpdateView):
    # Edit form; reuses the creation template and redirects to the listing.
    model = Mascota
    form_class = MascotaForm
    template_name = 'mascota/mascotaForm.html'
    success_url = reverse_lazy('mascota:mascota_listar')
class MascotaDelete(DeleteView):
    # Confirmation page for deletion; redirects to the listing afterwards.
    model = Mascota
    template_name = 'mascota/mascotaDelete.html'
    success_url = reverse_lazy('mascota:mascota_listar')
|
[
"[email protected]"
] | |
64e5aaded7f8c4ba2ef53e2eec133197072029a0
|
280ae2419f935e1d30866fae0efb7d15005c8bf7
|
/LIKAN_HEATMAP_52BESTUN/Actual/Create_realData_Actual.py
|
29e24ae893fd8ebe8afbaa3681331335ab7cc875
|
[] |
no_license
|
johanngunnar/ATVR
|
2a25cc8c8a34bee11737f9f5633bc01142c86713
|
6c82b6c8bfbbef8ac5d4eda8b5230e689b930ba9
|
refs/heads/master
| 2020-04-23T21:33:38.667824 | 2019-05-15T10:47:20 | 2019-05-15T10:47:20 | 171,473,971 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,383 |
py
|
import psycopg2
from datetime import datetime
from Functions.Select_function import Select_string
from Functions.Write_Vendor_data import Write_vendor_data
from Functions.Write_sendingarVendorar_data import Write_sendingar_data
#Connection to SQL
# NOTE(review): credentials are hard-coded for a local development database.
host = 'localhost'
dbname = 'atvr2'
username = 'postgres'
pw = 'postgres'
conn_string = "host='{}' dbname='{}' user='{}' password='{}'"
try:
    conn = psycopg2.connect(conn_string.format(host, dbname, username, pw))
except psycopg2.OperationalError as e:
    # Abort the whole script when the database is unreachable.
    print('Connection failed')
    print('Error: ', e)
    exit()
cursor = conn.cursor()
#----------------------------------------------------------------------------
# Write the select Q
#----------------------------------------------------------------------------
# For each week 1..51: query the shipments for that week and emit a GLPK
# data file "demo_data_realN.txt" describing shipments, time slots and the
# fixed vendor/slot assignments.
# NOTE(review): indentation below is reconstructed from a whitespace-mangled
# source -- verify block boundaries against the original file.
for i in range(1,52):
    selectstring = Select_string(i)
    cursor.execute(selectstring)
    arr = cursor.fetchall()
    # Count rows returned for this week.
    fjoldiSendinga = 0
    for x in arr:
        fjoldiSendinga = fjoldiSendinga + 1
    #----------------------------------------------------------------------------
    # Create data file
    #----------------------------------------------------------------------------
    #DETERMINE VALUES
    Days = 5
    Timeslots = 8
    Sendingar = fjoldiSendinga -1
    windowsize = 0
    demo_data_real = "demo_data_real" + str(i) + ".txt"
    #START WRITING THE FILE
    # NOTE(review): f is opened each iteration but never closed; data is only
    # flushed when the interpreter exits -- consider a with-block.
    f= open(demo_data_real,"w+")
    f.write("param S := {};\r\n".format(Sendingar))
    f.write("param T := {};\r\n".format(Timeslots))
    f.write("param D := {};\r\n".format(Days))
    f.write("param windowsize := {};\r\n".format(windowsize))
    f.write("\r\n")
    #----------------------------------------------------------------------
    # VENDORAR & TIMESLOTT
    #----------------------------------------------------------------------
    '''
    vendor = 'O'
    Write_vendor_data(1,vendor,Timeslots,f)
    vendor = 'C'
    Write_vendor_data(2,vendor,Timeslots,f)
    vendor = 'G'
    Write_vendor_data(3,vendor,Timeslots,f)
    vendor = 'V'
    Write_vendor_data(3,vendor,Timeslots,f)
    vendor = 'BR'
    Write_vendor_data(4,vendor,Timeslots,f)
    vendor = 'DI'
    Write_vendor_data(4,vendor,Timeslots,f)
    vendor = 'BA'
    Write_vendor_data(5,vendor,Timeslots,f)
    vendor = 'M'
    Write_vendor_data(6,vendor,Timeslots,f)
    #EIMSKIP OG SAMSKIP
    vendor = 'EIM'
    Write_vendor_data(7,vendor,Timeslots,f)
    vendor = 'SAM'
    Write_vendor_data(8,vendor,Timeslots,f)
    '''
    #----------------------------------------------------------------------
    # ALAG & TARGET
    #----------------------------------------------------------------------
    # NOTE(review): the inner loops below reuse the outer loop variable ``i``;
    # the outer for-loop still advances correctly (it reassigns from its
    # iterator), but the shadowing is fragile.
    #Write the ALAG & CREATE the sequence i
    f.write("param A := \r\n")
    for i in range(1,Sendingar+1):
        f.write("{} {}\r\n".format(i,round(arr[i][4]*arr[i][3]))) #timevalue * Qty
    f.write(";\r\n")
    f.write("\r\n")
    #Write the TARGET
    # Slots 1-4 get target 1000; slots 5-8 get target 500, for every day.
    f.write("param Ttarget := \r\n")
    for i in range(1,Days+1):
        for x in range(1,Timeslots+1):
            if x in (1,2,3,4):
                f.write("{} {} {}\r\n".format(x,i,1000))
            if x in (5,6,7,8):
                f.write("{} {} {}\r\n".format(x,i,500))
    f.write(";\r\n")
    f.write("\r\n")
    #----------------------------------------------------------------------
    #CREATE OF SENDINGAR FOR EACH VENDOR
    #----------------------------------------------------------------------
    '''
    all_sendingar = []
    rest_sendingar = []
    Ol_kennitolur = ['420369-7789']
    vendorname = 'Olgerdin'
    Write_sendingar_data(arr,Ol_kennitolur,vendorname,Sendingar,f,all_sendingar)
    Cola_kennitolur = ['470169-1419']
    vendorname = 'Cola'
    Write_sendingar_data(arr,Cola_kennitolur,vendorname,Sendingar,f,all_sendingar)
    G_kennitolur = ['570169-0339']
    vendorname = 'Globus'
    Write_sendingar_data(arr,G_kennitolur,vendorname,Sendingar,f,all_sendingar)
    V_kennitolur = ['700103-3660']
    vendorname = 'Vintrio'
    Write_sendingar_data(arr,V_kennitolur,vendorname,Sendingar,f,all_sendingar)
    BR_kennitolur = ['541205-1520']
    vendorname = 'Brugghusstedja'
    Write_sendingar_data(arr,BR_kennitolur,vendorname,Sendingar,f,all_sendingar)
    DI_kennitolur = ['410999-2859']
    vendorname = 'Dista'
    Write_sendingar_data(arr,DI_kennitolur,vendorname,Sendingar,f,all_sendingar)
    BA_kennitolur = ['530303-2410']
    vendorname = 'Bakkus'
    Write_sendingar_data(arr,BA_kennitolur,vendorname,Sendingar,f,all_sendingar)
    M_kennitolur = ['550595-2579']
    vendorname = 'Mekka'
    Write_sendingar_data(arr,M_kennitolur,vendorname,Sendingar,f,all_sendingar)
    #EIMSKIP OG SAMSKIP
    EIM_kennitolur = ["601083-0789","470105-2240","490104-2160","450310-0500"
    ,"491007-1680","511105-1290","601289-1489","470302-4290",
    "640485-0949","420178-0349","531212-0530","451205-0560",
    "470706-1040","451295-2929","530206-0330","620509-0190","470205-0400"]
    vendorname = 'Eimskip'
    Write_sendingar_data(arr,EIM_kennitolur,vendorname,Sendingar,f,all_sendingar)
    #Ekki endilega réttar kennitölur...
    SAM_kennitolur = ['550394-2359','470415-1260','660509-0970','470710-0390','670616-1690',
    '460999-2519','501117-0210','520914-2180','500316-0470','490211-0630','681215-1740','430913-0690'
    ,'660169-1729','600112-1390','590515-3290','550609-1420','471289-2569','650387-1399','560113-0480','560793-2199',
    '550405-0400','571214-0240','451115-1460','510515-1020','440417-0510']
    vendorname = 'Samskip'
    Write_sendingar_data(arr,SAM_kennitolur,vendorname,Sendingar,f,all_sendingar)
    #test
    print(all_sendingar)
    for i in range(1,Sendingar):
    if i not in all_sendingar:
    rest_sendingar.append(i)
    print(rest_sendingar)
    '''
    #----------------------------------------------------------------------
    #Bannlisti & Fixlisti
    #----------------------------------------------------------------------
    # Bannlisti (ban list) is intentionally empty.
    f.write("set Bannlisti := \r\n")
    f.write(";\r\n")
    #s,t,d
    #print(arr)
    # Day offset is computed from the day-of-month of the first row.
    # NOTE(review): this breaks when a week spans a month boundary.
    Start_day = int(arr[0][2][:2])
    f.write("set Fixlisti := \r\n")
    for i in range(1,Sendingar+1):
        day = int(arr[i][2][:2].strip())-Start_day+1
        slot_vendor_data = []
        # Map the vendor's national ID (column 6) to its fixed time slot.
        if arr[i][6].strip() == '420369-7789':
            slot = 1
        elif arr[i][6].strip() == '470169-1419':
            slot = 2
        elif arr[i][6].strip() == '570169-0339':
            slot = 3
        elif arr[i][6].strip() == '700103-3660':
            slot = 3
        elif arr[i][6].strip() == '541205-1520':
            slot = 4
        elif arr[i][6].strip() == '410999-2859':
            slot = 4
        elif arr[i][6].strip() == '530303-2410':
            slot = 5
        elif arr[i][6].strip() == '550595-2579':
            slot = 6
        elif arr[i][6].strip() in ["601083-0789","470105-2240","490104-2160","450310-0500","491007-1680","511105-1290","601289-1489","470302-4290","640485-0949","420178-0349","531212-0530","451205-0560","470706-1040","451295-2929","530206-0330","620509-0190","470205-0400"]:
            slot = 7
        else:
            slot = 8
        f.write("{} {} {} \r\n".format(i,slot,day))
    f.write(";\r\n")
    f.write("end;\r\n")
    #counter = 0
    #newArr = [2]
    #dags = []
    #arr = list(dict.fromkeys(arr))
    #for i in range(1, len(arr)):
    #print('Printing each date only the first number: ', int(arr[counter][2][0:2]))
    #if(arr[counter][2][3:4] == '02'):
    #if((int(arr[counter+1][2][0:2]) !> int(arr[coun))
    #elif(arr[counter][2][3:4] == '04' or arr[counter][2][3:4] == '06' or arr[counter][2][3:4] == '09' or arr[counter][2][3:4] == '11'):
    #else
    #if(int(arr[counter+1][2][0:2]) !> int(arr[counter][2][0:2])+1):
    #newArr.append(arr[counter][2][0:2])
    #dags.append(arr[counter][2])
    #counter = counter + 1
    #print(arr)
    #dags2 = (set(dags))
    #dags3 = sorted(dags2, key=lambda x: datetime.strptime(x, "%d/%m/%Y").strftime("%Y-%m-%d"))
    # NOTE(review): debug output below runs once per week-iteration as
    # reconstructed -- verify placement.
    print(arr)
    print(selectstring)
|
[
"[email protected]"
] | |
c77a6b9e8035fcce367ab1e8750ee354760ca8c4
|
4a3df93a88f231cc5aa75cdcee1c8a2f07e95f56
|
/src/stubtool/types/pkg_resources.py
|
2529587e3376e1975af22de979813251d5d3671f
|
[] |
no_license
|
moreati/stubtool
|
4e5d1f9560abd58764a490e30ae1dddf4c50bff9
|
549abe25c13d9c7a05354055444f9be5277aa9e1
|
refs/heads/master
| 2021-01-14T12:35:47.647415 | 2015-11-15T00:36:52 | 2015-11-15T00:36:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 249 |
py
|
from __future__ import absolute_import
import pkg_resources._vendor.packaging._structures
# ``Infinity``/``NegativeInfinity`` in packaging._structures are singleton
# *instances*; take type() to recover their classes for stub generation.
Infinity = type(pkg_resources._vendor.packaging._structures.Infinity)
NegativeInfinity = type(pkg_resources._vendor.packaging._structures.NegativeInfinity)
|
[
"[email protected]"
] | |
57edf19b3571ac2f3999d957dd0aae9ec33deb9c
|
1ae0d8eb0cd5db0bd886b40912b910bbba908b70
|
/swap1,py
|
90718d99c64d33935f9a3da82974c8e7ce54bef3
|
[] |
no_license
|
keerthanachinna/beginnerset3
|
fae07713eac49f121e103b118e495019bf4102af
|
1cc4fe785d35035a184f128865019acb17ccf4e5
|
refs/heads/master
| 2020-03-21T09:55:59.035254 | 2018-08-08T15:56:26 | 2018-08-08T15:56:26 | 138,424,981 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 146 |
# Swap two values with Python's idiomatic tuple unpacking
# (no temporary variable needed).
x = 150
y = 160
x, y = y, x
print('The value of x after swapping: {}'.format(x))
print('The value of y after swapping: {}'.format(y))
|
[
"[email protected]"
] | ||
183d371c867e94d1a9c47d759689a27dc9d9a6f1
|
1253ece44b8b59bab091ab785df590fbec54f57a
|
/f0004_model.py
|
3dd82ec4c183574ad8d417558010b2db765fc615
|
[] |
no_license
|
edyadan/calidad
|
10ab1cf56ef098197e324d5925355a7464227622
|
e8705fc47b4519c6fcd15cece1985af9f531dfab
|
refs/heads/master
| 2022-07-12T21:54:25.754478 | 2019-08-04T20:26:53 | 2019-08-04T20:26:53 | 200,537,556 | 1 | 0 | null | 2022-06-21T22:27:56 | 2019-08-04T20:17:24 |
Python
|
UTF-8
|
Python
| false | false | 2,626 |
py
|
from aplication import db
from wtforms import Form, StringField, SelectField, validators, IntegerField
from sqlalchemy import Table, Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy import UniqueConstraint, exc
class f0004(db.Model):
    """SQLAlchemy model for form F-0004 inspection records.

    One row per inspected element; r1..r7 hold the individual check results
    as free-form strings.
    """
    __tablename__ = "f0004"
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, primary_key=True)
    # Location of the inspected element.
    area = db.Column(db.String, nullable=False)
    subarea = db.Column(db.String, nullable=False)
    ubicacion = db.Column(db.String, nullable=False)
    elemento = db.Column(db.String, nullable=False)
    # Stored as a string rather than a Date column.
    fecha = db.Column(db.String, nullable=False)
    plano = db.Column(db.String, nullable=False)
    # Individual inspection results.
    r1 = db.Column(db.String, nullable=False)
    r2 = db.Column(db.String, nullable=False)
    r3 = db.Column(db.String, nullable=False)
    r4 = db.Column(db.String, nullable=False)
    r5 = db.Column(db.String, nullable=False)
    r6 = db.Column(db.String, nullable=False)
    r7 = db.Column(db.String, nullable=False)
    observacion = db.Column(db.String, nullable=False)
class f0004_form(Form):
    """WTForms form mirroring the f0004 model, one field per column."""
    id = IntegerField('id')
    area = StringField('area')
    subarea = StringField('subarea')
    ubicacion = StringField('ubicacion')
    elemento = StringField('elemento')
    fecha = StringField('fecha')
    plano = StringField('plano')
    r1 = StringField('r1')
    r2 = StringField('r2')
    r3 = StringField('r3')
    r4 = StringField('r4')
    r5 = StringField('r5')
    r6 = StringField('r6')
    r7 = StringField('r7')
    observacion = StringField('observacion')
def f0004_convert(f0004, form):
    """Copy every submitted form field's ``.data`` onto the model instance.

    Returns the (mutated) *f0004* instance for convenience.
    """
    for field in ('id', 'area', 'subarea', 'ubicacion', 'elemento', 'fecha',
                  'plano', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7',
                  'observacion'):
        setattr(f0004, field, getattr(form, field).data)
    return f0004
def f0004_obj(f0004, obj):
    """Copy every persisted attribute from *obj* onto *f0004* and return it."""
    for field in ('id', 'area', 'subarea', 'ubicacion', 'elemento', 'fecha',
                  'plano', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7',
                  'observacion'):
        setattr(f0004, field, getattr(obj, field))
    return f0004
|
[
"[email protected]"
] | |
82c5347dab115be9855b892071b7b06b72a8d9b1
|
e9a5950b1b95da07e553c16bfcd8abb99e85f44d
|
/utils.py
|
e9c81e0f7c299a69e3922db4f7008941d17bf6da
|
[] |
no_license
|
guneetsk99/ArgumentMining_SharedTask
|
c2a1aba3d335af2d95ebcdd85412ecd6d06b110b
|
468f0439b13825ad39fdd14c38b1d86f6f1fff41
|
refs/heads/main
| 2023-06-18T19:17:57.948714 | 2021-07-18T21:05:39 | 2021-07-18T21:05:39 | 387,261,981 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 20,325 |
py
|
import multiprocessing
import pickle
import numpy as np
import sklearn
from typing import List, Tuple, Any
from transformers import *
import torch.nn as nn
from termcolor import colored
import os
from collections import defaultdict
import itertools
# Maps a pretrained-checkpoint name to the HuggingFace model/tokenizer classes
# used to load it; keys must match the encoder name passed in by the caller.
context_models = {
    'bert-base-uncased' : {"model": BertModel, "tokenizer" : BertTokenizer },
    'bert-base-cased' : {"model": BertModel, "tokenizer" : BertTokenizer },
    'bert-large-cased' : {"model": BertModel, "tokenizer" : BertTokenizer },
    'bert-base-chinese' : {"model": BertModel, "tokenizer" : BertTokenizer },
    'openai-gpt': {"model": OpenAIGPTModel, "tokenizer": OpenAIGPTTokenizer},
    'gpt2': {"model": GPT2Model, "tokenizer": GPT2Tokenizer},
    'ctrl': {"model": CTRLModel, "tokenizer": CTRLTokenizer},
    'transfo-xl-wt103': {"model": TransfoXLModel, "tokenizer": TransfoXLTokenizer},
    'xlnet-base-cased': {"model": XLNetModel, "tokenizer": XLNetTokenizer},
    'xlm-mlm-enfr-1024': {"model": XLMModel, "tokenizer": XLMTokenizer},
    'distilbert-base-cased': {"model": DistilBertModel, "tokenizer": DistilBertTokenizer},
    'roberta-base': {"model": RobertaModel, "tokenizer": RobertaTokenizer},
    'roberta-large': {"model": RobertaModel, "tokenizer": RobertaTokenizer},
    'xlm-roberta-base': {"model": XLMRobertaModel, "tokenizer": XLMRobertaTokenizer},
    'nghuyong/ernie-2.0-en': {"model": AutoModel, "tokenizer": AutoTokenizer},
}
class Metric():
    """Evaluation metrics for review/rebuttal argument-pair extraction.

    Operates on per-document sentence-pair tag grids (`predictions`,
    `goldens`) and per-document BIO tag sequences (`golden_bio`,
    `pred_bio`).  Positions 0..last_review_idx are the review side
    ("aspect" spans); positions after that, up to the sentence length,
    are the rebuttal side ("opinion" spans).
    BIO encoding used throughout: 1 = B (span start), 2 = I (span
    continuation), anything else = O.
    """
    def __init__(self, args, predictions, goldens, bert_lengths, sen_lengths, last_review_indice, golden_bio, pred_bio):
        self.args = args
        self.predictions = predictions
        self.goldens = goldens
        self.bert_lengths = bert_lengths
        self.sen_lengths = sen_lengths
        self.data_num = len(self.predictions)
        self.last_review_indice = last_review_indice
        self.golden_bio = golden_bio
        # NOTE(review): predicted BIO sequences are reversed here —
        # presumably the tagger emits them back-to-front; confirm upstream.
        self.pred_bio = [pred_seq[::-1] for pred_seq in pred_bio]

    def get_aspect_spans(self, biotags, last_review_idx): # review
        """Decode BIO tags into inclusive [start, end] spans over the
        review region (indices 0..last_review_idx)."""
        spans = []
        start = -1
        for i in range(last_review_idx+1):
            if biotags[i] == 1:
                start = i
                if i == last_review_idx:
                    spans.append([start, i])
                elif biotags[i+1] != 2:
                    # B not followed by I: a single-sentence span
                    spans.append([start, start])
            elif biotags[i] == 2:
                if i == last_review_idx:
                    spans.append([start, i])
                elif biotags[i+1] != 2:
                    spans.append([start, i])
        return spans

    def get_opinion_spans(self, biotags, length, last_review_idx): # rebuttal
        """Decode BIO tags into inclusive [start, end] spans over the
        rebuttal region (indices last_review_idx+1..length-1)."""
        spans = []
        start = -1
        for i in range(last_review_idx+1, length):
            if biotags[i] == 1:
                start = i
                if i == length-1:
                    spans.append([start, i])
                elif biotags[i+1] != 2:
                    spans.append([start, start])
            elif biotags[i] == 2:
                if i == length-1:
                    spans.append([start, i])
                elif biotags[i+1] != 2:
                    spans.append([start, i])
        return spans

    def find_pair(self, tags, aspect_spans, opinion_spans):
        """Pair every review span with every rebuttal span whose cell-level
        "pair" votes in `tags` reach the configured fraction of the span
        cross-product area.  Sentiment is not predicted here (fixed -1)."""
        pairs = []
        for al, ar in aspect_spans:
            for pl, pr in opinion_spans:
                tag_num = [0] * self.args.class_num
                for i in range(al, ar + 1):
                    for j in range(pl, pr + 1):
                        tag_num[int(tags[i][j])] += 1
                        # tag_num[int(tags[j][i])] += 1
                # last class index is the "paired" label; require enough votes
                if tag_num[self.args.class_num-1] < 1*(ar-al+1)*(pr-pl+1)*self.args.pair_threshold: continue
                sentiment = -1
                pairs.append([al, ar, pl, pr, sentiment])
        return pairs

    def score_aspect(self):
        """Count exact-match review spans.

        Returns:
            (correct_num, num_predicted, num_gold) — raw counts so the
            caller can combine them with the opinion side in score_bio().
        """
        assert len(self.predictions) == len(self.goldens)
        golden_set = set()
        predicted_set = set()
        for i in range(self.data_num):
            golden_aspect_spans = self.get_aspect_spans(self.golden_bio[i], self.last_review_indice[i])
            for spans in golden_aspect_spans:
                golden_set.add(str(i) + '-' + '-'.join(map(str, spans)))
            predicted_aspect_spans = self.get_aspect_spans(self.pred_bio[i], self.last_review_indice[i])
            for spans in predicted_aspect_spans:
                predicted_set.add(str(i) + '-' + '-'.join(map(str, spans)))
        correct_num = len(golden_set & predicted_set)
        # precision = correct_num / len(predicted_set) if len(predicted_set) > 0 else 0
        # recall = correct_num / len(golden_set) if len(golden_set) > 0 else 0
        # f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
        # return precision, recall, f1
        return correct_num, len(predicted_set), len(golden_set)

    def score_opinion(self):
        """Count exact-match rebuttal spans; same return shape as
        score_aspect()."""
        assert len(self.predictions) == len(self.goldens)
        golden_set = set()
        predicted_set = set()
        for i in range(self.data_num):
            golden_opinion_spans = self.get_opinion_spans(self.golden_bio[i], self.sen_lengths[i], self.last_review_indice[i])
            for spans in golden_opinion_spans:
                golden_set.add(str(i) + '-' + '-'.join(map(str, spans)))
            predicted_opinion_spans = self.get_opinion_spans(self.pred_bio[i], self.sen_lengths[i], self.last_review_indice[i])
            for spans in predicted_opinion_spans:
                predicted_set.add(str(i) + '-' + '-'.join(map(str, spans)))
        correct_num = len(golden_set & predicted_set)
        # precision = correct_num / len(predicted_set) if len(predicted_set) > 0 else 0
        # recall = correct_num / len(golden_set) if len(golden_set) > 0 else 0
        # f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
        # return precision, recall, f1
        return correct_num, len(predicted_set), len(golden_set)

    def score_bio(self, aspect, opinion):
        """Combine the raw counts from score_aspect() and score_opinion()
        into overall span P/R/F1 (percentages)."""
        correct_num = aspect[0] + opinion[0]
        pred_num = aspect[1] + opinion[1]
        gold_num = aspect[2] + opinion[2]
        precision = correct_num / pred_num * 100 if pred_num > 0 else 0
        recall = correct_num / gold_num * 100 if gold_num > 0 else 0
        f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
        return precision, recall, f1

    def score_pair(self):
        """Cell-level pair classification P/R/F1 over the flattened tag
        grids; entries labelled -1 are padding and are skipped."""
        # self.all_labels: (batch_size, num_sents, num_sents)
        all_labels = [k for i in range(self.data_num) for j in self.goldens[i] for k in j]
        all_preds = [k for i in range(self.data_num) for j in self.predictions[i] for k in j]
        tp = 0
        tn = 0
        fn = 0
        fp = 0
        for i in range(len(all_labels)):
            if all_labels[i] != -1:
                if all_labels[i] == 1 and all_preds[i] == 1:
                    tp += 1
                elif all_labels[i] == 1 and all_preds[i] == 0:
                    fn += 1
                elif all_labels[i] == 0 and all_preds[i] == 1:
                    fp += 1
                elif all_labels[i] == 0 and all_preds[i] == 0:
                    tn += 1
        precision = 1.0 * tp / (tp + fp) * 100 if tp + fp != 0 else 0
        recall = 1.0 * tp / (tp + fn) * 100 if tp + fn != 0 else 0
        f1 = 2.0 * precision * recall / (precision + recall) if precision + recall != 0 else 0
        return precision, recall, f1

    def score_uniontags(self):
        """End-to-end tuple P/R/F1: spans are decoded from BIO tags, paired
        via find_pair(), and a predicted tuple counts as correct only on an
        exact match with a gold tuple."""
        assert len(self.predictions) == len(self.goldens)
        golden_set = set()
        predicted_set = set()
        for i in range(self.data_num):
            golden_aspect_spans = self.get_aspect_spans(self.golden_bio[i], self.last_review_indice[i])
            golden_opinion_spans = self.get_opinion_spans(self.golden_bio[i], self.sen_lengths[i], self.last_review_indice[i])
            # print(golden_aspect_spans)
            # print(golden_opinion_spans)
            golden_tuples = self.find_pair(self.goldens[i], golden_aspect_spans, golden_opinion_spans)
            # print(golden_tuples)
            for pair in golden_tuples:
                golden_set.add(str(i) + '-' + '-'.join(map(str, pair)))
            predicted_aspect_spans = self.get_aspect_spans(self.pred_bio[i], self.last_review_indice[i])
            predicted_opinion_spans = self.get_opinion_spans(self.pred_bio[i], self.sen_lengths[i], self.last_review_indice[i])
            predicted_tuples = self.find_pair(self.predictions[i], predicted_aspect_spans, predicted_opinion_spans)
            for pair in predicted_tuples:
                predicted_set.add(str(i) + '-' + '-'.join(map(str, pair)))
        # print('gold: ', golden_set)
        # print('pred: ', predicted_set)
        correct_num = len(golden_set & predicted_set)
        precision = correct_num / len(predicted_set) * 100 if len(predicted_set) > 0 else 0
        recall = correct_num / len(golden_set) * 100 if len(golden_set) > 0 else 0
        f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
        return precision, recall, f1
def get_huggingface_optimizer_and_scheduler(args, model: nn.Module,
                                            num_training_steps: int,
                                            weight_decay: float = 0.0,
                                            eps: float = 1e-8,
                                            warmup_step: int = 0):
    """
    Copying the optimizer code from HuggingFace.

    Builds an AdamW optimizer with weight decay disabled for biases and
    LayerNorm weights (the standard BERT fine-tuning recipe), plus a
    linear warmup-then-decay learning-rate scheduler.

    Args:
        args: must provide `args.lr`, the base learning rate.
        model: the model whose named parameters are to be optimized.
        num_training_steps: total optimizer steps for the LR schedule.
        weight_decay: decay applied to non-excluded parameters.
        eps: AdamW numerical-stability epsilon.
        warmup_step: number of linear-warmup steps.

    Returns:
        (optimizer, scheduler) tuple.
    """
    print(colored(f"Using AdamW optimizer by HuggingFace with {args.lr} learning rate, "
                  f"eps: {eps}, weight decay: {weight_decay}, warmup_step: {warmup_step}, ", 'yellow'))
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr, eps=eps)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=warmup_step, num_training_steps=num_training_steps
    )
    return optimizer, scheduler
class DisjointSet:
    """Union-find over the integers 0..size-1, with union by size.

    Used to cluster span pairs that share a review span or a reply span
    into connected argument groups.
    """
    def __init__(self, size):
        self.size_ = size                      # number of elements
        self.size = [1]*size                   # subtree sizes; meaningful at roots
        self.connection = list(range(size))    # parent pointers; roots point to themselves

    def root(self, a):
        """Return the representative (root) of a's set."""
        if self.connection[a] == a:
            return a
        else:
            return self.root(self.connection[a])

    def find(self, a, b):
        """Return True iff a and b belong to the same set."""
        return self.root(a) == self.root(b)

    def union(self, a, b):
        """Merge the sets containing a and b (union by size).

        BUG FIX: the original compared self.size[a] / self.size[b] on the
        raw elements instead of on their roots, and did not guard against
        a and b already sharing a root — which double-counted sizes and
        made later size comparisons unreliable.
        """
        root_a, root_b = self.root(a), self.root(b)
        if root_a == root_b:
            return  # already in the same set; nothing to do
        # Attach the smaller tree under the larger one.
        if self.size[root_a] < self.size[root_b]:
            root_a, root_b = root_b, root_a
        self.size[root_a] += self.size[root_b]
        self.connection[root_b] = root_a

    def unify_(self):
        """Flatten the forest so connection[i] is i's root for every i."""
        for i in range(self.size_):
            root_ = self.root(self.connection[i])
            self.connection[i] = root_

    def cluster(self, paired_spans):
        """Group paired_spans (one entry per element) by connected component.

        Returns a dict mapping each component's root index to the list of
        that component's spans.
        """
        self.unify_()
        dic = defaultdict(list)
        for i in range(self.size_):
            # the key is just a dummy key since list is not hashable
            dic[self.connection[i]].append(paired_spans[i])
        return dic
class Writer():
    """
    output test dataset results to file

    Decodes predicted BIO tags and pair matrices into span groups,
    clusters pairs that share a review or reply span (via DisjointSet),
    and writes one `token<TAB>label` line per sentence to
    'submission.results', with a blank line between documents.
    Labels look like 'B-3' / 'I-3' (span position plus pair-group id)
    or plain 'O'.
    """
    def __init__(self, args, predictions, goldens, bert_lengths, sen_lengths, last_review_indice, golden_bio, pred_bio, sentence_pack):
        self.args = args
        self.predictions = predictions
        self.goldens = goldens
        self.bert_lengths = bert_lengths
        self.sen_lengths = sen_lengths
        self.data_num = len(self.predictions)
        self.last_review_indice = last_review_indice
        self.golden_bio = golden_bio
        # NOTE(review): predicted BIO sequences are reversed here, mirroring
        # Metric.__init__ — presumably the tagger emits them back-to-front.
        self.pred_bio = [pred_seq[::-1] for pred_seq in pred_bio]
        self.sentence_pack=sentence_pack
        self.output_dir = os.path.join('submission' + '.results')

    def get_review_spans(self, biotags, last_review_idx):
        """Decode BIO tags (1=B, 2=I) into inclusive [start, end] spans
        over the review region (indices 0..last_review_idx)."""
        spans = []
        start = -1
        for i in range(last_review_idx+1):
            if biotags[i] == 1:
                start = i
                if i == last_review_idx:
                    spans.append([start, i])
                elif biotags[i+1] != 2:
                    spans.append([start, start])
            elif biotags[i] == 2:
                if i == last_review_idx:
                    spans.append([start, i])
                elif biotags[i+1] != 2:
                    spans.append([start, i])
        return spans

    def get_rebuttal_spans(self, biotags, length, last_review_idx):
        """Decode BIO tags into inclusive [start, end] spans over the
        rebuttal region (indices last_review_idx+1..length-1)."""
        spans = []
        start = -1
        for i in range(last_review_idx+1, length):
            if biotags[i] == 1:
                start = i
                if i == length-1:
                    spans.append([start, i])
                elif biotags[i+1] != 2:
                    spans.append([start, start])
            elif biotags[i] == 2:
                if i == length-1:
                    spans.append([start, i])
                elif biotags[i+1] != 2:
                    spans.append([start, i])
        return spans

    def find_pair(self, tags, review_spans, reply_spans):
        """Pair review spans with reply spans whose cell-level "pair" votes
        in `tags` reach the configured fraction of the span product area."""
        pairs = []
        for al, ar in review_spans:
            for pl, pr in reply_spans:
                tag_num = [0] * self.args.class_num
                for i in range(al, ar + 1):
                    for j in range(pl, pr + 1):
                        tag_num[int(tags[i][j])] += 1
                        # tag_num[int(tags[j][i])] += 1
                if tag_num[self.args.class_num-1] < 1*(ar-al+1)*(pr-pl+1)*self.args.pair_threshold: continue
                pairs.append([al, ar, pl, pr])
        return pairs

    def output_results(self):
        """Write the per-sentence predicted labels for every document."""
        # Flatten all documents' sentences into one token list; `a` walks
        # through it in lockstep with the per-document label lists below.
        fin=[]
        a=0
        for i in self.sentence_pack:
            sent=i['sentence']
            tokens=sent.strip().split(' <sentsep> ')
            fin=fin+tokens
        with open(self.output_dir, 'w') as f:
            # f.write('\t'.join(['golden', 'pred']) + '\n')
            for i in range(self.data_num):
                # Decode spans and pairs for both gold and predicted tags.
                golden_review_spans = self.get_review_spans(self.golden_bio[i], self.last_review_indice[i])
                # review_golden = '|'.join(map(lambda span: '-'.join(map(str, span)), golden_review_spans))
                predicted_review_spans = self.get_review_spans(self.pred_bio[i], self.last_review_indice[i])
                # review_pred = '|'.join(map(lambda span: '-'.join(map(str, span)), predicted_review_spans))
                golden_reply_spans = self.get_rebuttal_spans(self.golden_bio[i], self.sen_lengths[i], self.last_review_indice[i])
                # reply_golden = '|'.join(map(lambda span: '-'.join(map(str, span)), golden_reply_spans))
                predicted_reply_spans = self.get_rebuttal_spans(self.pred_bio[i], self.sen_lengths[i], self.last_review_indice[i])
                # reply_pred = '|'.join(map(lambda span: '-'.join(map(str, span)), predicted_reply_spans))
                golden_pairs = self.find_pair(self.goldens[i], golden_review_spans, golden_reply_spans)
                # pair_golden = '|'.join(map(lambda pair: '-'.join(map(str, pair)), golden_pairs))
                predicted_pairs = self.find_pair(self.predictions[i], predicted_review_spans, predicted_reply_spans)
                # pair_pred = '|'.join(map(lambda pair: '-'.join(map(str, pair)), predicted_pairs))
                # Base BIO labels for every sentence of this document.
                golden_labels, pred_labels = ['O'] * self.sen_lengths[i], ['O'] * self.sen_lengths[i]
                for start, end in golden_review_spans:
                    golden_labels[start] = 'B'
                    for idx in range(start+1, end + 1):
                        golden_labels[idx] = 'I'
                for start, end in golden_reply_spans:
                    golden_labels[start] = 'B'
                    for idx in range(start+1, end + 1):
                        golden_labels[idx] = 'I'
                for start, end in predicted_review_spans:
                    pred_labels[start] = 'B'
                    for idx in range(start+1, end + 1):
                        pred_labels[idx] = 'I'
                for start, end in predicted_reply_spans:
                    pred_labels[start] = 'B'
                    for idx in range(start+1, end + 1):
                        pred_labels[idx] = 'I'
                # Cluster gold pairs that share a review span or a reply span,
                # then suffix each labelled sentence with its group id.
                golden_disjoint_set = DisjointSet(len(golden_pairs))
                for m, n in itertools.combinations(range(len(golden_pairs)), 2):
                    review_start1, review_end1, reply_start1, reply_end1 = golden_pairs[m]
                    review_start2, review_end2, reply_start2, reply_end2 = golden_pairs[n]
                    if (review_start1 == review_start2 and review_end1 == review_end2) or (reply_start1 == reply_start2 and reply_end1 == reply_end2):
                        golden_disjoint_set.union(m, n)
                paired_golden_spans = golden_disjoint_set.cluster(golden_pairs)
                for pair_idx, paired_spans in enumerate(paired_golden_spans.values(), 1):
                    for review_start, review_end, reply_start, reply_end in paired_spans:
                        for idx in range(review_start, review_end + 1):
                            # only suffix once: skip labels that already carry a digit
                            if not golden_labels[idx][-1].isdigit():
                                golden_labels[idx] += '-' + str(pair_idx)
                        for idx in range(reply_start, reply_end + 1):
                            if not golden_labels[idx][-1].isdigit():
                                golden_labels[idx] += '-' + str(pair_idx)
                # Same clustering and suffixing for the predicted pairs.
                pred_disjoint_set = DisjointSet(len(predicted_pairs))
                for m, n in itertools.combinations(range(len(predicted_pairs)), 2):
                    review_start1, review_end1, reply_start1, reply_end1 = predicted_pairs[m]
                    review_start2, review_end2, reply_start2, reply_end2 = predicted_pairs[n]
                    if (review_start1 == review_start2 and review_end1 == review_end2) or (reply_start1 == reply_start2 and reply_end1 == reply_end2):
                        pred_disjoint_set.union(m, n)
                paired_pred_spans = pred_disjoint_set.cluster(predicted_pairs)
                for pair_idx, paired_spans in enumerate(paired_pred_spans.values(), 1):
                    for review_start, review_end, reply_start, reply_end in paired_spans:
                        for idx in range(review_start, review_end + 1):
                            if not pred_labels[idx][-1].isdigit():
                                pred_labels[idx] += '-' + str(pair_idx)
                        for idx in range(reply_start, reply_end + 1):
                            if not pred_labels[idx][-1].isdigit():
                                pred_labels[idx] += '-' + str(pair_idx)
                # for pair_idx, (review_start, review_end, reply_start, reply_end) in enumerate(golden_pairs, 1):
                #     for idx in range(review_start, review_end + 1):
                #         golden_labels[idx] += '-' + str(pair_idx)
                #     for idx in range(reply_start, reply_end + 1):
                #         golden_labels[idx] += '-' + str(pair_idx)

                # for pair_idx, (review_start, review_end, reply_start, reply_end) in enumerate(predicted_pairs, 1):
                #     for idx in range(review_start, review_end + 1):
                #         pred_labels[idx] += '-' + str(pair_idx)
                #     for idx in range(reply_start, reply_end + 1):
                #         pred_labels[idx] += '-' + str(pair_idx)

                # golden_labels_str, pred_labels_str = ' '.join(golden_labels), ' '.join(pred_labels)
                # f.write('\t'.join([golden_labels_str, pred_labels_str]) + '\n')
                for j in range(self.sen_lengths[i]):
                    f.write('\t'.join([fin[a], pred_labels[j]]) + '\n')
                    a=a+1
                f.write('\n')
|
[
"[email protected]"
] | |
78f8c0f4f850229e0b5bb96e3f8fc5abdf9a63ca
|
9e5efb1c1aacd95b382d710f9978e08d72635c82
|
/DIY_LSTM.py
|
e50b6643796cf77b5f05f210d86d24a777ec5de6
|
[] |
no_license
|
2018-summer-DL-training-program/Lab-2-Image-Captioning
|
d00cf34e934621ddbe5f8002b1dc26c97648eb24
|
efe81f1a2cb8428e5e4c3587a9dadd34bc38e41f
|
refs/heads/master
| 2020-03-26T08:00:31.143666 | 2018-08-15T12:01:19 | 2018-08-15T12:01:19 | 144,681,769 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,308 |
py
|
import math
import torch
import warnings
import itertools
import numbers
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
from torch.nn.utils.rnn import PackedSequence
from torch.nn import init
import torch.nn as nn
class my_LSTM(nn.Module):
    """Skeleton for a hand-rolled LSTM layer (lab exercise).

    Only the activations and weight initialisation are implemented; the
    gate weight matrices, initial-state handling and the recurrence are
    left as TO-DOs for the student.
    """
    def __init__(self, input_size, hidden_size, num_layers, if_bias=True, batch_first=False):
        super(my_LSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.if_bias = if_bias
        self.batch_first = batch_first
        # NOTE(review): num_layers is accepted but never stored — confirm
        # whether the exercise expects a single-layer implementation.
        '''
        TO-DO: define each matric multiplication here
        '''

        self.sigmoid = nn.Sigmoid()
        self.tanh = nn.Tanh()
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init in [-1/sqrt(H), 1/sqrt(H)], the standard LSTM scheme.
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            init.uniform_(weight, -stdv, stdv)

    def forward(self, input_seq, hx=None):
        '''
        TO-DO: check if input_seq is a packed sequence. If yes, unpack it.
        '''
        # outputs
        hidden_state_list = []
        cell_state_list = []
        '''
        TO-DO: if hx is None, initialize it.
        '''
        if hx is None:
            pass
        else:
            pass
        '''
        TO-DO: implement LSTM here
        '''
        hidden_state_list = torch.cat(hidden_state_list, 0)
        cell_state_list = torch.cat(cell_state_list, 0)
        return hidden_state_list, (hidden_state_list, cell_state_list)
|
[
"[email protected]"
] | |
459acc3568c8d550b4a508be06eede9ec81e9d93
|
b055c6a1610ad7780d2a86506b773cb62eebc076
|
/Django ve MongoDb ile Telefon Rehberi Uygulaması/rehber/models.py
|
e69ee2de476baabd4f8d5ef1ed53bf4f01b7c630
|
[] |
no_license
|
kadircan23/DjangoileTelefonRehberi
|
201870a39b4fa853173679ac79d6bb4225645bea
|
cedeff950c30f6952a97f98827759cf51387435b
|
refs/heads/master
| 2023-03-05T00:32:25.928412 | 2021-02-21T12:19:47 | 2021-02-21T12:19:47 | 340,893,581 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 621 |
py
|
from django.db import models
from django.urls import reverse
from django.core.validators import RegexValidator
# Create your models here.
class rehber(models.Model):
    """Phone-book entry: a person's name, surname, phone number and address.

    Field labels (verbose_name) are Turkish; the phone number is stored as
    a positive integer and additionally constrained to at most 10 digits
    by the regex validator.
    """
    name=models.CharField(max_length=120, verbose_name='Adı')
    surname=models.CharField(max_length=120, verbose_name='Soyadı')
    number=models.PositiveIntegerField(verbose_name='Tel No', validators=[RegexValidator(r'^\d{1,10}$')])
    address=models.TextField(max_length=120,verbose_name='Adresi')

    def __str__(self):
        # Display entries by first name in admin/listings.
        return self.name
# def get_absolute_url(self):
# return reverse('rehber:detail', kwargs={'id':self.id})
|
[
"[email protected]"
] | |
c683dd17778e50627255cb6099eefeb30f9b9b3b
|
521e98d2dfcea3f8476dd2af0371516eccd76f72
|
/query_pets.py
|
c7b43b804b95928e7b7dde0dabbc7ef6c0f5c75a
|
[] |
no_license
|
jf4rr3ll/IS211_Assignment10
|
8e2546bbb13143ce8b196a3c9e12878f0e7f83bc
|
41a381a26cfcd44bd7d25f20646f7d6e6e23c4f3
|
refs/heads/master
| 2020-12-24T10:40:46.120458 | 2016-11-08T03:30:50 | 2016-11-08T03:30:50 | 73,137,164 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 736 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module queries the pets database"""
import sqlite3 as lite
import sys
person_ID = raw_input("Input Person ID: ")
con = lite.connect('pets.db')
with con:
cur = con.cursor()
cur.execute("SELECT * FROM pet "
"INNER JOIN person_pet"
"ON pet.id = person_pet.pet_id"
"INNER JOIN person"
"ON person_pet.person_id = person.id"
"WHERE person.id = ?", (person_ID))
rows = cur.fetchall()
for row in rows:
print "%s %s %s %s" % (row["id"], row["first_name"], row["last_name"], row["age"])
print "%s %s %s %s %s" % (row["id"], row["name"], row["breed"], row["age"], row["dead"])
|
[
"[email protected]"
] | |
1a11640edd46dab731b7bd9127b9f70bd3668848
|
be0a1ea576ae3d3f1e931547cbc971db5cb51239
|
/imge.py
|
a2852ebc0bc1ad33f8aecfd31fabf36e9fc2de96
|
[] |
no_license
|
jiyoonpark05/python
|
819ddce77d3006ae981b59a86b39073dcb446bd4
|
e6450a141dfd7ebf5ec0374c0280867a0e00b8f3
|
refs/heads/master
| 2022-12-24T15:25:26.349641 | 2020-10-03T17:50:01 | 2020-10-03T17:50:01 | 300,571,523 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 501 |
py
|
import dload
from bs4 import BeautifulSoup
from selenium import webdriver
import time

# Scrape thumbnail images from a Daum image search and save them as
# img/1.jpg, img/2.jpg, ...
driver = webdriver.Chrome('./chromedriver')
driver.get("https://search.daum.net/search?w=img&nil_search=btn&DA=NTB&enc=utf8&q=%EC%95%84%EB%9D%BC%EC%8B%9C")
time.sleep(5)  # give the page time to load its images

page_html = driver.page_source
parsed = BeautifulSoup(page_html, 'html.parser')

# Save each thumbnail under a 1-based running index.
for index, node in enumerate(parsed.select('#imgList > div > a > img'), start=1):
    dload.save(node['src'], f'img/{index}.jpg')

driver.quit()
|
[
"[email protected]"
] | |
b0474ec6fe305a889b8e4b892c09bb1184658bf3
|
4171c500f453fcab5e76521a3b27ee95c55dc600
|
/app.py
|
4dbb5287a6902fb6e4c7e121fa1d7899a9977dfd
|
[] |
no_license
|
Namyalg/streamlit-share
|
8642b74b3ec1f25ca7d1c0179af9f04182db2038
|
25927c7f36f5eba684069f18214edae197740081
|
refs/heads/main
| 2023-04-30T13:03:26.426903 | 2021-05-11T16:25:03 | 2021-05-11T16:25:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 254 |
py
|
from multiapp import MultiApp
import streamlit as st
from enter_case import enter_text
from pdf import upload_file
# Register the two sub-apps on the multi-page container and launch it.
app = MultiApp()
app.add_app("Enter the text of the case content", enter_text)
app.add_app("Upload the case file", upload_file)
app.run()
|
[
"[email protected]"
] | |
5a87147a18bb02b3dac68c82bd10d6619a7aafe8
|
9f03d5172562193695f9a6d44e0f1fb3e264e194
|
/day01/code/myscript.py
|
354b691882c718558dd15be12104d489b5d9094e
|
[] |
no_license
|
Easonchar/Pythondata-notebook
|
99ef3a9ac2efd3ed485704033764b6fa780b862d
|
672de7b0ddd40b19556e5f01d50550203b62236d
|
refs/heads/master
| 2020-04-13T18:08:24.945621 | 2018-12-28T04:46:22 | 2018-12-28T04:46:22 | 163,365,917 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 127 |
py
|
import random
def foo(n):
    """Return the sum of n random integers drawn uniformly from [0, 100]."""
    return sum(random.randint(0, 100) for _ in range(n))

A = 'abc'
|
[
"1009560311"
] |
1009560311
|
8d8b37634b63379e721b8c5f3bda2467d2438749
|
de19677d79cbafda1af531565d1ca1f52392e61c
|
/backEnd/python250/migrations/versions/52b3cdee5568_.py
|
cd93fd9cf54ee64cc935691a33fb7b94e60eb678
|
[] |
no_license
|
jingnanzh/EvolveU
|
c2175fd01e21aa7b3f5d8ba5bb9d52abe3ef7048
|
c1d77f11ec9aa41ddc8ee5c114e1f632bb8f1db6
|
refs/heads/master
| 2023-01-12T13:50:44.959914 | 2019-07-25T20:18:06 | 2019-07-25T20:18:06 | 198,889,855 | 0 | 0 | null | 2023-01-04T05:24:13 | 2019-07-25T19:27:22 |
JavaScript
|
UTF-8
|
Python
| false | false | 735 |
py
|
"""empty message
Revision ID: 52b3cdee5568
Revises: 62c2747a5145
Create Date: 2019-07-20 22:51:07.162561
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '52b3cdee5568'
down_revision = '62c2747a5145'
branch_labels = None
depends_on = None
def upgrade():
    """Drop the bogus foreign key linking invoices_products.qty to products.id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('invoices_products_qty_fkey', 'invoices_products', type_='foreignkey')
    # ### end Alembic commands ###
# ### end Alembic commands ###
def downgrade():
    """Recreate the foreign key removed by upgrade() (reverse migration)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_foreign_key('invoices_products_qty_fkey', 'invoices_products', 'products', ['qty'], ['id'])
    # ### end Alembic commands ###
|
[
"[email protected]"
] | |
3d7f8ce94144fef6f93f6d36fff9b1cb7645c9b0
|
e5321bd212801b41183e3227a2b7a58edb5caddf
|
/002_python_built-in-function/004_raw_flaot.py
|
c7788f04b2697e5e354b93cedd0414bcc7bc4dbf
|
[] |
no_license
|
anjan111/Arun_pavan
|
79081c55a16250098c49ad16ce5a6368392eb92c
|
fe292f74046a315dcff36bf71e45b26f3777faef
|
refs/heads/master
| 2020-07-22T02:56:14.286587 | 2019-11-27T03:55:46 | 2019-11-27T03:55:46 | 207,054,018 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 376 |
py
|
# raw_input===> float
# Python 2 demo: raw_input always returns a str; converting it with
# float() creates a new object (note how type() and id() change).
a = raw_input("enter float : ")
print "data in a : ",a
print(type(a))
print "memory : ",id(a)
print "**********************"
a = float(a)
print "data in a : ",a
print(type(a))
print "memory : ",id(a)

# write remaining 7 programs for each program introduce new datatype function
'''
complex
bool
str
list
tuple
set
dict
'''
|
[
"[email protected]"
] | |
f33e40785123417f2d60f24fefa651fd1737244d
|
accb745d7c03f1ef71d48720ca68087a96570b64
|
/JUAPP/apps.py
|
56daec438f27d5f572f56dc55385afde3e220a86
|
[] |
no_license
|
krishmi/django
|
e5acc94ca8c87fe5aabcfe4faaa4cb238393d0ff
|
1ee341d6a9f978c9886218b29226560dd6c7be2d
|
refs/heads/master
| 2020-04-04T03:37:08.473935 | 2018-11-01T15:37:15 | 2018-11-01T15:37:15 | 155,719,271 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 85 |
py
|
from django.apps import AppConfig
class JuappConfig(AppConfig):
    """Django application configuration for the JUAPP app."""
    name = 'JUAPP'
|
[
"[email protected]"
] | |
1287e5a813ace00140f4612e94c9a6f43b1c6cfe
|
3594dd22f5bd51b088ab94cfe420f5d1965413fc
|
/accounts/forms.py
|
999c7a559f2ce6ee34ee764a1c18ff587c83c931
|
[] |
no_license
|
sgd122/django-site-v1
|
953d04f5722d59c60efeb21a641c44e1affb51ff
|
76f905c6bd198ce4b7cd16f9ab424d8da65b6116
|
refs/heads/master
| 2023-02-01T10:03:40.010716 | 2020-12-19T10:17:21 | 2020-12-19T10:17:21 | 322,818,018 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,171 |
py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import (
password_validation,
)
from . import models
class LoginForm(forms.Form):
    """Email/password login form; validates credentials against the DB."""
    email = forms.EmailField(widget=forms.EmailInput(attrs={"placeholder": "Email"}))
    password = forms.CharField(
        widget=forms.PasswordInput(attrs={"placeholder": "Password"})
    )

    def clean(self):
        """Cross-field validation: look the user up by email, check the
        password, and attach a field-level error on failure."""
        email = self.cleaned_data.get("email")
        password = self.cleaned_data.get("password")
        try:
            user = models.User.objects.get(email=email)
            if user.check_password(password):
                return self.cleaned_data
            else:
                self.add_error("password", forms.ValidationError("비밀번호가 잘못되었습니다."))
        except models.User.DoesNotExist:
            self.add_error("email", forms.ValidationError("사용자가 존재하지 않습니다."))
class SignUpForm(forms.ModelForm):
    """Registration form: collects name/email/phone and a double-entered
    password, validates uniqueness and strength, and creates the user
    with username=email and a hashed password.
    """

    class Meta:
        model = models.User
        fields = ("first_name", "last_name", "email", "phone_number")
        widgets = {
            "first_name": forms.TextInput(attrs={"placeholder": "First Name"}),
            "last_name": forms.TextInput(attrs={"placeholder": "Last Name"}),
            "phone_number": forms.NumberInput(attrs={"placeholder": "Phone Number"}),
            "email": forms.EmailInput(attrs={"placeholder": "Email Name"}),
        }
        labels = {
            "phone_number": "휴대폰번호",
        }

    password = forms.CharField(
        widget=forms.PasswordInput(attrs={"placeholder": "Password"}),
        label="비밀번호",
    )
    password2 = forms.CharField(
        widget=forms.PasswordInput(attrs={"placeholder": "Confirm Password"}),
        label="비밀번호 확인",
    )

    def clean_email(self):
        """Reject emails that already belong to an existing user."""
        email = self.cleaned_data.get("email")
        try:
            models.User.objects.get(email=email)
            raise forms.ValidationError("이미 존재하는 이메일입니다.", code="existing_user")
        except models.User.DoesNotExist:
            return email

    def clean_password2(self):
        # BUG FIX: the original hook was named clean_password1 and read the
        # nonexistent field "password1", so a password mismatch was never
        # reported — and its error path referenced an undefined
        # self.error_messages dict.  Validate the confirmation field here.
        password = self.cleaned_data.get("password")
        password2 = self.cleaned_data.get("password2")
        if password and password2 and password != password2:
            raise forms.ValidationError(
                "비밀번호가 일치하지 않습니다.",
                code="password_mismatch",
            )
        return password2

    def _post_clean(self):
        super()._post_clean()
        # Validate the password after self.instance is updated with form data
        password = self.cleaned_data.get("password2")
        if password:
            try:
                password_validation.validate_password(password, self.instance)
            except forms.ValidationError as error:
                self.add_error("password2", error)

    def save(self, *args, **kwargs):
        """Create the user with username=email and a hashed password."""
        email = self.cleaned_data.get("email")
        password = self.cleaned_data.get("password")
        user = super().save(commit=False)
        user.username = email
        user.set_password(password)
        user.save()
        # BUG FIX: return the saved instance, matching ModelForm.save()'s
        # contract (the original implicitly returned None).
        return user
|
[
"[email protected]"
] | |
9f1f7c56dd3528ca30ea5de6164a333b6133d35e
|
ca07c7c71d595221d4524fc8801cb71ec9f6f321
|
/Features/source_features.py
|
fd63afd8d7c27e52a1b14f259d2e28a93bc96b84
|
[] |
no_license
|
srush/tf-fork
|
0abd47fcb4da213ed4cbf08d12fdfb9c421ee3a2
|
e362b8847333eec356ca1f17c4b5b6cdd7bd42ba
|
refs/heads/master
| 2021-01-21T23:04:00.449323 | 2010-11-05T19:12:24 | 2010-11-05T19:12:24 | 1,013,104 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 22 |
py
|
class SourceFeature(object):
    """Placeholder for a source-side feature.

    BUG FIX: the original line `class SourceFeature()` was a syntax error
    (missing colon and body); this stub makes the module importable.
    """
    pass
|
[
"srush@srush-laptop.(none)"
] |
srush@srush-laptop.(none)
|
5748492e1ac68fbb9456a149c63bf5d73cb70cb7
|
1edb8304c6429729ffc2bab8a13f4123e19d2b32
|
/azure-export/settings.py
|
c35f8f6b2df73b7a2759d73c4be11b093fe95853
|
[] |
no_license
|
watchdogpolska/docker-images
|
d8292fc03df806f5be3a976cf87272f2d46e0b13
|
7a569e1d0cef4a4f57517daeac0456a59a25d021
|
refs/heads/master
| 2021-09-22T00:26:11.317526 | 2021-08-14T02:40:43 | 2021-08-14T02:41:33 | 157,301,522 | 0 | 4 | null | 2021-07-15T23:54:21 | 2018-11-13T01:26:54 |
Python
|
UTF-8
|
Python
| false | false | 1,023 |
py
|
import os
import dataset
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.resource import ResourceManagementClient
from msrestazure.azure_active_directory import ServicePrincipalCredentials
# $ az ad sp create-for-rbac --name "MY-PRINCIPAL-NAME2" --password "XXX" --verbose
# $ az role assignment create --assignee {app_id} --role Reader

# Service-principal coordinates.  The non-secret IDs have hard-coded
# defaults but can be overridden via the environment; the secret MUST
# come from AZURE_APP_PASSWORD (no default — raises KeyError if unset).
tenant_id = os.environ.get('AZURE_TENANT_ID', '7dbd59e5-e4d9-499b-b5cb-005289cc158a')
app_id = os.environ.get('AZURE_APP_ID', 'bfeb6f69-5a18-4d0c-a669-2e7eb3798fdd')
password = os.environ['AZURE_APP_PASSWORD']
subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID', 'efeb9457-bf38-460f-a1e5-bb5ecc817987')

credentials = ServicePrincipalCredentials(
    client_id=app_id,
    secret=password,
    tenant=tenant_id
)

# Azure management clients used by the exporter.
storage_client = StorageManagementClient(
    credentials,
    subscription_id
)

resource_client = ResourceManagementClient(
    credentials,
    subscription_id
)

# Results store; falls back to an in-memory SQLite DB when DATABASE_URL is unset.
db = dataset.connect(os.environ.get('DATABASE_URL', 'sqlite:///:memory:'))
|
[
"[email protected]"
] | |
5931de956c0b0fe468509dbf0163d394084dd9b0
|
f6d23c9e3bd4e9870fc304a3c1012cc63d128e4b
|
/SentimentOfTrend/SentimentOfTrend/venv/bin/pip3.7
|
7d6bbeb6f47a973a2fc3dbc1c91e025374e90859
|
[] |
no_license
|
AimersUom/TrendAnalysis
|
e4e5dbb9f0bb2d0fa8c09b974ead864e33ac2111
|
c3f40d36295715aa2387ccab13caeff6b39c23ed
|
refs/heads/master
| 2020-04-27T11:24:17.050070 | 2019-03-10T09:14:29 | 2019-03-10T09:14:29 | 174,294,259 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 423 |
7
|
#!/Users/kaumadisamarathunga/PycharmProjects/GetTWEETS/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools entry point: strip the script/.exe suffix
    # from argv[0] so pip reports a clean program name, then dispatch to
    # pip's registered console entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
    )
|
[
"[email protected]"
] | |
d8e032b3398ca8b4d5089d70996f8278fc086e9d
|
123cf58c5dc4800d5d50fd2934cc63be1080e093
|
/models/string_cluster_model/encoder_network.py
|
33989eff917210d26d9f229c5dc93a45db8912b7
|
[] |
no_license
|
nitishgupta/char-encode-decode
|
dd303a9aa77a3af9000e275bcb86abb18d0b7d84
|
eb4bbb8be701c3cbb4476a779094c45458a1daef
|
refs/heads/master
| 2021-04-30T23:25:49.895472 | 2017-09-03T06:37:55 | 2017-09-03T06:37:55 | 66,794,519 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,167 |
py
|
import time
import tensorflow as tf
import numpy as np
from models.base import Model
class EncoderModel(Model):
    """Unsupervised Clustering using Discrete-State VAE

    TF1 character-level LSTM encoder: embeds the input id batch, runs a
    multi-layer dropout-wrapped LSTM over it, and exposes the last valid
    (length-aware) output of each sequence as `encoder_last_output` with
    shape [batch_size, h_dim].
    """
    def __init__(self, num_layers, batch_size, h_dim, input_batch, input_lengths,
                 char_embeddings, scope_name, dropout_keep_prob=1.0):
        self.num_layers = num_layers  # Num of layers in the encoder and decoder network
        # Size of hidden layers in the encoder and decoder networks. This will also
        # be the dimensionality in which each string is represented when encoding
        self.h_dim = h_dim
        self.batch_size = batch_size

        with tf.variable_scope(scope_name) as scope:
            encoder_cell = tf.nn.rnn_cell.BasicLSTMCell(h_dim, state_is_tuple=True)
            # Dropout is applied to the inputs only (output_keep_prob=1.0).
            encoder_dropout_cell = tf.nn.rnn_cell.DropoutWrapper(
              cell=encoder_cell,
              input_keep_prob=dropout_keep_prob,
              output_keep_prob=1.0)
            self.encoder_network = tf.nn.rnn_cell.MultiRNNCell(
              [encoder_dropout_cell] * self.num_layers, state_is_tuple=True)

            #[batch_size, decoder_max_length, embed_dim]
            self.embedded_encoder_sequences = tf.nn.embedding_lookup(char_embeddings,
                                                                     input_batch)

            self.encoder_outputs, self.encoder_states = tf.nn.dynamic_rnn(
              cell=self.encoder_network, inputs=self.embedded_encoder_sequences,
              sequence_length=input_lengths, dtype=tf.float32)

            # To get the last output of the encoder_network
            # Trick: reverse each sequence by its true length so the last
            # valid timestep lands at position 0, then slice it out.
            reverse_output = tf.reverse_sequence(input=self.encoder_outputs,
                                                 seq_lengths=tf.to_int64(input_lengths),
                                                 seq_dim=1,
                                                 batch_dim=0)
            en_last_output = tf.slice(input_=reverse_output,
                                      begin=[0,0,0],
                                      size=[self.batch_size, 1, -1])
            # [batch_size, h_dim]
            self.encoder_last_output = tf.reshape(en_last_output,
                                                  shape=[self.batch_size, -1],
                                                  name="encoder_last_output")
|
[
"[email protected]"
] | |
009ad8e83368f5d9aba96f1b9c1b389734d8abee
|
1f151bf72d930066ca8153c11a6e22c0ca4c27d4
|
/GCD.py
|
579adcbd868e063f62381655169d75505dcbed44
|
[] |
no_license
|
prasadyacham/BackendAutomation
|
0f03ac7e7d363c7225f2460c10bb6985059dc452
|
333384f4084a4da8388a71662da062f3fec10202
|
refs/heads/master
| 2023-08-01T11:05:18.157634 | 2021-09-10T20:08:07 | 2021-09-10T20:08:07 | 405,200,638 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 319 |
py
|
import sys
def _greatest_common_divisor_euclid(p,q):
if q==0:
return p
else:
remainder = p%q
return _greatest_common_divisor_euclid(q,remainder)
if __name__ == "__main__":
input = sys.stdin.read()
a, b = map (int, input.split())
print (_greatest_common_divisor_euclid(a,b))
|
[
"[email protected]"
] | |
b3c0b84433b5431d0077dba3b06e3bd573900130
|
c8cb561638de5c86580719521cfd2ceee817e6e3
|
/project/serializer.py
|
1a7b17a57bf16c8241811c1959a3596feed3a7d4
|
[
"MIT"
] |
permissive
|
KitsuneNoctus/myCity
|
d60634eda94f20f21be50bab04c6993637611f9b
|
977aa39fe605e4cd5707557292906571ab608884
|
refs/heads/master
| 2022-11-17T23:18:05.716239 | 2020-07-16T18:52:17 | 2020-07-16T18:52:17 | 270,813,523 | 0 | 0 |
MIT
| 2020-06-08T20:15:41 | 2020-06-08T20:15:40 | null |
UTF-8
|
Python
| false | false | 2,167 |
py
|
from django.contrib.auth.models import User, Group
# from users.models import CustomUser as User
from rest_framework import routers, generics, serializers, viewsets, permissions
from Pages.models import Page
# from Utils.models import Photo
from rest_framework import serializers
class PageSerializer(serializers.ModelSerializer):
    """Serialize every field of the Pages.models.Page model."""

    class Meta:
        model = Page
        fields = "__all__"
# class PhotoSerializer(serializers.ModelSerializer):
# class Meta:
# model = Photo
# fields = "__all__"
# Serializers define the API representation.
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of Django auth users (subset of fields)."""

    class Meta:
        model = User
        fields = ['url', 'username', 'email', 'groups', 'is_staff']
class GroupSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of Django auth groups."""

    class Meta:
        model = Group
        fields = ['url', 'name']
class PageListViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Pages to be listed.
    """
    queryset = Page.objects.all()
    serializer_class = PageSerializer
    # permission_classes = [permissions.IsAuthenticated]
# class PageDetailViewSet(viewsets.ModelViewSet):
# """
# API endpoint that allows Page to be viewed or edited.
# """
# queryset = Page.objects.all()
# serializer_class = PageSerializer
# # permission_classes = [permissions.IsAuthenticated]
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    Users are returned newest-first (ordered by -date_joined).
    """
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
    # permission_classes = [permissions.IsAuthenticated]
class GroupViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows groups to be viewed or edited.
    """
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
    # permission_classes = [permissions.IsAuthenticated]
# Register the viewsets on a default DRF router, which generates the URL
# patterns: /users/, /groups/ and /list/ (plus detail routes per object).
router = routers.DefaultRouter()
router.register(r'users', UserViewSet)
router.register(r'groups', GroupViewSet)
router.register(r'list', PageListViewSet)
# router.register(r'<int:pk>', PageDetailViewSet)
# router.register(r'<str:slug>', PageDetailViewSet)
|
[
"[email protected]"
] | |
b5056e893180c51f75de1fd397027e135c255cff
|
fe4b84171ecc8ba1e169d4f4f985df4ee448a093
|
/UNet_neum/UNet.py
|
8334678ecf787844c3dd727d2263614725bd13fe
|
[
"BSD-3-Clause"
] |
permissive
|
shenw33/generate_CFD
|
3507e896e2ac64717e01a2bd3d85f8d2c50ce5f9
|
578c3901bcfd961b0c4ceaf6688244923c002ca3
|
refs/heads/master
| 2023-04-11T18:46:47.811778 | 2022-01-18T18:43:45 | 2022-01-18T18:43:45 | 212,940,839 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,914 |
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
def conv_loss(domain_size=32):
    """Build a convolutional residual-loss function for the steady heat
    equation (Neumann boundary-condition version).

    Returns a closure ``loss(input_img, isNeum)`` that convolves the interior
    of ``input_img`` with a discrete-Laplacian kernel (and the left boundary
    with a Neumann kernel when ``isNeum[0]`` is set), at the full resolution
    plus a pyramid of 4x-downsampled resolutions.

    NOTE(review): ``L`` and ``dtype`` are read from module globals that are
    not defined in this file — confirm they are injected before calling.
    NOTE(review): ``img_size /= 4`` makes ``img_size`` a float on Python 3,
    which then feeds ``np.linspace``'s sample count — verify on the NumPy
    version in use.
    """
    h = L / (domain_size-3)  # grid spacing; assumes L is the physical domain length — TODO confirm
    # 5-point Laplacian stencil (interior) and one-sided Neumann stencil (left edge).
    kernel = torch.tensor([[[[0, 1/4, 0], [1/4, -1, 1/4], [0, 1/4, 0]]]]).type(dtype)
    bd_kernel = torch.tensor([[[[0, 1/4, 0], [-1/2*h, -1, 1/2], [0, 1/4, 0]]]]).type(dtype)
    full_size = domain_size
    img_size = full_size
    reductions = []
    lambd = 128.0  # weight of the boundary term relative to the interior term
    # Precompute index grids for each 4x-downsampled level (boundary rows/cols
    # are always kept so the reduced image retains its edges).
    while img_size > 32:
        img_size /= 4
        indices = np.round(np.linspace(1, full_size-2, img_size)).astype(np.int32)
        indices = np.insert(indices, 0, 0)
        indices = np.append(indices, full_size - 1)
        reductions.append(np.ix_(indices, indices))
    def loss(input_img, isNeum):
        # Interior residual at full resolution.
        img = input_img[:,:,1:-1,1:-1]
        total_loss = F.conv2d(img, kernel).abs().mean()  # main loss from original img
        if isNeum[0]:
            bd = input_img[:,:,1:-1,0:3]  # left-boundary strip affecting the Neumann condition
            total_loss += lambd * F.conv2d(bd, bd_kernel).abs().mean()  # total loss = internal + boundary loss
        # NOTE(review): this first pyramid loop computes `bd` but its loss term
        # is commented out — the loop currently has no effect.
        for rows, cols in reductions:
            reduced_img = input_img[:,:,rows,cols]  # include NMBC
            bd = reduced_img[:,:,1:-1,0:3]
            # total_loss += F.conv2d(bd, bd_kernel).abs().mean()
        # Interior residual at each downsampled level.
        for rows, cols in reductions:
            reduced_img = input_img[:,:,rows,cols]
            total_loss += F.conv2d(reduced_img[:,:,1:-1,1:-1], kernel).abs().mean()
        return total_loss
    return loss
class UNet(nn.Module):
    """U-Net style encoder/decoder for single-channel square fields.

    Encodes with log2(img_size) strided Conv2d layers and decodes with
    ConvTranspose2d layers using skip connections; the output is rescaled
    from tanh's [-1, 1] to [0, 1] and boundary pixels are clamped back to
    the input's values (Dirichlet), except where a Neumann flag releases
    the left boundary.
    """

    def __init__(
            self,
            dtype,
            img_size = 32,  # pre-defined domain_size settings
            filters = 64,  # number of filters
    ):
        super().__init__()
        self.image_size = img_size
        self.layers = int(np.log2(img_size))  # number of down/up-sampling layers
        self.filters = filters
        self.dtype = dtype
        # Encoder: channel count doubles each layer, capped at 8*filters.
        self.encoding_layers = nn.ModuleList()
        self.encoding_BN = nn.ModuleList()
        for i in range(self.layers):
            if i == 0:
                self.encoding_layers.append(nn.Conv2d(1, filters, kernel_size=4, stride=2, padding=1))
            else:
                self.encoding_layers.append(nn.Conv2d(min(2**(i-1),8)*filters, min(2**i, 8)*filters, kernel_size=4, stride=2, padding=1))
            self.encoding_BN.append(nn.BatchNorm2d(min(2**i*filters, 8*filters)))
        self.encoded = None  # bottleneck activation, captured during forward()
        # Decoder: mirror of the encoder; input channels are doubled by the
        # skip-connection concatenation except at the innermost layer.
        self.decoding_layers = nn.ModuleList()
        self.decoding_BN = nn.ModuleList()
        for i in range(self.layers)[::-1]:
            if i == self.layers-1:
                self.decoding_layers.append(nn.ConvTranspose2d(min(2**i*filters, 8*filters), min(2**(i-1)*filters, 8*filters), kernel_size=4, stride=2, padding=1))
            elif i == 0:
                self.decoding_layers.append(nn.ConvTranspose2d(filters*2, 1, kernel_size=4, stride=2, padding=1))
            else:
                self.decoding_layers.append(nn.ConvTranspose2d(min(2**i*filters,8*filters)*2, min(2**(i-1)*filters, 8*filters), kernel_size=4, stride=2, padding=1))
            self.decoding_BN.append(nn.BatchNorm2d(min(max(2**(i-1),1)*filters, 8*filters)))
        # One-pixel boundary mask for the full image (currently unused in
        # forward(), which builds its own two-pixel mask — see below).
        self.bd = torch.zeros(1,1,img_size,img_size)
        self.bd[:,:,:,0] = 1
        self.bd[:,:,0,:] = 1
        self.bd[:,:,:,-1] = 1
        self.bd[:,:,-1,:] = 1
        self.bd = self.bd.type(dtype)

    # NOTE(review): mutable default argument; harmless here because isNeum is
    # only read, but a tuple default would be safer.
    def forward(self, x, isNeum=[False, False, False, False]):
        ini_state = x  # kept so boundary pixels can be restored at the end
        x_copy = []  # encoder activations saved for the skip connections
        for i in range(self.layers):
            if i == 0:
                x = F.leaky_relu(self.encoding_layers[i](x), 0.2)
            elif i == self.layers - 1:
                x = self.encoding_layers[i](x)  # innermost layer: no BN/activation here
            else:
                x = F.leaky_relu(self.encoding_BN[i](self.encoding_layers[i](x)), 0.2)
            x_copy.append(x)
        self.encoded = x_copy.pop(-1)  # bottleneck; not used as a skip input
        for i in range(self.layers):
            if i == 0:
                x = self.decoding_BN[i](self.decoding_layers[i](F.relu(x)))
            elif i == self.layers - 1:
                x = torch.tanh(self.decoding_layers[i](F.relu(torch.cat((x,x_copy[0]), dim=1))))
            else:
                x = self.decoding_BN[i](self.decoding_layers[i](F.relu(torch.cat((x,x_copy[-1*i]), dim=1))))
        # Two-pixel boundary mask; the Neumann flag frees the left edge's
        # interior column so the network can predict it.
        nmbc = 0 * x
        nmbc[:,:,:,0:2] = 1
        nmbc[:,:,0:2,:] = 1
        nmbc[:,:,:,-2:] = 1
        nmbc[:,:,-2:,:] = 1
        if isNeum[0]:
            nmbc[:,:,2:-2,1] = 0
        T_results = (x + 1) * 1/2  # x belongs to [-1,1], then (x + 1)* 1/2 *100scaled to 0 ~ 100
        fixed_values = nmbc
        # Keep masked (boundary) pixels from the input; take the rest from the net.
        return T_results * (1 - fixed_values) + ini_state * fixed_values
        # return T_results * (1 - self.bd) + ini_state * self.bd
|
[
"[email protected]"
] | |
f8225fef502d1b9392e1df50f78fc03ce0842a56
|
85690414e489c1f3473c261c25a53cb888b58a52
|
/prototypes/password.py
|
23b6446fa29506eae6e1f7c4df0caeec8ed335bb
|
[
"MIT"
] |
permissive
|
ivanleoncz/PythonEggs
|
eb95c16b8632fd7782f707defca2295c871c06ff
|
540843dcf6fba4b3fe0d6d57dd19654f33cccb74
|
refs/heads/master
| 2022-01-13T06:15:04.315564 | 2019-06-06T05:40:11 | 2019-06-06T05:40:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,365 |
py
|
#!/usr/bin/python3
""" Encrypting and validating passwords."""
from getpass import getpass
__author__ = "@ivanleoncz"
import bcrypt
def encrypt(p):
    """
    Concept:
        Hashes a plain text password with a freshly generated bcrypt salt.
    Parameter:
        p: Plain text password.
    Return:
        bcrypt hash (bytes) of the plain text password.
    """
    return bcrypt.hashpw(p.encode('utf-8'), bcrypt.gensalt())
def validate(p, hashed):
    """
    Concept:
        Validates a password by re-hashing the candidate with the stored
        hash as salt (bcrypt embeds the salt in the hash) and comparing
        the result against the stored hash.
    Parameters:
        p: Plain text password.
        hashed: Generated hash, based on plain text password.
    Return:
        True: if the candidate matches the stored hash.
        False: otherwise.
    """
    import hmac  # stdlib; local import keeps the module's import block untouched

    new_hash = bcrypt.hashpw(p.encode('utf-8'), hashed)
    # hmac.compare_digest is constant-time, avoiding the timing side channel
    # a plain == comparison can leak when comparing secret-derived values.
    return hmac.compare_digest(new_hash, hashed)
"""
def generate():
import string
import random
chars = string.ascii_uppercase + string.ascii_lowercase + string.punctuation + string.digits
print(chars)
print("".join(random.choice(chars) for step in range(10)))
print("".join(random.choice(chars) for step in range(10)))
"""
|
[
"[email protected]"
] | |
2b3c16897a3b35cc9e66306da93eacb32c23e5ef
|
0d5de943909877c01b485d8a918d8bef0cf9e196
|
/plugins/CompleteLikeEclipse/scribes/edit/complete_like_eclipse/__init__.py
|
1bf51577114ffc6ae996e45832814ec68d606743
|
[
"MIT"
] |
permissive
|
baverman/scribes-goodies
|
31e2017d81f04cc01e9738e96ceb19f872a3d280
|
f6ebfe62e5103d5337929648109b4e610950bced
|
refs/heads/master
| 2021-01-21T10:13:08.397980 | 2013-09-25T16:33:05 | 2013-09-25T16:33:05 | 854,207 | 2 | 1 | null | 2013-09-25T16:33:05 | 2010-08-22T03:12:39 |
Python
|
UTF-8
|
Python
| false | false | 4,949 |
py
|
from gettext import gettext as _
from string import whitespace
from scribes.helpers import TriggerManager, Trigger, connect_external, connect_all
from signals import Signals
from IndexerProcessManager import Manager as IndexerProcessManager
from DictionaryManager import Manager as DictionaryManager
from ProcessCommunicator import Communicator as ProcessCommunicator
from TextExtractor import Extractor as TextExtractor
from BufferMonitor import Monitor as BufferMonitor
# Editor trigger bound to <Alt>/ that fires the word-completion action; it is
# applied to Plugin.cycle below via the @trigger decorator.
trigger = Trigger('complete-word', '<alt>slash',
    'Eclipse like word completition', 'Text Operations')
class Plugin(object):
    """Eclipse-style word completion for the Scribes editor (Python 2).

    Pressing the trigger cycles through dictionary words that share the
    prefix under the cursor, replacing the partial word in the buffer.
    The word dictionary is maintained asynchronously by the indexer /
    dictionary-manager helpers wired up in __init__.
    """

    def __init__(self, editor):
        self.editor = editor
        self.signals = Signals()
        self.triggers = TriggerManager(editor)
        # Hook every decorated method below to its signal/trigger source.
        connect_all(self, self.signals, self.triggers, textbuffer=self.editor.textbuffer)
        self.block_word_reset = False
        self.words = None         # {word: occurrence count}, filled by word_list_updated
        self.start_word = None    # prefix the current completion cycle started from
        self.start_offset = None  # buffer offset where that prefix begins
        self.indexer = IndexerProcessManager(self.signals.sender, editor)
        self.dictionary_manager = DictionaryManager(self.signals.sender, editor)
        self.communicator = ProcessCommunicator(self.signals.sender, editor)
        self.extractor = TextExtractor(self.signals.sender, editor)
        self.buffer_monitor = BufferMonitor(self.signals.sender, editor)

    def unload(self):
        # Tear down all helpers by broadcasting the destroy signal.
        self.signals.destroy.emit()
        return False

    def is_valid_character(self, c):
        """Return True for characters that can be part of a completable word."""
        if c in whitespace:
            return False
        return c.isalpha() or c.isdigit() or (c in ("-", "_"))

    def backward_to_word_begin(self, iterator):
        """Move the text iterator to the first character of the current word."""
        if iterator.starts_line(): return iterator
        iterator.backward_char()
        while self.is_valid_character(iterator.get_char()):
            iterator.backward_char()
            if iterator.starts_line(): return iterator
        iterator.forward_char()
        return iterator

    def forward_to_word_end(self, iterator):
        """Move the text iterator just past the last character of the current word."""
        if iterator.ends_line(): return iterator
        if not self.is_valid_character(iterator.get_char()): return iterator
        while self.is_valid_character(iterator.get_char()):
            iterator.forward_char()
            if iterator.ends_line(): return iterator
        return iterator

    def get_word_before_cursor(self):
        """Return (word, start_iter) for the word ending at the cursor, or (None, None)."""
        iterator = self.editor.cursor.copy()
        # If the cursor is in front of a valid character we ignore
        # word completion.
        if self.is_valid_character(iterator.get_char()):
            return None, None
        if iterator.starts_line():
            return None, None
        iterator.backward_char()
        if not self.is_valid_character(iterator.get_char()):
            return None, None
        start = self.backward_to_word_begin(iterator.copy())
        end = self.forward_to_word_end(iterator.copy())
        word = self.editor.textbuffer.get_text(start, end).strip()
        return word, start

    def get_matches(self, string):
        """Return dictionary words starting with *string*, most frequent first."""
        if not self.words:
            return None
        result = []
        for word, count in self.words.iteritems():
            if word != string and word.startswith(string):
                result.append((word.encode('utf8'), count))
        result.sort(key=lambda r: r[1], reverse=True)
        return [r[0] for r in result]

    @trigger
    def cycle(self, *args):
        """Replace the word before the cursor with the next candidate completion."""
        word_to_complete, start = self.get_word_before_cursor()
        if not word_to_complete:
            return False
        # A new word or a new position restarts the cycle from this prefix.
        if not self.start_word or self.start_offset != start.get_offset():
            self.start_word = word_to_complete
            self.start_offset = start.get_offset()
        matches = self.get_matches(self.start_word)
        if matches:
            idx = 0
            try:
                # If the buffer already holds a candidate, advance to the next one.
                idx = matches.index(word_to_complete)
                idx = (idx + 1) % len(matches)
            except ValueError:
                pass
            if matches[idx] == word_to_complete:
                self.editor.update_message(_("Word completed already"), "yes", 1)
                return False
            # Block our own changed-handler so the replacement below does not
            # reset the completion cycle state.
            # NOTE(review): buffer_changed_handler is not assigned in this
            # class — presumably created by connect_external/connect_all; verify.
            self.buffer_changed_handler.block()
            end = self.editor.cursor.copy()
            self.editor.textbuffer.delete(start, end)
            self.editor.textbuffer.insert(start, matches[idx])
            self.editor.response()
            self.buffer_changed_handler.unblock()
        else:
            self.editor.update_message(_("No word to complete"), "no", 1)
        return False

    @Signals.dictionary
    def word_list_updated(self, sender, words):
        # Receive the freshly indexed {word: count} dictionary.
        self.words = words
        return False

    @connect_external('textbuffer', 'changed')
    def buffer_changed(self, *args):
        # Any buffer edit invalidates the current completion cycle.
        # NOTE(review): sets start_iter, but the rest of the class tracks
        # start_offset — start_iter looks like a stale name; confirm.
        self.start_word = None
        self.start_iter = None
        return False
|
[
"[email protected]"
] | |
0478e749c690f7d3c89ba75c1b9d3880465808f4
|
6e309bc7a2facb9d06dcadd6843471b14d0981b3
|
/correlate_image.py
|
8fd5157762a3bd2c2a5cd894bd2feacd518125a4
|
[] |
no_license
|
HaohuiHU/IVUS
|
d6841acc20ccf3e3e002448e31c078b1d113d316
|
ccbd22707936f2ec63446c7c73e42d0a3e2ef22b
|
refs/heads/master
| 2023-03-21T20:00:13.238324 | 2020-02-13T22:42:25 | 2020-02-13T22:42:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,696 |
py
|
# this function computes the correlation between two polar domain images
# The images should have been shifted so that the lumen is removed
# image1 and image2 are arrays of format(no. images, depth, angle)
import numpy as np
from matplotlib import pyplot as plt
def compute(image1, image2, step):
    """Correlate every polar-domain image in *image1* against every image in
    *image2* over all rotations that are multiples of *step* degrees.

    :param image1: array of shape (num_images, depth, angle) or (depth, angle)
    :param image2: array with the same layout, rotated and compared against image1
    :param step: angular step in degrees; should divide 360 evenly
    :return: array of shape (num_images1, 360 // step, num_images2)
    """
    # ensure that the inputs are 3D arrays: (num_images, depth, angle)
    if len(image1.shape) == 2:
        image1 = np.reshape(image1, [1, image1.shape[0], image1.shape[1]])
        image2 = np.reshape(image2, [1, image2.shape[0], image2.shape[1]])

    num_images1 = image1.shape[0]
    num_images2 = image2.shape[0]

    # Integer division: array shapes and the linspace sample count must be
    # ints — `360 / step` is a float on Python 3 and breaks np.zeros/np.linspace.
    num_angles = 360 // step
    corr = np.zeros([num_images1, num_angles, num_images2])
    # `np.int` was removed in NumPy 1.24; the builtin `int` is its replacement.
    angles = np.linspace(step, 360, num_angles, dtype=int)

    # permute so that num_images is the third dimension: (depth, angle, images)
    image1 = np.transpose(image1, [1, 2, 0])
    image2 = np.transpose(image2, [1, 2, 0])

    # operate only over the depth range both stacks share
    max_depth = min(image1.shape[0], image2.shape[0])
    image1 = image1[0:max_depth, :, :]
    image2 = image2[0:max_depth, :, :]

    for i in range(num_images1):
        for j in range(angles.shape[0]):
            # rotate image2 clockwise by angles[j] degrees (columns = degrees)
            image2_shifted = np.roll(image2, -angles[j], axis=1)
            corr[i, j, :] = correlation(image1[:, :, i], image2_shifted)
        print("Processing image {}".format(i))
    return corr
def correlation(image1, image2):
    """Pearson correlation between a reference image and rotated image(s),
    rescaled from [-1, 1] to [0, 1].

    :param image1: reference image, shape (m, n)
    :param image2: rotated image(s), shape (m, n) or (m, n, p)
    :return: (1, p) array of rescaled correlation coefficients
    """
    # Reference statistics (scalars) and per-slice statistics for image2.
    ref_mean = np.mean(image1)
    ref_std = np.std(image1)
    rot_mean = np.mean(image2, axis=(0, 1))
    rot_std = np.std(image2, axis=(0, 1))
    denom = ref_std * rot_std

    # Flatten in Fortran (column-major) order so each column of `rot_flat`
    # is one slice of image2.
    n_pixels = image2.shape[0] * image2.shape[1]
    ref_flat = np.reshape(image1, (-1, 1), order='F')
    rot_flat = np.reshape(image2, (n_pixels, -1), order='F')

    # Pearson correlation: covariance of de-meaned images over std products.
    pearson = np.dot((ref_flat - ref_mean).T, (rot_flat - rot_mean)) / denom
    pearson = pearson / n_pixels
    # Map [-1, 1] -> [0, 1].
    return (pearson + 1) / 2
|
[
"[email protected]"
] | |
865863231c1bcff849f7fb8c7a024438ae3a314c
|
0ee42b307fc252acabc1acba23300b8b51184ecb
|
/plot_filtered_aerosol.py
|
9b1a70df7e332cd6dc9c51437b98a0b5522c071d
|
[] |
no_license
|
KimDube/Masters-O3-Aerosol-Temp
|
5689a1b9fad1d5a12672e7e31ce44e630065d106
|
38e16adea952ffdd1ba504e36cf557471e944fbe
|
refs/heads/master
| 2020-03-20T15:08:57.122807 | 2018-08-21T20:38:49 | 2018-08-21T20:38:49 | 137,504,613 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,413 |
py
|
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import numpy as np
import pandas as pd
from Code_TimeSeriesAnalysis import SolarData
# Altitude bins in km (19.5 .. 34.5 km in 1 km steps) matching the rows of
# the loaded temperature/aerosol arrays.
alts = np.arange(19500, 35500, 1000) / 1000

# Mg II solar index, 2002-01-01 .. 2015-12-31, as a fractional anomaly.
mg_02to15 = SolarData.loadmg2(2002, 1, 1, 2015, 12, 31)
mganomaly = (mg_02to15 - np.nanmean(mg_02to15)) / np.nanmean(mg_02to15)
# Band-pass: 6-day running mean minus its 35-day running mean.
ymg = pd.Series(mganomaly)
fmg = ymg.rolling(center=True, window=6).mean()
f2mg = fmg.rolling(center=True, window=35).mean()
xmg_02to15 = fmg - f2mg

# Pre-filtered tropical anomalies, indexed (altitude, day).
temperature = np.load('/home/kimberlee/Masters/npyvars/temperature_02to15_trop_filtered.npy')
aerosol = np.load('/home/kimberlee/Masters/npyvars/aerosol_02to15_trop_filtered.npy')

# Daily date axis matching the 2002-2015 data arrays.
start = datetime.date(2002, 1, 1)
end = datetime.date(2015, 12, 31)
delta = end - start
days = []
for j in range(delta.days + 1):
    days.append(start + datetime.timedelta(days=j))

# Day-index window to plot (offsets into the daily axis above).
startofyear = 4018+366  # 730
endofyear = 4018+365+366+150+500  # 1096

# change time axis to have month only
from matplotlib.dates import MonthLocator, DateFormatter
months = MonthLocator(range(1, 13), bymonthday=1)
monthsFmt = DateFormatter("%b")

sns.set(context="talk", style="white", rc={'font.family': [u'serif']})
colours = ['blue', 'grass green', 'red']
sns.set_palette(sns.xkcd_palette(colours))

# Five stacked panels, one per selected altitude index (15, 12, 9, 6, 2).
# Aerosol/temperature anomalies are scaled x100 (percent); Mg II is scaled
# x500 so all three series share the same axis.
f, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, sharex=True, sharey=True, figsize=(12, 12))

ax1.plot(days[startofyear:endofyear], 100 * aerosol[15, startofyear:endofyear], label='Aerosol')
ax1.plot(days[startofyear:endofyear], 100 * temperature[15, startofyear:endofyear], label='Temperature')
ax1.plot(days[startofyear:endofyear], 500 * xmg_02to15[startofyear:endofyear], label='Mg II')
# Shared legend across the top of the figure.
ax1.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=3, mode="expand", borderaxespad=0.)
ax1.text(days[1050], 4, '%.1f km' % alts[15])

ax2.plot(days[startofyear:endofyear], 100 * aerosol[12, startofyear:endofyear], label='Aerosol')
ax2.plot(days[startofyear:endofyear], 100 * temperature[12, startofyear:endofyear], label='Temperature')
ax2.plot(days[startofyear:endofyear], 500 * xmg_02to15[startofyear:endofyear], label='Mg II')
ax2.text(days[1050], 4, '%.1f km' % alts[12])

ax3.plot(days[startofyear:endofyear], 100 * aerosol[9, startofyear:endofyear], label='Aerosol')
ax3.plot(days[startofyear:endofyear], 100 * temperature[9, startofyear:endofyear], label='Temperature')
ax3.plot(days[startofyear:endofyear], 500 * xmg_02to15[startofyear:endofyear], label='Mg II')
ax3.set_ylabel("Anomaly [%]")
ax3.text(days[1050], 4, '%.1f km' % alts[9])

ax4.plot(days[startofyear:endofyear], 100 * aerosol[6, startofyear:endofyear], label='Aerosol')
ax4.plot(days[startofyear:endofyear], 100 * temperature[6, startofyear:endofyear], label='Temperature')
ax4.plot(days[startofyear:endofyear], 500 * xmg_02to15[startofyear:endofyear], label='Mg II')
ax4.text(days[1050], 4, '%.1f km' % alts[6])

ax5.plot(days[startofyear:endofyear], 100 * aerosol[2, startofyear:endofyear], label='Aerosol')
ax5.plot(days[startofyear:endofyear], 100 * temperature[2, startofyear:endofyear], label='Temperature')
ax5.plot(days[startofyear:endofyear], 500 * xmg_02to15[startofyear:endofyear], label='Mg II')
ax5.text(days[1050], 4, '%.1f km' % alts[2])
# Month-only tick labels on the shared x axis.
ax5.xaxis.set_major_locator(months)
ax5.xaxis.set_major_formatter(monthsFmt)

plt.ylim([-50, 50])
plt.tight_layout()
# plt.savefig("/home/kimberlee/Masters/Thesis/Figures/35dayfilterexample.png", format='png', dpi=150)
plt.show()
|
[
"[email protected]"
] | |
410f343e06b5a2e46e0ac58189f5fc2337669859
|
15a992391375efd487b6442daf4e9dd963167379
|
/monai/networks/nets/__init__.py
|
cd9329f61baf93158a6a3aa20992150937c07ed3
|
[
"Apache-2.0"
] |
permissive
|
Bala93/MONAI
|
b0e68e1b513adcd20eab5158d4a0e5c56347a2cd
|
e0a7eff5066da307a73df9145077f6f1fec7a514
|
refs/heads/master
| 2022-08-22T18:01:25.892982 | 2022-08-12T18:13:53 | 2022-08-12T18:13:53 | 259,398,958 | 2 | 0 | null | 2020-04-27T17:09:12 | 2020-04-27T17:09:11 | null |
UTF-8
|
Python
| false | false | 2,805 |
py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .ahnet import AHnet, Ahnet, AHNet
from .attentionunet import AttentionUnet
from .autoencoder import AutoEncoder
from .basic_unet import BasicUNet, BasicUnet, Basicunet, basicunet
from .classifier import Classifier, Critic, Discriminator
from .densenet import (
DenseNet,
Densenet,
DenseNet121,
Densenet121,
DenseNet169,
Densenet169,
DenseNet201,
Densenet201,
DenseNet264,
Densenet264,
densenet121,
densenet169,
densenet201,
densenet264,
)
from .dints import DiNTS, TopologyConstruction, TopologyInstance, TopologySearch
from .dynunet import DynUNet, DynUnet, Dynunet
from .efficientnet import (
BlockArgs,
EfficientNet,
EfficientNetBN,
EfficientNetBNFeatures,
drop_connect,
get_efficientnet_image_size,
)
from .flexible_unet import FlexibleUNet
from .fullyconnectednet import FullyConnectedNet, VarFullyConnectedNet
from .generator import Generator
from .highresnet import HighResBlock, HighResNet
from .hovernet import Hovernet, HoVernet, HoVerNet, HoverNet
from .milmodel import MILModel
from .netadapter import NetAdapter
from .regressor import Regressor
from .regunet import GlobalNet, LocalNet, RegUNet
from .resnet import ResNet, resnet10, resnet18, resnet34, resnet50, resnet101, resnet152, resnet200
from .segresnet import SegResNet, SegResNetVAE
from .senet import (
SENet,
SEnet,
Senet,
SENet154,
SEnet154,
Senet154,
SEResNet50,
SEresnet50,
Seresnet50,
SEResNet101,
SEresnet101,
Seresnet101,
SEResNet152,
SEresnet152,
Seresnet152,
SEResNext50,
SEResNeXt50,
SEresnext50,
Seresnext50,
SEResNext101,
SEResNeXt101,
SEresnext101,
Seresnext101,
senet154,
seresnet50,
seresnet101,
seresnet152,
seresnext50,
seresnext101,
)
from .swin_unetr import PatchMerging, PatchMergingV2, SwinUNETR
from .torchvision_fc import TorchVisionFCModel
from .transchex import BertAttention, BertMixedLayer, BertOutput, BertPreTrainedModel, MultiModal, Pooler, Transchex
from .unet import UNet, Unet
from .unetr import UNETR
from .varautoencoder import VarAutoEncoder
from .vit import ViT
from .vitautoenc import ViTAutoEnc
from .vnet import VNet
|
[
"[email protected]"
] | |
abc9ebe106b0c81f49934c11bac422b6b1e820d1
|
7f41b1033707cacc37d4dfed781ea367e866a428
|
/myapi/urls.py
|
296da9ec5e571a60b56b726ceaca6234b7fc80de
|
[] |
no_license
|
kushagrasgupta/ApiFetchPostDjango
|
57f496bbe2aca5bd13b79d2099a24bf3682c0b33
|
bdc4f89029d625f2d0301e2a2043bb3191e1c21d
|
refs/heads/main
| 2023-07-13T00:08:09.663732 | 2021-08-16T13:16:09 | 2021-08-16T13:16:09 | 396,801,629 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 376 |
py
|
from django.contrib import admin
from django.urls import path, re_path
from django.conf import settings
from myapi import views
from rest_framework.urlpatterns import format_suffix_patterns
from django.views.static import serve

# URL routes for the myapi project.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('userinfo/', views.ImageViewSet.as_view()),
    # Serve uploaded media files. The original used `url(...)` and `settings`
    # without importing either (NameError at import time); `url()` was also
    # removed in Django 4.0, so re_path is used with the same regex.
    re_path(r'media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT,}),
]
|
[
"[email protected]"
] | |
90c3ead82a6a27450a7f5146d6da0898e67ada34
|
e6cee4336f354e76695bee438b902df5e9053427
|
/proTwo/proTwo/appTwo/urls.py
|
a0c5d71df9dfe25559cf4b1382a893af027b3e86
|
[] |
no_license
|
stark019/Django_tutorial_work
|
56d58efcea36d737c5b24342369b20b6a8f0d485
|
97511d80326f58863e77d591a187447379a4ca3d
|
refs/heads/master
| 2020-05-26T08:16:57.715655 | 2019-06-12T14:35:15 | 2019-06-12T14:35:15 | 188,163,927 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 116 |
py
|
from django.conf.urls import url
from appTwo import views

# Route the app root ("") to the users view; reversible by the name "users".
urlpatterns = [
    url(r'^$',views.users,name='users'),
]
|
[
"[email protected]"
] | |
189234dba477920e20978a90104fe63bbe85f33a
|
ccce57307a499b49b14c8b16706166b08df1c5c1
|
/database.py
|
e454dfecf3c38702f9373d274b585f469e9ff64e
|
[
"MIT"
] |
permissive
|
simrit1/CubeTimer
|
6ea1ca4549865317c947a3a91d3a57f1786f198c
|
b226ae875cde35fb573c618d70a408421e0e9f07
|
refs/heads/master
| 2023-07-01T20:38:20.983300 | 2021-07-18T02:04:33 | 2021-07-18T02:04:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,306 |
py
|
import sqlite3
from CubeUtilities import Time, MultiPhaseTime
class Database:
    """sqlite3 wrapper for the cube timer's "times" and "settings" tables.

    Each instance is bound to exactly one of the two known tables via
    `table_name`; every method dispatches on that name to the matching SQL.
    """

    def __init__(self, table_name, db_dir):
        # NOTE(review): sqlite3.connect creates a missing file, so the
        # "doesn't exist" message below only fires for unusable paths.
        self.database_path = db_dir
        self.table_name = table_name
        self.closed = False
        try:
            self.conn = sqlite3.connect(self.database_path)
        except sqlite3.Error:
            raise Exception(f"'{self.database_path}' doesn't exist.")
        self.cursor = self.conn.cursor()
        self.create_table()

    def create_table(self):
        """
        Attempts to create the table; returns False if it already exists
        (sqlite raises OperationalError on CREATE of an existing table).
        :returns: bool
        """
        with self.conn:
            if self.table_name == "times":
                try:
                    self.cursor.execute("""CREATE TABLE times ( time float, scramble text, date text, DNF integer, multiphase text )""")
                except sqlite3.OperationalError:
                    return False
            elif self.table_name == "settings":
                try:
                    self.cursor.execute("""CREATE TABLE settings ( inspection integer, display_time integer, scramble_len integer, multiphase integer, puzzle_type text )""")
                except sqlite3.OperationalError:
                    return False
            else:
                raise ValueError(f"Invalid table name, couldn't create table with name '{self.table_name}'")
        return True

    def insert_record(self, record):
        """
        Adds a new record to the database.
        For "settings", record is a dict of named parameters; for "times",
        record is a Time or MultiPhaseTime instance.
        NOTE(review): despite the original docstring, nothing is returned
        (implicitly None) on any path.
        :param record: Time, MultiPhaseTime, dict
        """
        if self.table_name == "settings":
            with self.conn:
                self.cursor.execute("INSERT INTO settings VALUES (:inspection, :display_time, :scramble_len, :multiphase, :puzzle_type)", record)
        elif self.table_name == "times" and isinstance(record, MultiPhaseTime):
            with self.conn:
                # Per-phase times are stored as a single comma-separated string.
                times = record.get_times()
                for index in range(len(times)):
                    times[index] = str(times[index])
                times = ", ".join(times)
                with self.conn:
                    self.cursor.execute("INSERT INTO times VALUES (?, ?, ?, ?, ?)", (record.time, record.scramble, record.date, int(record.DNF), times))
        elif self.table_name == "times" and isinstance(record, Time):
            print ("saving")  # debug trace
            with self.conn:
                self.cursor.execute("INSERT INTO times VALUES (?, ?, ?, ?, ?)",
                                    (record.time, record.scramble, record.date, int(record.DNF), ""))

    def delete_record(self, oid=None):
        """
        Deletes the record with the oid provided; if oid is None and the
        table is "settings", every record is deleted instead.
        :param oid: int, None
        :returns: bool
        """
        if self.table_name == "settings":
            self.delete_all_records()
            return True
        elif self.table_name == "times" and oid is not None:
            with self.conn:
                self.cursor.execute("DELETE FROM times WHERE oid = :oid",
                                    {"oid": oid})
            # Reclaim file space after the delete (VACUUM cannot run inside
            # the transaction above).
            self.cursor.execute("VACUUM")
            return True
        return False

    def update_record(self, record_attr, new_value, identifier):
        """
        Updates one column of a record to new_value.
        For "times", identifier is the row's oid; for "settings", it is a
        one-entry dict {known_attr: known_value} locating the row.
        NOTE(review): record_attr / known_attr are interpolated into the SQL
        text (f-string) — callers must pass trusted column names only.
        :param record_attr: str
        :param new_value: str, int
        :param identifier: int, dict
        :returns: bool
        """
        if self.table_name == "times":
            with self.conn:
                try:
                    self.cursor.execute(f"UPDATE times SET {record_attr}=:new_value WHERE oid=:oid", {"oid": identifier, "new_value": str(new_value)})
                except sqlite3.Error as e:
                    return False
            return True
        elif self.table_name == "settings":
            with self.conn:
                try:
                    known_attr, known_val = list(identifier.keys())[0], identifier.get(list(identifier.keys())[0])
                    # Coerce numeric-looking values so integer columns match.
                    try:
                        known_val = int(known_val)
                    except ValueError:
                        pass
                    self.cursor.execute(f"UPDATE settings SET {record_attr}=:new_value WHERE {known_attr}=:known_val",
                                        {"new_value": str(new_value), "known_val": known_val})
                except sqlite3.Error:
                    return False
                except (AttributeError, TypeError):
                    raise Exception("identifier argument must be a dictionary with a key of a seperate record attribute, and it's value is the record attributes known value. Ex: identifier={'puzzle_type': '3x3'}")
            return True
        return False

    def get_record(self, oid=None):
        """
        Gets the record with the specified oid; for the "settings" table
        (or when oid is None on it) all records are returned.
        :param oid: int, None
        :return: list[record_tuple]
        """
        if self.table_name == "settings":
            return self.get_all_records()
        self.cursor.execute("SELECT * FROM times WHERE oid=:oid", {"oid": oid})
        return self.cursor.fetchall()

    def get_all_records(self):
        """
        Gets every record in the database table; empty list on SQL error.
        :returns: list[record_tuple]
        """
        with self.conn:
            try:
                self.cursor.execute(f"SELECT * FROM {self.table_name}")
            except sqlite3.Error:
                return []
        return self.cursor.fetchall()

    def delete_all_records(self):
        """
        Deletes every record by dropping nothing: DELETE all rows, then
        re-create the (already existing) table and VACUUM.
        :returns: bool
        """
        with self.conn:
            try:
                self.cursor.execute(f"DELETE FROM {self.table_name}")
                self.create_table()
            except sqlite3.Error:
                return False
            else:
                self.cursor.execute("VACUUM")
                return True

    def close_connection(self):
        """
        Closes the connection to the database and marks this wrapper closed.
        :returns: None
        """
        self.conn.close()
        self.closed = True
|
[
"[email protected]"
] | |
50757308714e1748bc154ae1b6b8a9944dfd0fca
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/7/sxj.py
|
95c3a20eebf162044696cda87af0444abb3afeb2
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
import sys
def printFunction(lineRemaining):
    # Python 2 script. lineRemaining is a list of whitespace-split tokens;
    # a valid payload is delimited by standalone '"' tokens at both ends.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print: drop the surrounding quote tokens and re-join
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # empty payload ("" with nothing between): print a blank line
            print
def main(fileName):
    # Interpret each line of the file: lines must start with the 'sxJ'
    # keyword followed by a quoted payload; anything else aborts with ERROR.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'sxJ':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
    # Entry point: expects the program file path as the first CLI argument.
    main(sys.argv[1])
|
[
"[email protected]"
] | |
5115b3a9bc8cd124a75b834992a50d198be0fbdd
|
d99187b8b27cc3c56fef7883487dfcc82ea235c2
|
/setdefault-fun.py
|
a44932b8d823b666db5bf2bfc90a45701dec1401
|
[] |
no_license
|
SanketGawande27/Chapter1--Automate-the-Boring-Stuff-With-Python
|
898fdd1e6537b84b1f42dbc2f7de38a24c27e902
|
74727c7cfed855e0f32b1cfe51085fedaeb264b4
|
refs/heads/master
| 2022-12-02T09:14:51.831936 | 2020-08-13T09:14:38 | 2020-08-13T09:14:38 | 274,306,362 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 221 |
py
|
# Demonstrates dict.setdefault() by tallying character frequencies.
message = 'Hello Friendss ... I am sanket. This program is for setdefault () function...'

count = {}
for ch in message:
    # setdefault seeds a tally of 0 the first time a character is seen,
    # so the increment below is always safe.
    count.setdefault(ch, 0)
    count[ch] += 1

print(count)
|
[
"[email protected]"
] | |
5a4102380ceda801c33ba27df61c91998ba24ab0
|
c4943748c504f26e197ce391c747bb5a4c146be2
|
/trade_data_get/future_daily_point_data.py
|
242777b92ad8080f11fc2e523a24c024c3dba7a1
|
[] |
no_license
|
NewLanded/security_data_store
|
88919c233d6bd22b20d0d9918c8e2ffcafc33c3e
|
d23c68777e6ecb0641cb5c6f7061b1c11d208886
|
refs/heads/master
| 2021-07-21T12:55:47.650454 | 2021-06-30T07:32:00 | 2021-06-30T07:32:00 | 133,665,767 | 1 | 0 | null | 2018-05-16T13:03:35 | 2018-05-16T12:56:05 | null |
UTF-8
|
Python
| false | false | 1,715 |
py
|
import datetime
import time
import tushare as ts
from conf import PRO_KEY
from util_base.date_util import convert_datetime_to_str, convert_str_to_datetime, get_date_range
from util_base.db_util import engine
from util_base.db_util import store_failed_message
from util_data.date import Date
ts.set_token(PRO_KEY)
pro = ts.pro_api()
def get_all_future_daily_point_data(data_date_str):
    """Fetch all futures daily bars for one trade date via the tushare pro API.

    Sleeps 2 s before and after the call — presumably to stay under the API
    rate limit (TODO confirm against tushare quota).
    """
    time.sleep(2)
    result = pro.fut_daily(trade_date=data_date_str)
    time.sleep(2)
    return result
def store_future_daily_point_data(future_daily_point_data):
    # Stamp every row with the load time, convert trade_date strings to
    # datetimes, then append the frame to the future_daily_point_data table.
    # NOTE(review): mutates the caller's DataFrame in place (adds/overwrites
    # the update_date and trade_date columns) before writing.
    future_daily_point_data["update_date"] = datetime.datetime.now()
    future_daily_point_data["trade_date"] = future_daily_point_data["trade_date"].apply(convert_str_to_datetime)
    future_daily_point_data.to_sql("future_daily_point_data", engine, index=False, if_exists="append")
def start(date_now=None):
    """Download and store futures daily data for date_now (default: today).

    The timestamp is truncated to midnight before use; non-workdays are
    skipped.  A failed download/store is recorded via store_failed_message
    instead of propagating, so a backfill loop can continue.
    """
    if date_now is None:
        date_now = datetime.datetime.now()
    day = datetime.datetime(date_now.year, date_now.month, date_now.day)
    if not Date().is_workday(day):
        return
    try:
        store_future_daily_point_data(
            get_all_future_daily_point_data(convert_datetime_to_str(day)))
    except Exception as e:
        store_failed_message("", "future_daily_point_data", str(e), day)
if __name__ == "__main__":
    pass
    # Backfill: run the loader for every calendar day in the range; start()
    # itself skips non-workdays and records per-day failures.
    for date_now in get_date_range(datetime.datetime(2015, 1, 1), datetime.datetime(2021, 6, 18)):
        print(date_now)
        start(date_now)
    # start(datetime.datetime(2020, 5, 19))
    # all_future_daily_point_data = pro.daily(trade_date="20181008")
    pass
|
[
"[email protected]"
] | |
6b2ed7d805f5d1bc63de6352fa85ee532c59ff14
|
2cbd9b29cc91236df0034325cdb2416d5eca997b
|
/tests/selenium/delete_devices/login_test_emf.py
|
94f8492b8bbf164444527c6a986ffd9e32962929
|
[
"MIT"
] |
permissive
|
sivaanil/laravel
|
6cce9d3272be1313abc99c7fae40954b4c0e9488
|
14900b071a514379f0161c5fd7bea05300dee083
|
refs/heads/master
| 2021-01-11T17:36:18.115702 | 2017-02-20T13:57:51 | 2017-02-20T13:57:51 | 82,559,033 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,657 |
py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import sys
sys.path.append('..')
import c2_test_case
import selenium_config
import unittest, time, re, os, subprocess
class Login(c2_test_case.C2TestCase):
    """Selenium UI test: fills in the login form and submits it."""
    def test_login_success(self):
        # NOTE(review): credentials are hardcoded in source — consider moving
        # them to test configuration / environment variables.
        driver = self.config.driver
        driver.get(self.config.base_url + "")
        # To ensure enough time for page load
        time.sleep(2)
        driver.find_element_by_id("username").clear()
        driver.find_element_by_id("username").send_keys("G8Keeper")
        driver.find_element_by_id("password").click()
        driver.find_element_by_id("password").clear()
        driver.find_element_by_id("password").send_keys("C2sYt_gA8")
        driver.find_element_by_id("login-button").click()
        # Fixed sleep instead of an explicit wait; no assertion is made, so
        # this test only verifies that the steps do not raise.
        time.sleep(5)
        """
        timeout = 4
        time.sleep(timeout)
        expected_url = '{}{}'.format(self.config.base_url, 'home#/nodes/321')
        self.assertEqual(driver.current_url,
                         expected_url,
                         "{} FAILURE! URL Redirect to '{}' did not work within {} seconds.".format(__file__,
                                                                                                  expected_url,
                                                                                                  timeout))
        """
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
[
"[email protected]"
] | |
b19697dd2141337e43e8637db3d1b3194c5727c4
|
2db2eecff0095a55b057bdf3df012ad92f6c11a7
|
/keras_explain/deep_viz_keras/visual_backprop.py
|
79f0abcf1c308c75a6d90703299f3442bb93407a
|
[
"MIT"
] |
permissive
|
PrimozGodec/keras-explain
|
b83ed45f85d53b4a7b26f0d4697ffd98c7a214d6
|
692785e881dc12a1500a9e651b07c7ff6fef6f9d
|
refs/heads/master
| 2020-03-26T23:52:55.062767 | 2020-02-19T18:34:51 | 2020-02-19T18:34:51 | 145,574,595 | 18 | 7 |
MIT
| 2020-02-19T18:34:53 | 2018-08-21T14:21:33 |
Python
|
UTF-8
|
Python
| false | false | 2,437 |
py
|
import keras.backend as K
import numpy as np
from keras.initializers import Ones, Zeros
from keras.layers import Input, Conv2DTranspose
from keras.models import Model
from keras_explain.deep_viz_keras.saliency import SaliencyMask
class VisualBackprop(SaliencyMask):
    """A SaliencyMask class that computes saliency masks with VisualBackprop (https://arxiv.org/abs/1611.05418).
    """
    def __init__(self, model, output_index=0):
        """Constructs a VisualProp SaliencyMask."""
        # Build a single Keras function returning EVERY layer's output, so one
        # forward pass yields all intermediate feature maps.
        inps = [model.input, K.learning_phase()] # input placeholder
        outs = [layer.output for layer in model.layers] # all layer outputs
        self.forward_pass = K.function(inps, outs) # evaluation function
        self.model = model
    def get_mask(self, input_image):
        """Returns a VisualBackprop mask."""
        # Add a batch dimension; learning_phase=0 selects inference mode.
        x_value = np.expand_dims(input_image, axis=0)
        visual_bpr = None
        layer_outs = self.forward_pass([x_value, 0])
        # Walk the layers from deepest to shallowest, combining the channel-
        # averaged activation of each Conv2D layer.
        for i in range(len(self.model.layers)-1, -1, -1):
            if 'Conv2D' in str(type(self.model.layers[i])):
                # Average over channels (assumes channels-last layout — TODO
                # confirm the model's data_format), then min-max normalise.
                layer = np.mean(layer_outs[i], axis=3, keepdims=True)
                layer = layer - np.min(layer)
                layer = layer/(np.max(layer)-np.min(layer)+1e-6)
                if visual_bpr is not None:
                    # Upsample the running map until it matches this layer's
                    # spatial size, then gate it by this layer's activations.
                    if visual_bpr.shape != layer.shape:
                        visual_bpr = self._deconv(visual_bpr)
                    visual_bpr = visual_bpr * layer
                else:
                    visual_bpr = layer
        # Drop the batch dimension.
        return visual_bpr[0]
    def _deconv(self, feature_map):
        """The deconvolution operation to upsample the average feature map downstream"""
        # One stride-2 transposed conv with all-ones kernel doubles the
        # spatial resolution of the single-channel map.
        x = Input(shape=(None, None, 1))
        y = Conv2DTranspose(filters=1,
                            kernel_size=(3,3),
                            strides=(2,2),
                            padding='same',
                            kernel_initializer=Ones(),
                            bias_initializer=Zeros())(x)
        deconv_model = Model(inputs=[x], outputs=[y])
        inps = [deconv_model.input, K.learning_phase()] # input placeholder
        outs = [deconv_model.layers[-1].output] # output placeholder
        deconv_func = K.function(inps, outs) # evaluation function
        return deconv_func([feature_map, 0])[0]
|
[
"[email protected]"
] | |
cc004fdb9d5cd9911bf6ba3f0a30d66c4afac7bd
|
4a59ac4c2d94785ee9458627da964bac70c1deba
|
/sentences/migrations/0001_initial.py
|
3921bbfeb72a6a59eaa673ff1f10a60f62cb6887
|
[] |
no_license
|
showerbugs/acidrain-api
|
f7d9ced324383719569a64ef01172699ed9db0da
|
91f5e38390e67db78186478d13dd7b0535023835
|
refs/heads/master
| 2022-12-18T22:38:15.081982 | 2017-08-13T08:57:00 | 2017-08-13T08:57:00 | 99,404,448 | 0 | 0 | null | 2022-12-08T00:42:09 | 2017-08-05T06:38:13 |
Python
|
UTF-8
|
Python
| false | false | 795 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-05 07:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Sentence table with
    # type/body/difficulty fields and automatic created/updated timestamps.
    # Do not edit the operations by hand; generate a follow-up migration.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Sentence',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=10)),
                ('body', models.TextField()),
                ('difficulty', models.IntegerField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
fcf155feccc0ab8f84ca6c573909d2fba3b7aea2
|
959a88c0b91c86ff3e78f9be60b1d9aafb2e2d3c
|
/tweetme/urls.py
|
6d45da2cf23f46a5f0f737f3fb486daafcf26a24
|
[] |
no_license
|
19sanchit19/Twitter-Mini
|
90c2d7f21a24cfa62092077b5319ec698bf96e71
|
dda70c7cc19a3b4c639620b98edbd64ef5fc73db
|
refs/heads/master
| 2022-11-23T21:10:25.533486 | 2020-07-24T22:53:26 | 2020-07-24T22:53:26 | 282,317,197 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,976 |
py
|
"""tweetme URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url,include
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from .views import home,SearchView
from tweets.api.views import SearchTweetAPIView
from hashtags.views import HashTagView
from hashtags.api.views import TagTweetAPIView
from tweets.views import TweetListView
from accounts.views import UserRegisterView
# Route table: site pages, search, hashtags, and the JSON API endpoints.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', TweetListView.as_view(), name='home'),
    url(r'^search/$', SearchView.as_view(), name='search'),
    url(r'^',include('accounts.urls', namespace='profiles' )),
    url(r'^tweets/',include('tweets.urls', namespace='tweet' )),
    url(r'^api/tags/(?P<hashtag>.*)/$', TagTweetAPIView.as_view(), name='tag-tweet-api'),
    url(r'^api/search/$', SearchTweetAPIView.as_view(), name='search-api'),
    url(r'^tags/(?P<hashtag>.*)/$',HashTagView.as_view(), name='hashtag'),
    url(r'^api/tweets/',include('tweets.api.urls', namespace='tweet-api' )),
    url(r'^', include('django.contrib.auth.urls')),
    url(r'^register/$', UserRegisterView.as_view(), name='register'),
    url(r'^api/',include('accounts.api.urls', namespace='profiles-api' )),
]
# Serve static files from Django itself only in development.
if settings.DEBUG:
    urlpatterns+=(static(settings.STATIC_URL, document_root=settings.STATIC_ROOT))
|
[
"[email protected]"
] | |
ae256cc65559aa8a3d48b4544878f3e404989090
|
c61d358bcbfffb141c3394c38e72d62dc03d2ada
|
/23ListelerinveDemetlerinMetotlari.py
|
d76a89eef85768ff9e2f35ca2a357829033349d3
|
[] |
no_license
|
sahinbesinci/Python
|
75e2e81a897ca4cbe478981d49fc7bb77e247a20
|
a8e0ca1214bfb49f2a7c016391197d57ed2b6f5f
|
refs/heads/master
| 2020-07-02T14:23:01.123843 | 2019-09-05T21:37:34 | 2019-09-05T21:37:34 | 201,555,415 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,990 |
py
|
#
#
# Listelerin ve Demetlerin Metotları
#
#
#
# Listelerin Metotları
#
>>> dir(list)
>>> dir([])
['__add__', '__class__', '__contains__', '__delattr__', '__delitem__',
'__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__',
'__getitem__', '__gt__', '__hash__', '__iadd__', '__imul__', '__init__',
'__iter__', '__le__', '__len__', '__lt__', '__mul__', '__ne__', '__new__',
'__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__rmul__',
'__setattr__', '__setitem__', '__sizeof__', '__str__', '__subclasshook__',
'append', 'clear', 'copy', 'count', 'extend', 'index', 'insert', 'pop',
'remove', 'reverse', 'sort']
>>> [i for i in dir(list) if not "_" in i]
['append', 'clear', 'copy', 'count', 'extend', 'index',
'insert', 'pop', 'remove', 'reverse', 'sort']
#
# append()
#
>>> liste = ["elma", "armut", "çilek"]
>>> liste.append("erik")
>>> liste = ["elma", "armut", "çilek"]
>>> liste = liste + ["erik"]
>>> print(liste)
['elma', 'armut', 'çilek', 'erik']
>>> liste = [1, 2, 3]
>>> liste.append([4, 5, 6])
>>> print(liste)
[1, 2, 3, [4, 5, 6]]
#
# extend()
#
>>> li1 = [1, 3, 4]
>>> li2 = [10, 11, 12]
>>> li1. append(li2)
>>> print(li1)
[1, 3, 4, [10, 11, 12]]
li1 = [1, 3, 4]
li2 = [10, 11, 12]
li1. extend(li2)
print(li1)
#
# insert()
#
>>> liste = ["elma", "armut", "çilek"]
>>> liste.insert(0, "erik")
>>> print(liste)
#
# remove()
#
>>> liste = ["elma", "armut", "çilek"]
>>> liste.remove("elma")
>>> liste
['armut', 'çilek']
#
# reverse()
#
>>> liste = ["elma", "armut", "çilek"]
>>> liste.reverse()
>>> liste
['çilek', 'armut', 'elma']
>>> meyveler = ["elma", "armut", "çilek", "kiraz"]
>>> meyveler[::-1]
['kiraz', 'çilek', 'armut', 'elma']
>>> print(*reversed(meyveler))
kiraz çilek armut elma
>>> print(list(reversed(meyveler)))
['kiraz', 'çilek', 'armut', 'elma']
>>> for i in reversed(meyveler):
... print(i)
...
kiraz
çilek
armut
elma
#
# pop()
#
>>> liste = ["elma", "armut", "çilek"]
>>> liste.pop()
'çilek'
>>> liste
['elma', 'armut']
>>> liste.pop(0)
'elma'
>>> liste
['armut']
#
# sort()
#
>>> üyeler = ['Ahmet', 'Mehmet', 'Ceylan', 'Seyhan', 'Mahmut', 'Zeynep',
... 'Abdullah', 'Kadir', 'Kemal', 'Kamil', 'Selin', 'Senem',
... 'Sinem', 'Tayfun', 'Tuna', 'Tolga']
>>> üyeler.sort()
>>> print(üyeler)
['Abdullah', 'Ahmet', 'Ceylan', 'Kadir', 'Kamil', 'Kemal', 'Mahmut', 'Mehmet', 'Selin', 'Senem', 'Seyhan', 'Sinem', 'Tayfun', 'Tolga', 'Tuna', 'Zeynep']
>>> sayılar = [1, 0, -1, 4, 10, 3, 6]
>>> sayılar.sort()
>>> print(sayılar)
[-1, 0, 1, 3, 4, 6, 10]
>>> üyeler = ['Ahmet', 'Mehmet', 'Ceylan', 'Seyhan', 'Mahmut', 'Zeynep',
... 'Abdullah', 'Kadir', 'Kemal', 'Kamil', 'Selin', 'Senem',
... 'Sinem', 'Tayfun', 'Tuna', 'Tolga']
>>> üyeler.sort(reverse=True)
>>> print(üyeler)
['Zeynep', 'Tuna', 'Tolga', 'Tayfun', 'Sinem', 'Seyhan', 'Senem', 'Selin', 'Mehmet', 'Mahmut', 'Kemal', 'Kamil', 'Kadir', 'Ceylan', 'Ahmet', 'Abdullah']
>>> isimler = ["Ahmet", "Işık", "İsmail", "Çiğdem", "Can", "Şule"]
>>> isimler.sort()
>>> isimler
['Ahmet', 'Can', 'Işık', 'Çiğdem', 'İsmail', 'Şule']
#
# index()
#
>>> liste = ["elma", "armut", "çilek"]
>>> liste.index("elma")
0
#
# count()
#
>>> liste = ["elma", "armut", "elma", "çilek"]
>>> liste.count("elma")
2
#
# copy()
#
>>> liste1 = ["ahmet", "mehmet", "özlem"]
>>> liste2 = liste1[:]
>>> liste2 = list(liste1)
>>> liste2 = liste1.copy()
#
# clear()
#
>>> liste = [1, 2, 3, 5, 10, 20, 30, 45]
>>> liste.clear()
>>> liste
[]
#
# Demetlerin Metotları
#
>>> dir(tuple)
#
# index()
#
>>> demet = ("elma", "armut", "çilek")
>>> demet.index("elma")
0
#
# count()
#
>>> demet = ("elma", "armut", "elma", "çilek")
>>> demet.count("elma")
2
|
[
"sbesinci@ubuntu.empra4vdrldehkkuhwt3fipuec.bx.internal.cloudapp.net"
] |
sbesinci@ubuntu.empra4vdrldehkkuhwt3fipuec.bx.internal.cloudapp.net
|
29b781906de5f2fb4d497b7821cbd335e7a08c7b
|
a9665726d599cdaabae562dde51e4dfa6f36f9f9
|
/manage.py
|
3b915dbc96f0a8e68ca4986a9cc826ca4a331c97
|
[
"BSD-3-Clause"
] |
permissive
|
gh0stsh0t/TwitterSlave
|
fda50864e8e5c3e78d323f27a14355e3896bf337
|
b6f272cab45b7e2aff9781e6ddacfccbb32c53a2
|
refs/heads/master
| 2022-05-04T06:32:43.999800 | 2020-07-20T06:29:55 | 2020-07-20T06:29:55 | 66,742,577 | 0 | 1 |
BSD-3-Clause
| 2022-04-22T20:50:44 | 2016-08-28T00:59:03 |
Python
|
UTF-8
|
Python
| false | false | 251 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings, then delegate to the CLI.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TwitterSlave.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
bc232bb3094a7b2072b027fedf93271713aef6b9
|
884fecb1be9b97483301d76ad5c45491290b817b
|
/iaproject/portfolio/migrations/0012_auto_20200204_0900.py
|
d2b5c0dd6c171a1f80dbf36b866144e7e0eb1fb7
|
[] |
no_license
|
Code-Institute-Submissions/interior-architect
|
de9989769927a99d608f394b90d792560f87af8c
|
0fcb1dd6f4bb2558f1f16b490f1b2f29f592d76a
|
refs/heads/master
| 2021-02-07T17:15:35.757591 | 2020-02-29T23:04:27 | 2020-02-29T23:04:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 452 |
py
|
# Generated by Django 2.2.9 on 2020-02-04 09:00
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the unused image_landscape and
    # image_portrait fields from the portfolio Section model.
    dependencies = [
        ('portfolio', '0011_auto_20200129_1649'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='section',
            name='image_landscape',
        ),
        migrations.RemoveField(
            model_name='section',
            name='image_portrait',
        ),
    ]
|
[
"[email protected]"
] | |
c74942de61e4a32ff2a0a0be62da3f16bf3c27a3
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/BuildLinks1.10/test_input/CJ_16_2/16_2_1_anthrocoder_digits.py
|
595c608a01ecda0f5fcd93bfb768e0ff0aab1314
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 |
Python
|
UTF-8
|
Python
| false | false | 944 |
py
|
import sys
# Code Jam "phone number from shuffled digit words" decoder.
# Each digit word has a letter that uniquely identifies it once previously
# counted digits are subtracted:
#   Z->zero, W->two, U->four, X->six, G->eight, then
#   O->one (minus zero/two/four), H->three (minus eight),
#   F->five (minus four), V->seven (minus five),
#   I->nine (minus five/six/eight).
try:
    # 'with' guarantees both files are closed (the original leaked them).
    with open(sys.argv[1]) as f, open(sys.argv[1].rpartition("\\")[2] + ".out", 'w') as out:
        numTests = int(f.readline())
        for i in range(0, numTests):
            note = f.readline()
            zeros = note.count("Z")
            twos = note.count("W")
            fours = note.count("U")
            sixes = note.count("X")
            eights = note.count("G")
            ones = note.count("O") - twos - fours - zeros
            threes = note.count("H") - eights
            fives = note.count("F") - fours
            sevens = note.count("V") - fives
            nines = note.count("I") - fives - sixes - eights
            phoneNo = ("0" * zeros) + ("1" * ones) + ("2" * twos) + ("3"*threes)+("4"*fours)+("5"*fives)+("6"*sixes)+("7"*sevens)+("8"*eights)+("9"*nines)
            out.write("Case #" + str(i+1) +": " + phoneNo + "\n")
except IOError as e:
    # fixed: handler previously printed undefined name 'err' (NameError).
    print('Error:', e)
|
[
"[[email protected]]"
] | |
739262093cf0e020865d48af35d7a705fda23d30
|
8e73a76ddc32b2587592a21aa9feeb1b1b6a8517
|
/tkinter/text.py
|
c08f8c2755041562468b52938a77d0cf37448fc6
|
[] |
no_license
|
bjavier9/python-curso
|
d754c6b771d3c1f77c515bbbb290317aff68700d
|
f5ea25c36e2fc2ed0e9c78e9d5aee0a562bf8772
|
refs/heads/master
| 2020-04-14T22:36:34.678645 | 2019-01-21T19:22:52 | 2019-01-21T19:22:52 | 164,169,802 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 188 |
py
|
from tkinter import *
# Minimal Tkinter demo: a window with one configured multi-line Text widget.
root = Tk()
texto = Text(root)
texto.pack()
# 30x10 characters, monospace font, inner padding, red selection highlight.
texto.config(width=30, height=10, font=("Consolas",12), padx=15, pady=15, selectbackground="red")
root.mainloop()
|
[
"[email protected]"
] | |
658188357a420a967626a633ab73119b6a6a95f5
|
f89b26d9c53b1d5cc6b14d7f20c57772c98fb53d
|
/plus minus.py
|
3620c88e652db9cf30d344d0e8462e9fc3708813
|
[] |
no_license
|
Mityun/Analitic_of_my_book
|
9be73824b0d218f87619e938ef0b0ceeb57e1310
|
dd9842925205b3ec55179ae00df798031dcf8c26
|
refs/heads/main
| 2023-08-14T10:41:33.105877 | 2021-10-10T07:32:23 | 2021-10-10T07:32:23 | 326,292,671 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 323 |
py
|
# Tiny stdin calculator: reads two numbers and an operator, prints the result.
# 888888 is the error marker for an unknown operator or a zero second operand.
q = float(input())
w = float(input())
e = input()
# NOTE(review): every branch requires w != 0, so even "q + 0" or "q * 0"
# prints 888888 — presumably only division by zero was meant; confirm intent.
if e == "-" and w != 0:
    print(q - w)
elif e == "+" and w != 0:
    print(q + w)
elif e == "*" and w != 0:
    print(q * w)
elif e == "/" and w != 0:
    print(q / w)
elif e != "+" and e != "-" and e != "*" and e != "/":
    print(888888)
elif w == 0:
    print(888888)
|
[
"[email protected]"
] | |
d8f72a29e4debb13f575d834a3fd85f7a8f5a2f5
|
6ce98e2d496adc66e8b96e695b6ff75e87b17461
|
/hello/urls.py
|
4a248afa04cdb0f27ca0e17bdc19dacafa4b9e03
|
[] |
no_license
|
devrajkakoty/Django_first
|
58e4c7c385092cae15f4fe836b4b72f68ed9f5a3
|
cdd10fdca6237310459e392ed46d8b95e0dbd4bf
|
refs/heads/master
| 2022-11-15T21:54:37.449226 | 2020-07-19T08:12:58 | 2020-07-19T08:12:58 | 280,639,396 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 928 |
py
|
"""hello URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# Brand the Django admin pages.
admin.site.site_header = "Devraj Admin"
admin.site.site_title = "Devraj Admin Portal"
admin.site.index_title = "Welcome to Devraj Django"
# Admin plus everything from the 'home' app mounted at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('home.urls'))
]
|
[
"[email protected]"
] | |
950ffaae1721bda634c1e0426402fff9e8f5e767
|
95e2d468721f90d3c5969259c287d467bd7acd02
|
/plugins/mirrorTablePlugin.py
|
4251065d4a7edadc7135da64e1d71771e76ff31d
|
[
"BSD-3-Clause"
] |
permissive
|
jonntd/studioLibrary
|
64b49abc4bc8e5c026825031d9ea6f3f7f035368
|
98fc9f3a26690bf884537a93956d66701b837af3
|
refs/heads/master
| 2020-12-25T05:29:37.170103 | 2014-11-25T22:16:44 | 2014-11-25T22:16:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,685 |
py
|
#Embedded file name: C:/Users/hovel/Dropbox/packages/studioLibrary/1.5.8/build27/studioLibrary\plugins\mirrorTablePlugin.py
"""
# Released subject to the BSD License
# Please visit http://www.voidspace.org.uk/python/license.shtml
#
# Copyright (c) 2014, Kurt Rathjen
# All rights reserved.
# Comments, suggestions and bug reports are welcome.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Kurt Rathjen nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY KURT RATHJEN ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL KURT RATHJEN BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
try:
from PySide import QtGui
from PySide import QtCore
except ImportError:
from PyQt4 import QtGui
from PyQt4 import QtCore
try:
import maya.cmds
except ImportError:
import traceback
traceback.print_exc()
import mutils
import studioLibrary
import studioLibrary.plugins.mayaBasePlugin as mayaBasePlugin
class SelectionSetPluginError(Exception):
"""Base class for exceptions in this module."""
pass
class Plugin(mayaBasePlugin.Plugin):
    """Studio Library plugin registration for Mirror Table records.

    Wires the .mirror extension to the Record class and its info/create/
    preview widgets, and exposes the user's saved mirror settings.
    """
    def __init__(self, parent):
        """
        @type parent:
        """
        studioLibrary.Plugin.__init__(self, parent)
        self.setName('Mirror Table')
        self.setIcon(self.dirname() + '/images/mirrortable.png')
        self.setExtension('mirror')
        self.setRecord(Record)
        self.setInfoWidget(MirrorTableInfoWidget)
        self.setCreateWidget(MirrorTableCreateWidget)
        self.setPreviewWidget(MirrorTablePreviewWidget)
    def mirrorAnimation(self):
        """
        @rtype: bool
        """
        # Saved preference: whether loading mirrors animation too.
        return self.settings().get('mirrorAnimation', True)
    def mirrorOption(self):
        """
        @rtype: mutils.MirrorOption
        """
        # Saved preference: mirror direction/mode (defaults to Swap).
        return self.settings().get('mirrorOption', mutils.MirrorOption.Swap)
class Record(mayaBasePlugin.Record):
    """A saved mirror table item; lazily loads its mirrortable.json payload."""
    def __init__(self, *args, **kwargs):
        mayaBasePlugin.Record.__init__(self, *args, **kwargs)
        # Cache for the parsed MirrorTable (built on first access).
        self._transferObject = None
    def transferPath(self):
        # Location of the serialized mirror table inside the record folder.
        return self.dirname() + '/mirrortable.json'
    def transferObject(self):
        # Lazy-load and cache the MirrorTable parsed from transferPath().
        if self._transferObject is None:
            self._transferObject = mutils.MirrorTable.createFromPath(self.transferPath())
        return self._transferObject
    def keyPressEvent(self, event):
        # NOTE(review): the M-key branch is a no-op — dead code or an
        # unfinished shortcut; confirm intent before removing.
        if event.key() == QtCore.Qt.Key_M:
            pass
    @mutils.showWaitCursor
    def load(self, option = None, animation = None, time = None):
        """Apply this mirror table to the current Maya selection.

        Unspecified option/animation fall back to the plugin's saved
        preferences; errors are surfaced in the UI and re-raised.
        """
        if option is None:
            option = self.plugin().mirrorOption()
        if animation is None:
            animation = self.plugin().mirrorAnimation()
        objects = maya.cmds.ls(selection=True)
        try:
            self.transferObject().load(objects, namespaces=self.namespaces(), option=option, animation=animation, time=time)
        except Exception as msg:
            self.window().setError(str(msg))
            raise
    def doubleClicked(self):
        """Double-clicking a record applies it with the saved preferences."""
        self.load()
class MirrorTableInfoWidget(mayaBasePlugin.InfoWidget):
    """Info panel for mirror table records; pure delegation to the base widget."""
    def __init__(self, parent = None, record = None):
        """
        :param parent:
        :param record:
        """
        mayaBasePlugin.InfoWidget.__init__(self, parent, record)
class MirrorTablePreviewWidget(mayaBasePlugin.PreviewWidget):
    """Preview/apply panel: shows the table's left/right sides and persists
    the user's mirror option and animation choices."""
    def __init__(self, *args, **kwargs):
        """
        :param args:
        :param kwargs:
        """
        mayaBasePlugin.PreviewWidget.__init__(self, *args, **kwargs)
        # Persist settings whenever either control changes.
        self.connect(self.ui.mirrorAnimationCheckBox, QtCore.SIGNAL('stateChanged(int)'), self.stateChanged)
        self.connect(self.ui.mirrorOptionComboBox, QtCore.SIGNAL('currentIndexChanged(const QString&)'), self.stateChanged)
        # Display the naming patterns stored in the record's mirror table.
        mt = self.record().transferObject()
        self.ui.left.setText(mt.left())
        self.ui.right.setText(mt.right())
    def mirrorOption(self):
        # Index of the selected option in the combo box.
        return self.ui.mirrorOptionComboBox.findText(self.ui.mirrorOptionComboBox.currentText(), QtCore.Qt.MatchExactly)
    def mirrorAnimation(self):
        # Whether animation should be mirrored as well.
        return self.ui.mirrorAnimationCheckBox.isChecked()
    def saveSettings(self):
        """Persist the current UI state on top of the base widget's settings."""
        super(MirrorTablePreviewWidget, self).saveSettings()
        s = self.settings()
        s.set('mirrorOption', int(self.mirrorOption()))
        s.set('mirrorAnimation', bool(self.mirrorAnimation()))
        s.save()
    def loadSettings(self):
        """Restore the UI state saved by saveSettings (with defaults)."""
        super(MirrorTablePreviewWidget, self).loadSettings()
        s = self.settings()
        self.ui.mirrorOptionComboBox.setCurrentIndex(s.get('mirrorOption', mutils.MirrorOption.Swap))
        self.ui.mirrorAnimationCheckBox.setChecked(s.get('mirrorAnimation', True))
    def accept(self):
        """Apply the mirror table to the current selection."""
        self.record().load()
class MirrorTableCreateWidget(mayaBasePlugin.CreateWidget):
    """Creation panel: guesses left/right naming from the selection and saves
    a new mirror table record."""
    def __init__(self, *args, **kwargs):
        """
        :param args:
        :param kwargs:
        """
        mayaBasePlugin.CreateWidget.__init__(self, *args, **kwargs)
        # Selection sets are irrelevant to mirror tables.
        self.ui.selectionSetButton.hide()
    def selectionChanged(self):
        # Auto-fill the side patterns from the selection (only if the user
        # hasn't typed anything) and refresh the matched-object counts.
        objects = maya.cmds.ls(selection=True) or []
        if not self.ui.left.text():
            self.ui.left.setText(mutils.MirrorTable.findLeftSide(objects))
        if not self.ui.right.text():
            self.ui.right.setText(mutils.MirrorTable.findRightSide(objects))
        mt = mutils.MirrorTable.createFromObjects([], left=str(self.ui.left.text()), right=str(self.ui.right.text()))
        self.ui.leftCount.setText(str(mt.leftCount(objects)))
        self.ui.rightCount.setText(str(mt.rightCount(objects)))
        mayaBasePlugin.CreateWidget.selectionChanged(self)
    @mutils.showWaitCursor
    def accept(self):
        """Build a mirror table from the selection and save it as a record.

        :raise:
        """
        mayaBasePlugin.CreateWidget.accept(self)
        msg = 'An error has occurred while saving the mirror table! Please check the script editor for more details.'
        try:
            # Serialize to a temp file first, then hand it to the record.
            path = studioLibrary.getTempDir() + '/mirrortable.json'
            left = str(self.ui.left.text())
            right = str(self.ui.right.text())
            mt = mutils.MirrorTable.createFromObjects(maya.cmds.ls(selection=True), left=left, right=right)
            mt.save(path)
            self.record().save(content=[path], icon=self._thumbnail)
        except Exception:
            self.record().window().setError(msg)
            raise
if __name__ == '__main__':
    # Launch the Studio Library UI when run as a script.
    import studioLibrary
    studioLibrary.main()
|
[
"[email protected]"
] | |
df05476a55d74eac175c02cf47d0431568781b2d
|
a84e1ed67ef2592cf22f7d19cdddaf16700d6a8e
|
/graveyard/web/VNET/branches/vnf/vnf/components/NeutronExperiment.py
|
8fd30a324c74a91f7ace04f9b8a10a6528a0f084
|
[] |
no_license
|
danse-inelastic/inelastic-svn
|
dda998d7b9f1249149821d1bd3c23c71859971cc
|
807f16aa9510d45a45360d8f59f34f75bb74414f
|
refs/heads/master
| 2016-08-11T13:40:16.607694 | 2016-02-25T17:58:35 | 2016-02-25T17:58:35 | 52,544,337 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 24,209 |
py
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from Actor import actionRequireAuthentication, action_link, AuthenticationError
from FormActor import FormActor as base
class NeutronExperiment(base):
    class Inventory(base.Inventory):
        # Pyre-configurable properties for this actor: the experiment id and
        # the neutron count used by the simulation.
        import pyre.inventory
        id = pyre.inventory.str("id", default=None)
        id.meta['tip'] = "the unique identifier of the experiment"
        ncount = pyre.inventory.float( 'ncount', default = 1e6 )
        ncount.meta['tip'] = 'number of neutrons'
        pass # end of Inventory
    def default(self, director):
        """Render the landing page with links to the wizard and the list of
        existing experiments.  Requires an authenticated session."""
        try:
            page = director.retrieveSecurePage( 'neutronexperiment' )
        except AuthenticationError, err:
            return err.page
        main = page._body._content._main
        # populate the main column
        document = main.document(title='Neutron Experiment')
        document.description = ''
        document.byline = 'byline?'
        p = document.paragraph()
        # Link to the experiment-creation wizard.
        action = actionRequireAuthentication(
            actor = 'neutronexperimentwizard', sentry = director.sentry,
            label = 'this wizard', routine = 'start',
            )
        wizard_link = action_link( action, director.cgihome )
        # Link to the list of the user's experiments.
        action = actionRequireAuthentication(
            actor = 'neutronexperiment', sentry = director.sentry,
            label = 'experiments', routine = 'listall',
            )
        list_link = action_link( action, director.cgihome )
        p.text = [
            'In this virtual neutron facility, you can set up',
            'a new experiment by using %s.' % wizard_link,
            'Or you can select from one of the %s you have run' % list_link,
            'and rerun it with new settings.',
            ]
        return page
    def delete(self, director):
        """Delete the experiment record given by self.inventory.id, then
        render the updated experiment list."""
        try:
            page = director.retrieveSecurePage( 'neutronexperiment' )
        except AuthenticationError, error:
            return error.page
        record = director.clerk.getNeutronExperiment( self.inventory.id )
        director.clerk.deleteRecord( record )
        return self.listall(director)
    def listall(self, director):
        """Render a page listing all of the user's neutron experiments."""
        try:
            page = director.retrieveSecurePage( 'neutronexperiment' )
        except AuthenticationError, err:
            return err.page
        main = page._body._content._main
        # populate the main column
        document = main.document(title='Experiments')
        document.description = ''
        document.byline = 'byline?'
        #
        p = document.paragraph()
        action = actionRequireAuthentication(
            label = 'this wizard',
            actor = 'neutronexperimentwizard',
            routine = 'start',
            sentry = director.sentry,
            )
        link = action_link( action, director.cgihome )
        p.text = [
            'You can perform various kinds of neutron experiments in',
            'this virtual neutron facility.',
            'To start, you can plan a new experiment by following %s.' % link,
            ]
        # retrieve id:record dictionary from db
        clerk = director.clerk
        experiments = clerk.indexNeutronExperiments()
        # make a list of all experiments
        # (listexperiments is a helper defined elsewhere in this module)
        listexperiments( experiments.values(), document, director )
        return page
    def view(self, director):
        """Render the detail page for one experiment.

        Unfinished experiments are redirected back into the wizard; finished
        ones dispatch to a status-specific _view_<status> renderer.
        """
        try:
            page = director.retrieveSecurePage( 'neutronexperiment' )
        except AuthenticationError, err:
            return err.page
        # the record we are working on
        id = self.inventory.id
        experiment = director.clerk.getNeutronExperiment( id )
        #see if the experiment is constructed or not. if not
        #ask the wizard to do the editing.
        if experiment.status in ['started', 'partially configured']:
            director.routine = 'submit_experiment'
            actor = director.retrieveActor( 'neutronexperimentwizard')
            director.configureComponent( actor )
            actor.inventory.id = self.inventory.id
            return actor.submit_experiment( director )
        main = page._body._content._main
        # populate the main column
        document = main.document(
            title='Experiment %r' % experiment.short_description )
        document.description = ( '')
        document.byline = 'byline?'
        # Dispatch to the renderer matching the experiment's status, e.g.
        # _view_submitted; such methods are expected to exist on this actor.
        status = experiment.status
        method = '_view_%s' % status
        method = getattr(self, method)
        method( document, director )
        return page
    def edit(self, director):
        """Render the run-experiment form for a constructed experiment.

        Processes any submitted form inputs first; experiments that are not
        yet fully constructed are redirected into the wizard.
        """
        try:
            page, document = self._head( director )
        except AuthenticationError, error:
            return error.page
        self.processFormInputs( director )
        #see if the experiment is constructed or not. if not
        #ask the wizard to do the editing.
        experiment = director.clerk.getNeutronExperiment( self.inventory.id )
        if experiment.status != 'constructed':
            director.routine = 'start'
            actor = director.retrieveActor( 'neutronexperimentwizard')
            director.configureComponent( actor )
            actor.inventory.id = self.inventory.id
            return actor.start( director )
        formcomponent = self.retrieveFormToShow( 'run_neutron_experiment' )
        formcomponent.inventory.id = self.inventory.id
        formcomponent.director = director
        # create form
        form = document.form(
            name='neutronexperiment',
            legend= formcomponent.legend(),
            action=director.cgihome)
        # specify action
        action = actionRequireAuthentication(
            actor = 'job', sentry = director.sentry,
            label = '', routine = 'edit',
            arguments = {'form-received': formcomponent.name } )
        from vnf.weaver import action_formfields
        action_formfields( action, form )
        # expand the form with fields of the data object that is being edited
        formcomponent.expand( form )
        # run button
        submit = form.control(name="submit", type="submit", value="Run")
        return page
def run(self, director):
try:
page = director.retrieveSecurePage( 'neutronexperiment' )
except AuthenticationError, err:
return err.page
experiment = director.clerk.getNeutronExperiment(
self.inventory.id)
job_id = experiment.job_id
if empty_id(job_id):
raise RuntimeError, "job not yet established"
job_id = experiment.job_id
job = director.clerk.getJob( job_id )
try:
Scheduler.schedule(job, director)
experiment.status = 'submitted'
except Exception, err:
raise
import traceback
experiment.status = 'submissionfailed'
job.error = traceback.format_exc()
# update db
director.clerk.updateRecord( job )
director.clerk.updateRecord( experiment )
# check status of job
Scheduler.check( job, director )
return self.view( director )
def selectinstrument(self, director):
try:
page, document = self._head( director )
except AuthenticationError, error:
return error.page
experiment = director.clerk.getNeutronExperiment(
self.inventory.id )
# create form to set scatterer type
formcomponent = self.retrieveFormToShow( 'selectneutroninstrument' )
formcomponent.inventory.experiment_id = experiment.id
formcomponent.director = director
# create form
form = document.form(
name='selectneutroninstrument',
legend= formcomponent.legend(),
action=director.cgihome)
# specify action
action = actionRequireAuthentication(
actor = 'neutronexperiment', sentry = director.sentry,
label = '', routine = 'edit',
arguments = { 'id': experiment.id,
'form-received': formcomponent.name } )
from vnf.weaver import action_formfields
action_formfields( action, form )
# expand the form with fields of the data object that is being edited
formcomponent.expand( form )
# ok button
submit = form.control(name="submit", type="submit", value="OK")
return page
    def __init__(self, name=None):
        """Constructor.

        name: component name; defaults to 'neutronexperiment' when None.
        """
        if name is None:
            name = "neutronexperiment"
        super(NeutronExperiment, self).__init__(name)
        return
def _add_review(self, document, director):
experiment = director.clerk.getNeutronExperiment(self.inventory.id)
experiment = director.clerk.getHierarchy( experiment )
from TreeViewCreator import create
view = create( experiment )
document.contents.append( view )
return
def _add_revision_sentence(self, document, director):
p = document.paragraph()
action = actionRequireAuthentication(
label = 'here',
actor = 'neutronexperimentwizard',
routine = 'start',
sentry = director.sentry,
id = self.inventory.id)
link = action_link( action, director.cgihome )
p.text = [
'If you need to make changes to this experiment,',
'please click %s.' % link,
]
return
def _add_run_sentence(self, document, director):
p = document.paragraph()
action = actionRequireAuthentication(
label = 'here',
actor = 'neutronexperiment',
routine = 'run',
sentry = director.sentry,
id = self.inventory.id)
link = action_link( action, director.cgihome )
p.text = [
'If you are done with experiment configuration,',
'please click %s to start this experiment.' % link,
]
return
def _add_delete_sentence(self, document, director):
p = document.paragraph()
action = actionRequireAuthentication(
label = 'here',
actor = 'neutronexperiment',
routine = 'delete',
sentry = director.sentry,
id = self.inventory.id)
link = action_link( action, director.cgihome )
p.text = [
'To delete this experiment, please click %s.' % link,
]
return
def _view_constructed(self, document, director):
experiment = director.clerk.getNeutronExperiment(self.inventory.id)
p = document.paragraph()
p.text = [
'Experiment %r has been constructed.' % experiment.short_description,
]
p.text += [
'Configuration details of this experiment can be',
'found out in the following tree view.',
'Please review them before you start the experiment.',
]
self._add_review( document, director )
self._add_revision_sentence( document, director )
self._add_run_sentence( document, director )
self._add_delete_sentence( document, director )
return
def _view_submissionfailed(self, document, director):
p = document.paragraph( )
p.text = [
'We have tried to start experiment %r for you but failed.' % experiment.short_description,
'This could be due to network error.',
'The error message returned from computation server is:',
]
experiment = director.clerk.getNeutronExperiment(self.inventory.id)
experiment = director.clerk.getHierarchy( experiment )
p = document.paragraph(cls = 'error' )
p.text = [ experiment.job.error ]
p = document.paragraph()
p.text += [
'Configuration details of this experiment can be',
'found out in the following tree view.',
]
self._add_review( document, director )
self._add_revision_sentence( document, director )
self._add_run_sentence( document, director )
self._add_delete_sentence( document, director )
return
    def _view_submitted(self, document, director):
        """Render the page for a submitted (running) experiment.

        Emits a javascript auto-refresh so the page polls for job
        completion, then shows a summary panel, the configuration tree
        view, and any results already available.  Promotes the
        experiment's status to 'finished' once the job reports done.
        """
        experiment = director.clerk.getNeutronExperiment(self.inventory.id)
        experiment = director.clerk.getHierarchy( experiment )
        # auto-refresh script: reloads the page every 10 seconds
        p = document.paragraph()
        p.text = [
            '''
        <script>
        <!--
        /*
        Auto Refresh Page with Time script
        By JavaScript Kit (javascriptkit.com)
        Over 200+ free scripts here!
        */
        //enter refresh time in "minutes:seconds" Minutes should range from 0 to inifinity. Seconds should range from 0 to 59
        var limit="0:10"
        var parselimit=limit.split(":")
        parselimit=parselimit[0]*60+parselimit[1]*1
        function beginrefresh(){
        if (parselimit==1)
        window.location.reload()
        else{
        parselimit-=1
        curmin=Math.floor(parselimit/60)
        cursec=parselimit%60
        if (curmin!=0)
        curtime=curmin+" minutes and "+cursec+" seconds left until page refresh!"
        else
        curtime=cursec+" seconds left until page refresh!"
        window.status=curtime
        setTimeout("beginrefresh()",1000)
        }
        }
        window.onload=beginrefresh
        //-->
        </script>
        ''',
            ]
        panel = document.form(
            name='null',
            legend= 'Summary',
            action='')
        p = panel.paragraph()
        p.text = [
            'Experiment %r was started %s on server %r, using %s nodes.' % (
            experiment.short_description, experiment.job.timeStart,
            experiment.job.computation_server.short_description,
            experiment.job.numprocessors,
            ),
            ]
        p.text += [
            'Configuration details of this experiment can be',
            'found out in the following tree view.',
            ]
        self._add_review( panel, director )
        self._add_results( document, director )
        # keep the experiment status in sync with the job record
        if experiment.job.status == 'finished': experiment.status = 'finished'
        director.clerk.updateRecord( experiment )
        return
    def _view_finished(self, document, director):
        """Render the page for a finished experiment: a summary panel,
        the configuration tree view, and the available results.
        """
        experiment = director.clerk.getNeutronExperiment(self.inventory.id)
        experiment = director.clerk.getHierarchy( experiment )
        panel = document.form(
            name='null',
            legend= 'Summary',
            action='')
        p = panel.paragraph()
        p.text = [
            'Experiment %r was started %s on server %r, using %s nodes.' % (
            experiment.short_description, experiment.job.timeStart,
            experiment.job.computation_server.short_description,
            experiment.job.numprocessors,
            ),
            ]
        p.text += [
            'Configuration details of this experiment can be',
            'found out in the following tree view.',
            ]
        self._add_review( panel, director )
        self._add_results( document, director )
        # keep the experiment status in sync with the job record
        if experiment.job.status == 'finished': experiment.status = 'finished'
        director.clerk.updateRecord( experiment )
        return
def _add_results(self, document, director):
experiment = director.clerk.getNeutronExperiment( self.inventory.id )
# data path
job_id = experiment.job_id
job = director.clerk.getJob( job_id )
from JobDataManager import JobDataManager
jobdatamanager = JobDataManager( job, director )
path = jobdatamanager.localpath()
server = job.computation_server
# list entries in the job directory in the remote server
output_files = jobdatamanager.listremotejobdir()
document = document.form(
name='null',
legend= 'Data',
action='')
# loop over expected results and see if any of them is available
# and post it
expected = experiment.expected_results
import os
for item in expected:
filename = item
if filename in output_files:
#f = os.path.join( path, item )
#retieve file from computation server
localcopy = jobdatamanager.makelocalcopy( filename )
self._post_result( localcopy, document, director )
continue
return
def _post_result(self, resultfile, document, director):
drawer = ResultDrawer( )
experiment = director.clerk.getNeutronExperiment( self.inventory.id )
drawer.draw( experiment, resultfile, document, director )
return
def _head(self, director):
page = director.retrieveSecurePage( 'neutronexperiment' )
main = page._body._content._main
# the record we are working on
id = self.inventory.id
experiment = director.clerk.getNeutronExperiment( id )
# populate the main column
document = main.document(
title='Neutron Experiment: %s' % experiment.short_description )
document.description = ( '')
document.byline = '<a href="http://danse.us">DANSE</a>'
return page, document
    def _configure(self):
        """Transfer configured inventory values onto the component."""
        base._configure(self)
        # cache the experiment id for convenient access
        self.id = self.inventory.id
        return
pass # end of NeutronExperiment
from wording import plural, present_be
def listexperiments( experiments, document, director ):
    """List the given experiments, one paragraph each, with view/delete
    links; when no instrument is chosen yet, link to the wizard's
    instrument selector instead of showing an instrument name.
    """
    p = document.paragraph()
    p.text = [ 'Here is a list of experiments you have planned or run:' ]
    formatstr = '%(index)s: %(viewlink)s (%(status)s) is a measurement of %(sample)r in %(instrument)r (%(deletelink)s)'
    actor = 'neutronexperiment'
    for i, element in enumerate( experiments ):
        p = document.paragraph()
        # fall back to a placeholder for experiments without a description
        name = element.short_description
        if name in ['', None, 'None'] : name = 'undefined'
        action = actionRequireAuthentication(
            actor, director.sentry,
            routine = 'view',
            label = name,
            id = element.id,
            )
        viewlink = action_link( action, director.cgihome )
        action = actionRequireAuthentication(
            actor, director.sentry,
            routine = 'delete',
            label = 'delete',
            id = element.id,
            )
        deletelink = action_link( action, director.cgihome )
        element = director.clerk.getHierarchy( element )
        if element.instrument is None \
           or element.instrument.instrument is None:
            # no instrument chosen yet: link to the wizard's selector
            action = actionRequireAuthentication(
                'neutronexperimentwizard', sentry = director.sentry,
                label = 'select instrument',
                routine = 'select_instrument',
                id = element.id,
                )
            instrument = action_link( action, director.cgihome )
        else:
            instrument = element.instrument.instrument.short_description
            pass # end if
        subs = {'index': i+1,
                'viewlink': viewlink,
                'deletelink': deletelink,
                'status': element.status,
                'instrument': instrument,
                'sample': 'sample',
                }
        p.text += [
            formatstr % subs,
            ]
        continue
    return
def view_instrument(instrument, form):
    """Add a sentence plus a tree view describing the experiment's
    instrument."""
    from TreeViewCreator import create
    intro = form.paragraph()
    intro.text = [
        'This experiment is to be performed in instrument %s' % instrument.short_description,
        ]
    form.contents.append( create( instrument ) )
    return
def view_sampleassembly(sampleassembly, form):
    """Add a sentence plus a tree view describing the sample assembly
    under study."""
    from TreeViewCreator import create
    intro = form.paragraph()
    intro.text = [
        'The sample to study: %s' % sampleassembly.short_description,
        ]
    form.contents.append( create( sampleassembly ) )
    return
def view_instrument_plain(instrument, form):
    """Describe the instrument as nested HTML lists: one item per
    component carrying its position/orientation and, for everything but
    the sample, the remaining column values of its record.
    """
    p = form.paragraph()
    p.text = [
        'This experiment is to be performed in instrument %s' % instrument.short_description,
        ]
    p = form.paragraph()
    geometer = instrument.geometer
    components = instrument.componentsequence
    p.text = [
        'Instrument %r has %s components: %s' % (
        instrument.short_description, len(components),
        ', '.join( [ comp for comp in components ] ) ),
        ]
    # bookkeeping columns that should not be displayed
    excluded_cols = [
        'id', 'creator', 'date', 'short_description',
        ]
    p = form.paragraph()
    p.text = [ '<UL>' ]
    for component in components:
        # the sample has no real component record; the others do
        if component != 'sample':
            component_record = getattr( instrument, component ).realcomponent
            component_type = component_record.__class__.__name__
        else:
            component_type = ''
            pass # endif
        p.text.append( '<li>%s: %s' % (component, component_type) )
        p.text.append( '<UL>' )
        record = geometer[ component ]
        p.text.append( '<li>Position: %s' % (record.position,) )
        p.text.append( '<li>Orientation: %s' % (record.orientation,) )
        if component == 'sample':
            p.text.append( '</UL>' )
            continue
        # one list item per remaining (non-excluded) column
        columns = component_record.getColumnNames()
        for col in columns:
            if col in excluded_cols: continue
            value = getattr( component_record, col )
            p.text.append('<li>%s: %s' % (col, value) )
            continue
        p.text.append( '</UL>' )
        continue
    p.text.append( '</UL>' )
    return
class ResultDrawer:
    """Renders a simulation result (an image file) into a web document,
    caching the image under a well-known plots directory."""

    def draw(self, experiment, result, document, director):
        """Show *result* (a path to an image file) for *experiment*.

        If this result was drawn before, its saved copy is reused;
        otherwise a SimulationResult record is created and the image is
        copied to the plots directory, named after the record id.
        """
        # special place to save plots (relative to the 'html' doc root)
        plots_path = 'images/plots'
        results = director.clerk.getSimulationResults( experiment )
        labels = [ r.label for r in results ]
        if result in labels:
            # already saved: reuse the existing record's id
            id = filter( lambda r: r.label == result, results )[0].id
        else:
            # create a new simulationresults record ...
            from vnf.dom.SimulationResult import SimulationResult
            result_record = director.clerk.new_dbobject(SimulationResult)
            result_record.label = result
            result_record.simulation_type = 'NeutronExperiment'
            result_record.simulation_id = experiment.id
            director.clerk.updateRecord( result_record )
            id = result_record.id
            # ... and copy the image to the special place, named by id
            import shutil
            shutil.copyfile(
                result, os.path.join( 'html', plots_path, '%s.png' % id ) )
        # web path of the saved image (was computed twice before)
        filepath1 = os.path.join( plots_path, '%s.png' % id )
        # title: the bare file name of the result
        path, name = os.path.split( result )
        name, ext = os.path.splitext( name )
        p = document.paragraph()
        p.text = [
            name,
            ]
        p = document.paragraph()
        p.text = [
            '<img src="%s/%s">' % ( director.home, filepath1 ),
            ]
        return
# NOTE(review): stale comment -- nothing here switches a pylab backend;
# the following are late module-level imports used by the routines above.
import os, spawn
import Scheduler
from misc import empty_id
# version
__id__ = "$Id$"
# End of file
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.