blob_id (string, 40) | directory_id (string, 40) | path (string, 5-283) | content_id (string, 40) | detected_licenses (sequence, 0-41) | license_type (2 classes) | repo_name (string, 7-96) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (58 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, nullable) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (11 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (43 classes) | src_encoding (9 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 7-5.88M) | extension (30 classes) | content (string, 7-5.88M) | authors (sequence, 1) | author (string, 0-73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
5ff1f4ca1dfed4b0ebb5d1bc6f7995b0366fe5f3 | e69a040f13518314321cb5ce13828be411636849 | /client/models.py | b57f89b11dbd4283dee5fb0977216a7d20947ceb | [] | no_license | maQgl/web2 | 0b85482366efc29fbeb819be727cef5ed4dcf2d7 | d797bec53bcc95d2ad63f15f90b981d1a56b5ae1 | refs/heads/master | 2021-01-10T23:28:18.555385 | 2016-10-11T10:38:36 | 2016-10-11T10:38:36 | 70,582,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | from __future__ import unicode_literals
from django.db import models
from core.models import BaseModel
class Client(BaseModel):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
birthdate = models.DateField()
description = models.TextField()
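# Usage sketch (added for illustration, not part of the original file; assumes a
# configured Django project and that BaseModel adds no extra required fields; the
# field values below are made up):
# client = Client.objects.create(first_name="Ada", last_name="Lovelace",
#                                birthdate="1815-12-10", description="Analyst")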
| [
"[email protected]"
] | |
42a30a70db0281e234b835cd86b02392ad8ec116 | 55bbe1e41f7f4e28ed1b2bf938ab661f6c3132a9 | /grading.py | b7639cfe7ceec2ee4a2806d975a63149aecc3183 | [] | no_license | dllkou001/PacMan | 5ea154d105b8d3afc8f4528ff19d90f26991d660 | 14c450d71669a4536a4ecd0944508b30585f6c60 | refs/heads/master | 2023-01-02T04:50:09.033045 | 2020-10-25T13:00:45 | 2020-10-25T13:00:45 | 306,051,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,604 | py | # grading.py
# ----------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
"Common code for autograders"
import html
import cgi
import time
import sys
import json
import traceback
import pdb
from collections import defaultdict
import util
class Grades:
"A data structure for project grades, along with formatting code to display them"
def __init__(self, projectName, questionsAndMaxesList,
gsOutput=False, edxOutput=False, muteOutput=False):
"""
Defines the grading scheme for a project
projectName: project name
questionsAndMaxesDict: a list of (question name, max points per question)
"""
self.questions = [el[0] for el in questionsAndMaxesList]
self.maxes = dict(questionsAndMaxesList)
self.points = Counter()
self.messages = dict([(q, []) for q in self.questions])
self.project = projectName
self.start = time.localtime()[1:6]
self.sane = True # Sanity checks
self.currentQuestion = None # Which question we're grading
self.edxOutput = edxOutput
self.gsOutput = gsOutput # GradeScope output
self.mute = muteOutput
self.prereqs = defaultdict(set)
#print('Autograder transcript for %s' % self.project)
print('Starting on %d-%d at %d:%02d:%02d' % self.start)
def addPrereq(self, question, prereq):
self.prereqs[question].add(prereq)
def grade(self, gradingModule, exceptionMap = {}, bonusPic = False):
"""
Grades each question
gradingModule: the module with all the grading functions (pass in with sys.modules[__name__])
"""
completedQuestions = set([])
for q in self.questions:
print('\nQuestion %s' % q)
print('=' * (9 + len(q)))
print()
self.currentQuestion = q
incompleted = self.prereqs[q].difference(completedQuestions)
if len(incompleted) > 0:
prereq = incompleted.pop()
print(
"""*** NOTE: Make sure to complete Question %s before working on Question %s,
*** because Question %s builds upon your answer for Question %s.
""" % (prereq, q, q, prereq))
continue
if self.mute: util.mutePrint()
try:
util.TimeoutFunction(getattr(gradingModule, q),1800)(self) # Call the question's function
#TimeoutFunction(getattr(gradingModule, q),1200)(self) # Call the question's function
except Exception as inst:
self.addExceptionMessage(q, inst, traceback)
self.addErrorHints(exceptionMap, inst, q[1])
except:
self.fail('FAIL: Terminated with a string exception.')
finally:
if self.mute: util.unmutePrint()
if self.points[q] >= self.maxes[q]:
completedQuestions.add(q)
print('\n### Question %s: %d/%d ###\n' % (q, self.points[q], self.maxes[q]))
print('\nFinished at %d:%02d:%02d' % time.localtime()[3:6])
print("\nProvisional grades\n==================")
for q in self.questions:
print('Question %s: %d/%d' % (q, self.points[q], self.maxes[q]))
print('------------------')
print('Total: %d/%d' % (self.points.totalCount(), sum(self.maxes.values())))
if bonusPic and self.points.totalCount() == 25:
print("""
ALL HAIL GRANDPAC.
LONG LIVE THE GHOSTBUSTING KING.
--- ---- ---
| \ / + \ / |
| + \--/ \--/ + |
| + + |
| + + + |
@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
\ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
\ / @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
V \ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
\ / @@@@@@@@@@@@@@@@@@@@@@@@@@
V @@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@
/\ @@@@@@@@@@@@@@@@@@@@@@
/ \ @@@@@@@@@@@@@@@@@@@@@@@@@
/\ / @@@@@@@@@@@@@@@@@@@@@@@@@@@
/ \ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
/ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@
""")
print("""
Your grades are NOT yet registered. To register your grades, make sure
to follow your instructor's guidelines to receive credit on your project.
""")
if self.edxOutput:
self.produceOutput()
if self.gsOutput:
self.produceGradeScopeOutput()
def addExceptionMessage(self, q, inst, traceback):
"""
Method to format the exception message, this is more complicated because
we need to cgi.escape the traceback but wrap the exception in a <pre> tag
"""
self.fail('FAIL: Exception raised: %s' % inst)
self.addMessage('')
for line in traceback.format_exc().split('\n'):
self.addMessage(line)
def addErrorHints(self, exceptionMap, errorInstance, questionNum):
typeOf = str(type(errorInstance))
questionName = 'q' + questionNum
errorHint = ''
# question specific error hints
if exceptionMap.get(questionName):
questionMap = exceptionMap.get(questionName)
if (questionMap.get(typeOf)):
errorHint = questionMap.get(typeOf)
# fall back to general error messages if a question specific
# one does not exist
if (exceptionMap.get(typeOf)):
errorHint = exceptionMap.get(typeOf)
# dont include the HTML if we have no error hint
if not errorHint:
return ''
for line in errorHint.split('\n'):
self.addMessage(line)
def produceGradeScopeOutput(self):
out_dct = {}
# total of entire submission
total_possible = sum(self.maxes.values())
total_score = sum(self.points.values())
out_dct['score'] = total_score
out_dct['max_score'] = total_possible
out_dct['output'] = "Total score (%d / %d)" % (total_score, total_possible)
# individual tests
tests_out = []
for name in self.questions:
test_out = {}
# test name
test_out['name'] = name
# test score
test_out['score'] = self.points[name]
test_out['max_score'] = self.maxes[name]
# others
is_correct = self.points[name] >= self.maxes[name]
test_out['output'] = " Question {num} ({points}/{max}) {correct}".format(
num=(name[1] if len(name) == 2 else name),
points=test_out['score'],
max=test_out['max_score'],
correct=('X' if not is_correct else ''),
)
test_out['tags'] = []
tests_out.append(test_out)
out_dct['tests'] = tests_out
# file output
with open('gradescope_response.json', 'w') as outfile:
json.dump(out_dct, outfile)
return
def produceOutput(self):
edxOutput = open('edx_response.html', 'w')
edxOutput.write("<div>")
# first sum
total_possible = sum(self.maxes.values())
total_score = sum(self.points.values())
checkOrX = '<span class="incorrect"/>'
if (total_score >= total_possible):
checkOrX = '<span class="correct"/>'
header = """
<h3>
Total score ({total_score} / {total_possible})
</h3>
""".format(total_score = total_score,
total_possible = total_possible,
checkOrX = checkOrX
)
edxOutput.write(header)
for q in self.questions:
if len(q) == 2:
name = q[1]
else:
name = q
checkOrX = '<span class="incorrect"/>'
if (self.points[q] >= self.maxes[q]):
checkOrX = '<span class="correct"/>'
#messages = '\n<br/>\n'.join(self.messages[q])
messages = "<pre>%s</pre>" % '\n'.join(self.messages[q])
output = """
<div class="test">
<section>
<div class="shortform">
Question {q} ({points}/{max}) {checkOrX}
</div>
<div class="longform">
{messages}
</div>
</section>
</div>
""".format(q = name,
max = self.maxes[q],
messages = messages,
checkOrX = checkOrX,
points = self.points[q]
)
# print("*** output for Question %s " % q[1])
# print(output)
edxOutput.write(output)
edxOutput.write("</div>")
edxOutput.close()
edxOutput = open('edx_grade', 'w')
edxOutput.write(str(self.points.totalCount()))
edxOutput.close()
def fail(self, message, raw=False):
"Sets sanity check bit to false and outputs a message"
self.sane = False
self.assignZeroCredit()
self.addMessage(message, raw)
def assignZeroCredit(self):
self.points[self.currentQuestion] = 0
def addPoints(self, amt):
self.points[self.currentQuestion] += amt
def deductPoints(self, amt):
self.points[self.currentQuestion] -= amt
def assignFullCredit(self, message="", raw=False):
self.points[self.currentQuestion] = self.maxes[self.currentQuestion]
if message != "":
self.addMessage(message, raw)
def addMessage(self, message, raw=False):
if not raw:
# We assume raw messages, formatted for HTML, are printed separately
if self.mute: util.unmutePrint()
print('*** ' + message)
if self.mute: util.mutePrint()
message = html.escape(message)
self.messages[self.currentQuestion].append(message)
def addMessageToEmail(self, message):
print("WARNING**** addMessageToEmail is deprecated %s" % message)
for line in message.split('\n'):
pass
#print('%%% ' + line + ' %%%')
#self.messages[self.currentQuestion].append(line)
class Counter(dict):
"""
Dict with default 0
"""
def __getitem__(self, idx):
try:
return dict.__getitem__(self, idx)
except KeyError:
return 0
def totalCount(self):
"""
Returns the sum of counts for all keys.
"""
return sum(self.values())
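# Usage sketch (added; the project name, question names and point values below are
# invented): a grading module defines one function per question, e.g. q1(grades),
# and then runs
#   grades = Grades('tutorial', [('q1', 3), ('q2', 2)])
#   grades.grade(sys.modules[__name__])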
| [
"[email protected]"
] | |
c649bb417016312af3c7c4e8a9f0d1dd722dfefa | 5a0bfb3db46eb2cc90c1b1721169674119808730 | /Finding_Roots/bisection.py | ba756d6a279c39a3c63bba90a973f69d99ba8eef | [] | no_license | gabrielrmachado/NumericalMethods | fe9c1a826f61ac4776a13a20f28d44ad68e11be0 | 7f8b86bd327c287953cc208324575a93542c310d | refs/heads/master | 2020-06-22T10:48:04.422413 | 2019-09-10T16:03:32 | 2019-09-10T16:03:32 | 197,110,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | import math
import numpy as np
def function(x):
# return math.pow(math.e, -x) * math.cos(x)
# return ((math.pow(x,3)) - (7*math.pow(x,2)) + (14*x) - 6)
return ((32 * math.pow(x,6)) - (48 * math.pow(x,4)) + (18 * math.pow(x,2)) - 1)
# return math.sin(math.cos(math.pow(math.e, x)))
def bisection(a, b, max_iter, TOL, counter):
if counter < max_iter:
counter = counter + 1
ans1 = function(a)
ans2 = function(b)
if abs(ans1) <= TOL:
print("Root found on iteration {0}.\nf({1}) = {2:.10f}".format(counter, a, function(a)))
elif abs(ans2) <= TOL:
print("Root found on iteration {0}.\nf({1}) = {2:.10f}".format(counter, b, function(b)))
elif ans1*ans2 < 0:
p = (a+b)/2
ans = function(p)
print("Iter. {0}: f({1}) = {2}".format(counter, p, ans))
if np.sign(ans) == np.sign(ans2):
bisection(a, p, max_iter, TOL, counter)
else:
bisection(p, b, max_iter, TOL, counter)
else:
print("F(a) and F(b) must have opposite signs!")
else:
print("Number of max iterations reached. No root was found.")
bisection(0.8, 1, 150, 0.001, 0) | [
"[email protected]"
] | |
5cd8e650c944ad0a10be42580412e8afd4ae592f | 8d8c8f332dbb7056f73baa7e25bc52de9a2d8616 | /train_emotion_classifier.py | 5faf70c5379c3834a790396cf76e8c58cc62475c | [] | no_license | youssefadel77/Emotion-Gender-Detection | ecedb57225f4eb2e6723d45537233a0983ccab19 | 1f0fd383cacb87b46befb5bf39c02cdc8339df15 | refs/heads/main | 2023-03-05T22:24:56.515256 | 2021-02-22T12:54:08 | 2021-02-22T12:54:08 | 340,699,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,641 | py | """
File: train_emotion_classifier.py
Author: Octavio Arriaga
Email: [email protected]
Github: https://github.com/oarriaga
Description: Train emotion classification model
"""
from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from utils.datasets import DataManager
from utils.datasets import split_data
from utils.preprocessor import preprocess_input
from keras.layers import Activation, Convolution2D, Dropout, Conv2D
from keras.layers import AveragePooling2D, BatchNormalization
from keras.layers import GlobalAveragePooling2D
from keras.models import Sequential
from keras.layers import Flatten
from keras.models import Model
from keras.layers import Input
from keras.layers import MaxPooling2D
from keras.layers import SeparableConv2D
from keras import layers
from keras.regularizers import l2
def mini_XCEPTION(input_shape, num_classes, l2_regularization=0.01):
regularization = l2(l2_regularization)
# base
img_input = Input(input_shape)
x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
use_bias=False)(img_input)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# module 1
residual = Conv2D(16, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(16, (3, 3), padding='same',
kernel_regularizer=regularization,
use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(16, (3, 3), padding='same',
kernel_regularizer=regularization,
use_bias=False)(x)
x = BatchNormalization()(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = layers.add([x, residual])
# module 2
residual = Conv2D(32, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(32, (3, 3), padding='same',
kernel_regularizer=regularization,
use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(32, (3, 3), padding='same',
kernel_regularizer=regularization,
use_bias=False)(x)
x = BatchNormalization()(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = layers.add([x, residual])
# module 3
residual = Conv2D(64, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(64, (3, 3), padding='same',
kernel_regularizer=regularization,
use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(64, (3, 3), padding='same',
kernel_regularizer=regularization,
use_bias=False)(x)
x = BatchNormalization()(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = layers.add([x, residual])
# module 4
residual = Conv2D(128, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(128, (3, 3), padding='same',
kernel_regularizer=regularization,
use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(128, (3, 3), padding='same',
kernel_regularizer=regularization,
use_bias=False)(x)
x = BatchNormalization()(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = layers.add([x, residual])
x = Conv2D(num_classes, (3, 3),
# kernel_regularizer=regularization,
padding='same')(x)
x = GlobalAveragePooling2D()(x)
output = Activation('softmax', name='predictions')(x)
model = Model(img_input, output)
return model
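# Added note: with the 64x64x1 input used below, the two stem convolutions ('valid'
# padding) and the four stride-2 residual modules reduce the feature maps from 64x64
# to roughly 4x4 before the final Conv2D + GlobalAveragePooling2D produce the
# num_classes-length softmax vector. A quick structural check (illustrative only):
#   mini_XCEPTION((64, 64, 1), 7).summary()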
# parameters
batch_size = 32
num_epochs = 10000
input_shape = (64, 64, 1)
validation_split = .2
verbose = 1
num_classes = 7
patience = 50
base_path = '../trained_models/emotion_models/'
# data generator
data_generator = ImageDataGenerator(
featurewise_center=False,
featurewise_std_normalization=False,
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=.1,
horizontal_flip=True)
# model parameters/compilation
model = mini_XCEPTION(input_shape, num_classes)
model.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
datasets = ['fer2013']
for dataset_name in datasets:
print('Training dataset:', dataset_name)
# callbacks
log_file_path = base_path + dataset_name + '_emotion_training.log'
csv_logger = CSVLogger(log_file_path, append=False)
early_stop = EarlyStopping('val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,
patience=int(patience/4), verbose=1)
trained_models_path = base_path + dataset_name + '_mini_XCEPTION'
model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names, 'val_loss', verbose=1,
save_best_only=True)
callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]
# loading dataset
data_loader = DataManager(dataset_name, image_size=input_shape[:2])
faces, emotions = data_loader.get_data()
faces = preprocess_input(faces)
num_samples, num_classes = emotions.shape
train_data, val_data = split_data(faces, emotions, validation_split)
train_faces, train_emotions = train_data
model.fit_generator(data_generator.flow(train_faces, train_emotions,
batch_size),
steps_per_epoch=len(train_faces) / batch_size,
epochs=num_epochs, verbose=1, callbacks=callbacks,
validation_data=val_data)
| [
"[email protected]"
] | |
ee4a2153a041faef8e9d64a0328557a97ce4e280 | db23328b2dedf1dcb8f1ec075f54d167f31905ce | /sumofnumbers.py | e3ece7a7df4f5eed8c919d27919b07f875334c31 | [] | no_license | Aleti-Prabhas/python-practice | ae9730e87488da44a521cf696b0648ba29438f00 | f8c0e120670ed856be9bdbbdd3406737e5642ef8 | refs/heads/main | 2023-04-18T02:04:55.176549 | 2021-05-21T13:10:58 | 2021-05-21T13:10:58 | 360,566,527 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | t=3
while t:
n=int(input("enter the n :"))
print("the sum of {} is {}".format(n,n*(n+1)//2))
t-1
| [
"[email protected]"
] | |
63b17bd938e8d081c3a7b3235a33595612a0855e | 86ec17d662ad5fc741b1dc08e37fd248c11b175f | /lib/flights/__init__.py | 8dcb37b550b42b35ca4bccd44851a4fb999ff29c | [] | no_license | joewalk102/FlightReservations | 68b38ddbd02dd624c304f54ca5a2b951722b3308 | 4ae4e980cd284db929c2cd2b4164a3b947670437 | refs/heads/master | 2022-06-23T05:34:45.864746 | 2022-06-19T20:17:38 | 2022-06-19T20:17:38 | 88,907,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | from .flights import Flight
| [
"[email protected]"
] | |
71eec46f398a39faf790c148a403919a82bfe18a | ec1664be697cb0e1c3ec7be9f17873ace304c4c6 | /He_atom_rhf.py | c223d79d8a0fe27741eaf89f989966ab3c975165 | [] | no_license | vincehouhou/LINEARHF | 4a2184053eb277c98e4711f067eaefbae2ca6a69 | 355d6648099cb82efed46c85697b2eeeac5ee17b | refs/heads/master | 2021-01-25T12:30:43.755538 | 2014-09-23T04:18:26 | 2014-09-23T04:18:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,048 | py |
# ************************************************************************
# File: He_atom_rhf.py
# Purpose: Solve the ground-state of He atom in restricted Hartree-Fock model, including energy correction step
# Version: FEniCS 1.4.0
# Author: Houdong Hu
# ************************************************************************
from dolfin import *
from math import *
import numpy as np
import datetime
mesh = BoxMesh(-40,-40,-40,40,40,40,2,2,2)
origin = Point(.0,.0,.0)
# construct priori adaptive mesh
for j in range(50):
cell_markers = CellFunction("bool",mesh)
cell_markers.set_all(False)
cmin = 100
for cell in cells(mesh):
p = cell.midpoint()
r =((p[0]-origin[0])**2+(p[1]-origin[1])**2+(p[2]-origin[2])**2)**0.5
if r<cmin:
cmin=r
for cell in cells(mesh):
p = cell.midpoint()
r =((p[0]-origin[0])**2+(p[1]-origin[1])**2+(p[2]-origin[2])**2)**0.5
if r<=cmin:
cell_markers[cell]=True
mesh = refine(mesh, cell_markers)
for k in range(0):
cell_markers = CellFunction("bool",mesh)
cell_markers.set_all(True)
mesh = refine(mesh, cell_markers)
cmin = 100
for cell in cells(mesh):
p = cell.midpoint()
r =((p[0]-origin[0])**2+(p[1]-origin[1])**2+(p[2]-origin[2])**2)**0.5
if r<cmin:
cmin=r
print cmin
print mesh.coordinates().shape
V = FunctionSpace(mesh, "Lagrange", 1)
delta=cmin
v_ext_corr = Expression("2.0/(pow(pow(x[0]-0.0,2)+pow(x[1]-0.0, 2)+pow(x[2]-0.0,2), 0.5)+delta)",delta=delta)
v_ext1=Expression("1.0/pow(pow(x[0]-0.0,2)+pow(x[1]-0.0, 2)+pow(x[2]-0.0,2),0.5)")
def boundary(x, on_boundary):
return on_boundary
# Define boundary condition
u0 = Constant(0.0)
bc = DirichletBC(V, u0, boundary)
bc1 = DirichletBC(V, v_ext1, boundary)
# Define test function and initial condition (STO-3G)
u = TrialFunction(V)
v = TestFunction(V)
alpha11=6.36242139
alpha12=1.15892300
alpha13=0.31364979
tmp11=Expression("(pow(2*alpha11/pi,3.0/4.0))*exp(-alpha11*(pow(x[0]-0.0,2)+pow(x[1]-0.0,2)+pow(x[2]-0.0,2)))",alpha11=alpha11)
tmp12=Expression("(pow(2*alpha12/pi,3.0/4.0))*exp(-alpha12*(pow(x[0]-0.0,2)+pow(x[1]-0.0,2)+pow(x[2]-0.0,2)))",alpha12=alpha12)
tmp13=Expression("(pow(2*alpha13/pi,3.0/4.0))*exp(-alpha13*(pow(x[0]-0.0,2)+pow(x[1]-0.0,2)+pow(x[2]-0.0,2)))",alpha13=alpha13)
rx1s=0.15432897*(interpolate(tmp11,V)).vector().array()+0.53532814*(interpolate(tmp12,V)).vector().array()+0.44463454*(interpolate(tmp13,V)).vector().array()
r1s=-0.85
r1s_p=0.0
energy=-2.5
tmp1=Function(V)
tmp1.vector()[:]=rx1s
frx1s=Function(V)
v1s = Function(V)
while True:
time0=datetime.datetime.now()
# normalization
nor1=assemble(inner(tmp1,tmp1)*dx)**0.5
frx1s.vector()[:]=tmp1.vector().array()/nor1
# coulomb potential
b1s=frx1s*frx1s*v*dx
a=0.25/pi*inner(nabla_grad(u), nabla_grad(v))*dx
problem = LinearVariationalProblem(a, b1s, v1s, bc1)
solver = LinearVariationalSolver(problem)
solver.parameters["linear_solver"] = "gmres"
solver.parameters["preconditioner"] = "amg"
solver.solve()
# iterate Helmholtz equation
hela1s=0.5*inner(nabla_grad(u), nabla_grad(v))*dx-u*r1s*v*dx
helb1s=(-v1s*frx1s+v_ext_corr*frx1s)*v*dx
problem = LinearVariationalProblem(hela1s, helb1s, tmp1, bc)
solver = LinearVariationalSolver(problem)
solver.parameters["linear_solver"] = "gmres"
solver.parameters["preconditioner"] = "amg"
solver.solve()
# energy correction step
t1=assemble(inner(-v1s*frx1s+v_ext_corr*frx1s,frx1s-tmp1)*dx)
n1=assemble(tmp1*tmp1*dx)
r1s_p=r1s
r1s=r1s+t1/n1
energy_T=assemble(inner(grad(frx1s),grad(frx1s))*dx)
energy_H=assemble(v1s*frx1s*frx1s*dx)
energy_ex=-assemble(2.0*v_ext_corr*frx1s*frx1s*dx)
energy_p=energy
energy=energy_T+energy_H+energy_ex
time1=datetime.datetime.now()
if abs(energy-energy_p)<1e-8:
break
# output in each step
print r1s_p,r1s,energy_p,energy,energy-energy_p,(time1-time0).total_seconds()
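# Reference note (added): the restricted Hartree-Fock limit for the helium ground
# state is about -2.8617 Hartree, so on a sufficiently refined mesh the converged
# `energy` printed above should approach that value; the soft-core parameter `delta`
# and the finite 80x80x80 box introduce small offsets.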
| [
"[email protected]"
] | |
61d733600cf9ae08ebb6048f86c4322b40dc5a0d | d44b58a203868884d97997aead32cb39fbe2fce5 | /SVO_2D.py | b9e25ff0ea90f6d87dc252997544f8eebd0f4cb0 | [] | no_license | ChengqianZhang/Visual_Odometry_AUV | 304a9a264d3bfafb506f2c2352b8aa689e598e2e | e8a43daa25e6e6a087835c363a16dfd8e604ed01 | refs/heads/master | 2020-06-23T11:54:30.034604 | 2019-09-23T10:26:19 | 2019-09-23T10:26:19 | 198,616,015 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,985 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 15:46:26 2019
@author: sheva
"""
import cv2
import numpy as np
import pandas as pd
import yaml
import functions_2D
import os
from matplotlib import pyplot as plt
start_id = 67
end_id = 240
size =[2,2]
with open('D:/Southampton/Msc Project/TEST FOLDER/SVO_configuration.yaml', 'r') as stream:
load_data_mission = yaml.load(stream)
#Load the filelist of left images
filelist_LC_path = load_data_mission.get('Images_L_path', None)
filelist_LC = pd.read_csv(filelist_LC_path)
print(list(filelist_LC))
#Load the filelist of Right images
filelist_RC_path = load_data_mission.get('Images_R_path', None)
filelist_RC = pd.read_csv(filelist_RC_path)
#Load images
images_LC_path = load_data_mission.get('LC', None)
images_RC_path = load_data_mission.get('RC', None)
#Get camera parameters and calculate projection matirxs
camera_parameter_file_path_L = load_data_mission.get('params_path_LC', None)
camera_parameter_file_path_R = load_data_mission.get('params_path_RC', None)
Cammat_1,Cammat_2,Proj_1,Proj_2,dist_1,dist_2 = functions_2D.calc_projection(camera_parameter_file_path_L,camera_parameter_file_path_R)
print(Cammat_1,Cammat_2,Proj_1,Proj_2)
true = functions_2D.getTruePose()
idx = start_id
while idx <= end_id:
#Detcet the left image at Tk-1
imLprev = cv2.imread(os.path.join(images_LC_path, filelist_LC['Imagenumber'][idx]), 0)
print("processing images",filelist_LC['Imagenumber'][idx],idx)
#Detcet the left image at Tk
imLnext = cv2.imread(os.path.join(images_LC_path, filelist_LC['Imagenumber'][idx+1]), 0)
print("processing images",filelist_LC['Imagenumber'][idx+1],idx+1)
#Detcet the right image at Tk-1
imRprev = cv2.imread(os.path.join(images_RC_path, filelist_RC['Imagenumber'][idx]), 0)
print("processing images",filelist_RC['Imagenumber'][idx],idx)
#Detcet the left image at Tk
imRnext = cv2.imread(os.path.join(images_RC_path, filelist_RC['Imagenumber'][idx+1]), 0)
print("processing images",filelist_RC['Imagenumber'][idx+1],idx+1)
#get key points and descriputor orb
# kpLprev,descLprev = functions_2D.getFeatures_orb(imLprev)
# kpLnext,descLnext = functions_2D.getFeatures_orb(imLnext)
#
# kpRprev,descRprev = functions_2D.getFeatures_orb(imRprev)
# kpRnext,descRnext = functions_2D.getFeatures_orb(imRnext)
#get key points and descriputor sift
kpLprev,descLprev = functions_2D.getFeatures_SIFT(imLprev,size)
kpLnext,descLnext = functions_2D.getFeatures_SIFT(imLnext,size)
kpRprev,descRprev = functions_2D.getFeatures_SIFT(imRprev,size)
kpRnext,descRnext = functions_2D.getFeatures_SIFT(imRnext,size)
print(descLprev.shape,descLnext.shape)
#get key points and descriputor surf
# kpLprev,descLprev = functions_2D.getFeatures_SURF(imLprev,size)
# kpLnext,descLnext = functions_2D.getFeatures_SURF(imLnext,size)
#
# kpRprev,descRprev = functions_2D.getFeatures_SURF(imRprev,size)
# kpRnext,descRnext = functions_2D.getFeatures_SURF(imRnext,size)
# print(kpLprev)
# print (len(kpLprev))
##correspondences
corLprev, corLnext, matchKeptL_T1,matchKeptL_T2,goodL = functions_2D.getCorres(descLprev,descLnext,kpLprev,kpLnext)
corRprev, corRnext, matchKeptR_T1,matchKeptR_T2,goodR = functions_2D.getCorres(descRprev,descRnext,kpRprev,kpRnext)
print('L1 , L2 :', corLprev.shape, corLnext.shape)
print('R1 , R2 :', corRprev.shape, corRnext.shape)
#Debug imgages
# print(corLprev.T, corLnext.T)
#
# plt.figure(figsize=(10,10))
# plt.plot(corLprev.T[:,0:1],corLprev.T[:,1:2], c='r', label = 'kpL_T1')
# plt.plot(corLnext.T[:,0:1],corLnext.T[:,1:2], c='g', label = 'kpL_T2')
# plt.xlim((0,2464))
# plt.ylim((2056,0))
# plt.xlabel("$x-direction(pixel)$")
# plt.ylabel("$y-direction(pixel)$")
# plt.legend()
#debug matches:
img_matches = np.empty((max(imLprev.shape[0], imLnext.shape[0]), imLnext.shape[1]+imLnext.shape[1], 3), dtype=np.uint8)
img3 = cv2.drawMatches(imLprev, kpLprev, imLnext, kpLnext, goodL, img_matches, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
# cv2.namedWindow('Good Matches', 0)
# cv2.imshow('Good Matches', img3)
# cv2.waitKey()
# calculate vehicle motion
truth_x1, truth_y1, truth_z1, absolute_scale1 = functions_2D.getAbsoluteScale(filelist_LC, idx)
truth_x2, truth_y2, truth_z2, absolute_scale2 = functions_2D.getAbsoluteScale(filelist_RC, idx)
print(absolute_scale1,absolute_scale2)
#t1-t2
E1, mask1 = cv2.findEssentialMat(corLprev.T, corLnext.T, Cammat_1, cv2.RANSAC,threshold = 1, prob = .99)
_1, R1, t1, mask1 = cv2.recoverPose(E1, corLprev.T, corLnext.T,Cammat_1)
E2, mask2 = cv2.findEssentialMat(corRprev.T,corRnext.T, Cammat_2, cv2.RANSAC,threshold = 1, prob = .99)
_2, R2, t2, mask2 = cv2.recoverPose(E2, corRprev.T,corRnext.T, Cammat_2)
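# Added note: cv2.recoverPose returns a unit-length translation, so the motion
# recovered from each essential matrix is only defined up to scale; the
# absolute_scale1/absolute_scale2 values obtained from getAbsoluteScale above are
# what restore metric units in the position updates below.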
#t2-t1
# E1, mask1 = cv2.findEssentialMat(corLnext.T, corLprev.T, Cammat_1, cv2.RANSAC,threshold = 1, prob = .99)
# _1, R1, t1, mask1 = cv2.recoverPose(E1, corLnext.T, corLprev.T, Cammat_1)
#
# E2, mask2 = cv2.findEssentialMat(corRnext.T, corRprev.T, Cammat_2, cv2.RANSAC,threshold = 1, prob = .99)
# _2, R2, t2, mask2 = cv2.recoverPose(E2, corRnext.T, corRprev.T, Cammat_2)
transform1 = functions_2D.makeTransform(R1,t1)
transform2 = functions_2D.makeTransform(R2,t2)
if idx == start_id:
if absolute_scale1 > 0.1:
t0 = np.array([[0,0,0]]).T
t1_f = t0 + absolute_scale1*R1.dot(t1)
R1_f = R1.dot(R1)
pos_1 = t0.T
pos_1 = np.concatenate((pos_1,t1_f.T),axis =0)
prevpos_1 = pos_1
if absolute_scale2 > 0.1:
t0 = np.array([[0,0,0]]).T
t2_f = t0 + absolute_scale2*R2.dot(t2)
R2_f = R2.dot(R2)
pos_2 = t0.T
pos_2 = np.concatenate((pos_2,t2_f.T),axis =0)
prevpos_2 = pos_2
else:
if absolute_scale1 > 0.1:
t1_f = t1_f + absolute_scale1 * R1_f.dot(t1)
R1_f = R1.dot(R1_f)
pos_1 = np.concatenate((prevpos_1,t1_f.T) ,axis =0)
prevpos_1 = pos_1
if absolute_scale2 > 0.1:
t2_f = t2_f + absolute_scale2 * R2_f.dot(t2)
R2_f = R2.dot(R2_f)
pos_2 = np.concatenate((prevpos_2,t2_f.T), axis =0)
prevpos_2 = pos_2
# if idx == start_id:
# if absolute_scale1 > 0.1:
# pos1 = np.array([[0,0,0]])
# newpos1 = functions_2D.posUpdate(pos1,transform1)
# pos1 = np.concatenate((pos1,newpos1),axis =0)
# delta1 = newpos1-pos1
# prevpos1 = newpos1
#
# if absolute_scale2 > 0.1:
# pos2 = np.array([[0,0,0]])
# newpos2 = functions_2D.posUpdate(pos2,transform2)
# pos2 = np.concatenate((pos2,newpos2),axis =0)
# delta2 = newpos2-pos2
# prevpos2 = newpos2
#
# else:
# if absolute_scale1 > 0.1:
# newpos1 = functions_2D.posUpdate(prevpos1,transform1)
# pos1 = np.concatenate((pos1,newpos1),axis =0)
# delta1 = newpos1-prevpos1
# prevpos1 = newpos1
# if absolute_scale2 > 0.1:
# newpos2 = functions_2D.posUpdate(prevpos2,transform2)
# pos2 = np.concatenate((pos2,newpos2),axis =0)
# delta2 = newpos2-prevpos2
# prevpos2 = newpos1
# trans = trans1
## trans = (trans1+trans2)/2
# print(trans2)
#
# rot = rot1
## rot = (rot1+rot2)/2
# print(rot2)
# print(matchKeptL_St,matchKeptR_St)
# transform1 = functions_2D.makeTransform(R1_f,t1_f)
# transform2 = functions_2D.makeTransform(R2_f,t2_f)
# if idx == start_id:
# pos = np.array([[0,0,0]])
# newpos = functions_2D.posUpdate(pos,transform1)
# pos = np.concatenate((pos,newpos),axis =0)
# prevpos = newpos
#
# else:
# newpos = functions_2D.posUpdate(prevpos,transform1)
# pos = np.concatenate((pos,newpos),axis =0)
# prevpos = newpos
print(t1)
print(pos_1)
# print(pos1)
# print(mask1)
idx += 1
plt.figure(figsize=(10,10))
plt.title('2D results with ORB_detector',fontsize = 20)
plt.plot(true[start_id:end_id,1],true[start_id:end_id,0],c ='r',label = 'Dr_data')
plt.plot((pos_1[:,0:1]+true[start_id,1]),(-pos_1[:,1:2]+true[start_id,0]),c= 'g',label = 'LC')
plt.plot((pos_2[:,0:1]+true[start_id,1]),(-pos_2[:,1:2]+true[start_id,0]),c= 'b',label = 'RC')
plt.xlim((205,220))
plt.xlabel("$Easting (m)$",fontsize = 20)
plt.ylabel("$Northing (m)$",fontsize = 20)
plt.legend(fontsize = 20)
| [
"[email protected]"
] | |
72647c0a005c96588d8d25613e4f4c5762bb9773 | 2fcb8038c7f87da4ae2408f45bb8cdf40e1b90e3 | /myTest/8583/my8583/lib/customizeFun.py | 0493ed3c8af3316184637d92d08e01e5fe1af10f | [] | no_license | JokerZhu/PythonLearn | dceb8cb8fbf5b4e1edce4ca96ce847ed38056cce | 27c2f53e8e1c53b9e680b6558797a2cab695561e | refs/heads/master | 2020-04-16T07:42:47.384470 | 2018-09-12T09:51:21 | 2018-09-12T09:51:21 | 36,870,000 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,796 | py | #!/usr/bin/python3.4
#-*- coding:utf8 -*-
import myConf
import logging
import os
import time
import qrcode
import qrcode.image.svg
from PIL import Image
import re
import readline
import pack8583
from ctypes import *
import Security
def GetSerial():
initSeq = '1'
# fetch the transaction serial number from the configuration file
cfgFile = myConf.GetCombination('app_env','CfgDir','usr_var','seqFile')
logging.info('seq file = [%s]' % cfgFile)
try:
with open(cfgFile,'a+') as seqFile:
seqFile.seek(0)
allLines = seqFile.readlines()
if len(allLines) != 1 :
logging.info('seq file format error')
seqFile.seek(0)
seqFile.truncate()
seqFile.writelines(initSeq )
else:
try:
if int(allLines[0]) + 1 > 999999:
seq = '%06d' % int(initSeq)
else:
seq = '%06d' % int(allLines[0])
seqFile.seek(0)
seqFile.truncate()
seqFile.writelines(str(int(seq ) + 1) )
except ValueError as e:
seq = '%06d' % int(initSeq)
seqFile.seek(0)
seqFile.truncate()
seqFile.writelines(str(int(seq ) + 1) )
except FileNotFoundError as e:
logging.info("can\'t open file [%s] " % cfgFile )
#logging.info("seq = [%s]" % seq )
return seq
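# Added note: the serial is kept to six digits and wraps back to the initial value
# once it would exceed 999999, matching the six-digit width of an ISO 8583 system
# trace audit number (field 11).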
def GetLocalDate():
T = time.localtime()
localDate = '%02d' % T[1] + '%02d' % T[2]
logging.info('localDate = [%s] ' % localDate)
return localDate
def GetLocalTime():
T = time.localtime()
localTime = '%02d' % T[3] + '%02d' % T[4] + '%02d' % T[5]
logging.info('localTime = [%s] ' % localTime)
return localTime
def CallCustomFun(functionName ):
logging.info('in CallCustomFun index = [%s]' % functionName)
result = OperatorOfFun.get(functionName)()
return result
def SetDefultValue(value ):
logging.info('in SetDefultValue = [%s]' % value)
return value
pass
def CallInputFun(txt = '' ):
logging.info('in InPutFun!!!')
pressData = input('请输入' + txt + ':\n')
return pressData
pass
def CallInputQRFun(txt = '' ):
logging.info('in InPutFun!!!')
pressData = input('请输入' + txt + ':\n')
return 'QRnumber=' + pressData
pass
def AutoSetFld(packSource = []):
if not isinstance(packSource[1],str) :
logging.error('this cfg %s is error' % packSource)
return None
try:
value = myOperator.get(packSource[0])(packSource[1])
except TypeError as e:
logging.error('not support this cfg %s' % packSource)
return None
return value
def CreateQrcode(sourceMsg ='alipaySeq=&QRlink='):
sorceData = []
if (not isinstance(sourceMsg,str)) and len(sourceMsg) <= 0:
logging.error('can\'t create qrcode!')
return 0
sorceData = re.findall(r'alipaySeq=(\d{0,20})&', sourceMsg)
sorceData += re.findall(r'QRlink=(.{0,128})$' ,sourceMsg)
if len(sorceData) != 2:
logging.error('can\'t create qrcode!')
return 0
cmd = 'qr %s' % (sorceData[1])
os.system(cmd)
input("press <enter> to continue")
def GenTermMac():
logging.info('in GenTermMac')
pack8583.setPackageFlf(64,'00000000')
tmpStr = create_string_buffer(1024)
Len = pack8583.libtest.packageFinal(tmpStr)
#logging.info('len = [%d] after pack = [%s]' %(Len ,tmpStr.value))
MAC = Security.GenerateTermMac(tmpStr.value.decode()[:-16])
logging.info(MAC)
return MAC
def GetCardNoFromPackage():
tmpStr = create_string_buffer(128)
length = pack8583.libtest.getFldValue(2,tmpStr,sizeof(tmpStr))
if length == 0:
length = pack8583.libtest.getFldValue(35,tmpStr,sizeof(tmpStr))
if length > 0:
cardno = re.findall(r'(\d{15,21})[D,=]',tmpStr.value.decode())[0]
else:
return None
else:
return None
logging.info('cardno = %s ' % cardno)
return cardno
def InPutPW(flag='' ):
logging.info('in input passwd fun!!')
if not isinstance(flag,str):
logging.error('input passwd error!')
return None
flagLen = len(flag)
# password supplied directly by the config file
if flagLen >= 6 and flagLen <= 12 and flag.isdigit():
passwd = flag
withcardno = False
pinblock = Security.GetPinblock3Des(passwd,)
elif flag == 'withcardno':
withcardno = True
inputPasswd = input('请输入您的密码:\n')
if len(inputPasswd) >= 6 and len(inputPasswd) <= 12 and inputPasswd.isdigit():
passwd = inputPasswd
cardNo = GetCardNoFromPackage()
else:
logging.error('you input passwd error')
return None
pinblock = Security.GetPinblock3Des(passwd,1,cardNo)
else:
return None
logging.info('pinblock = [%s]' % pinblock)
return pinblock
def InPutPWWithCard(flag='' ):
logging.info('in input passwd fun!!')
if not isinstance(flag,str):
logging.error('input passwd error!')
return None
flagLen = len(flag)
cardNo = GetCardNoFromPackage()
# password supplied directly by the config file
if flagLen >= 6 and flagLen <= 12 and flag.isdigit():
passwd = flag
withcardno = False
pinblock = Security.GetPinblock3Des(passwd,1,cardNo)
elif flag == '':
withcardno = True
inputPasswd = input('请输入您的密码:\n')
if len(inputPasswd) >= 6 and len(inputPasswd) <= 12 and inputPasswd.isdigit():
passwd = inputPasswd
else:
logging.error('you input passwd error')
return None
pinblock = Security.GetPinblock3Des(passwd,1,cardNo)
else:
return None
logging.info('pinblock = [%s]' % pinblock)
return pinblock
def InPutPWNoCard(flag='' ):
logging.info('in input passwd fun!!')
if not isinstance(flag,str):
logging.error('input passwd error!')
return None
flagLen = len(flag)
# password supplied directly by the config file
if flagLen >= 6 and flagLen <= 12 and flag.isdigit():
passwd = flag
withcardno = False
pinblock = Security.GetPinblock3Des(passwd)
elif flag == '':
withcardno = True
inputPasswd = input('请输入您的密码:\n')
if len(inputPasswd) >= 6 and len(inputPasswd) <= 12 and inputPasswd.isdigit():
passwd = inputPasswd
else:
logging.error('you input passwd error')
return None
pinblock = Security.GetPinblock3Des(passwd)
else:
return None
logging.info('pinblock = [%s]' % pinblock)
return pinblock
def SaveWorkKey(fld62):
rightLen = [24,40,44,60,84]
logging.info('work key = [%s]' % fld62)
if not isinstance(fld62,str) or len(fld62) == 0:
logging.error('get work key error')
return None
lenFld62 = int(len(fld62) / 2)
if lenFld62 not in rightLen:
logging.error('get work key error')
return None
PINKey = fld62[0:lenFld62]
MACKey = fld62[lenFld62:]
logging.info('PINKey = [%s] ,MACKey = [%s]' % (PINKey,MACKey))
if len(PINKey)== 40 or len(PINKey)== 44:
PINKey = PINKey[0:32]
#SetConf('termInfo','tpk',PINKey)
elif len(PINKey)== 24:
PINKey = PINKey[0:16]
else:
return None
if len(MACKey)== 40:
MACKey = MACKey[0:16]
#SetConf('termInfo','tak',MACKey)
elif len(MACKey)== 44:
MACKey = MACKey[4:20]
elif len(MACKey)== 24:
MACKey = MACKey[0:16]
else:
return None
logging.info('PINKey = [%s] ,MACKey = [%s]' % (PINKey,MACKey))
myConf.SetConf('termInfo','tpk',PINKey)
myConf.SetConf('termInfo','tak',MACKey)
myConf.tpk = PINKey
myConf.tak = MACKey
def GetLogin60():
defValue = '00000001003'
lenList = [16,32]
lenTmk = len(myConf.tmk)
if lenTmk in lenList:
if lenTmk == 16:
return '00000001001'
elif lenTmk == 32:
return '00000001003'
else:
return defValue
pass
def GetMid():
return myConf.mid
def GetTid():
return myConf.termid
def GetInsNo():
return myConf.InsNo
myOperator = {
'Def':SetDefultValue,
'Fun':CallCustomFun,
'InPut':CallInputFun,
'InPutqr':CallInputQRFun,
'InPutPWWithCard':InPutPWWithCard,
'InPutPWNoCard':InPutPWNoCard,
}
OperatorOfFun = {
'GetSerial':GetSerial,
'GetDate':GetLocalDate,
'GetTime':GetLocalTime,
'GenMac':GenTermMac,
'GetLogin60':GetLogin60,
'GetMid':GetMid,
'GetTid':GetTid,
'GetInsNo':GetInsNo,
}
#logging.info(AutoSetFld(['Def','234']))
#logging.info(AutoSetFld([1,'Fun','GetSerial']))
#logging.info(AutoSetFld(['Fun','GetDate']))
#GetLocalDate()
#GetLocalTime()
| [
"[email protected]"
] | |
69c2f0ba3b489402e69e17bcb84dca3d503d331a | 51477e354907d4d17238c21f4bae1a0acdefada3 | /app/my_api/tests/v2/test_auth.py | 44dcd1e59ffb880228992293f41e28df11e7a2ea | [] | no_license | AloisBlue/Politico_Backend | df31f9c5b4a7edd92e3305c5b094b4bb3a121973 | 852b3c3294ee665264f8936abf6b78738b9a5a55 | refs/heads/develop | 2022-12-13T17:12:41.164195 | 2019-02-22T07:39:23 | 2019-02-22T07:39:23 | 169,041,095 | 0 | 1 | null | 2022-12-08T01:36:14 | 2019-02-04T07:18:34 | Python | UTF-8 | Python | false | false | 13,371 | py | # test auth.py
import unittest
import os
import json
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
# local imports
from ...database import database, database_init
from app import create_app
class TestVote(unittest.TestCase):
"""docstring for TestVote."""
def setUp(self):
self.app = create_app('testing')
self.Client = self.app.test_client()
self.url = os.getenv("DATABASE_URL")
connection = database()
connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
self.cur = connection.cursor()
# signup variables
self.user_signs = {
"email": "[email protected]",
"firstname": "Alois",
"lastname": "Blue",
"othername": "Success",
"phonenumber": "0778082345",
"passporturl": "https://miro.medium.com/max/2400/1*hiAQNjsT30LuqlZRmpdJkQ.jpeg",
"password": "wangeCHIALOis@1",
"passwordconfirm": "wangeCHIALOis@1"
}
self.invalid_url = {
"email": "[email protected]",
"firstname": "Alois",
"lastname": "Blue",
"othername": "Success",
"phonenumber": "0778082345",
"passporturl": "flfd.com",
"password": "wangeCHIALOis@1",
"passwordconfirm": "wangeCHIALOis@1"
}
self.sign_bad_email_format = {
"email": "aloismburu.com",
"firstname": "Alois",
"lastname": "Blue",
"othername": "Success",
"phonenumber": "0778082345",
"passporturl": "jdkjfld.com",
"password": "wangeCHIALOis@1",
"passwordconfirm": "wangeCHIALOis@1"
}
self.password_less = {
"email": "[email protected]",
"firstname": "Alois",
"lastname": "Blue",
"othername": "Success",
"phonenumber": "0778082345",
"passporturl": "jdkjfld.com",
"password": "pasord",
"passwordconfirm": "wangeCHIALOis@1"
}
self.password_match = {
"email": "[email protected]",
"firstname": "Alois",
"lastname": "Blue",
"othername": "Success",
"phonenumber": "0778082345",
"passporturl": "jdkjfld.com",
"password": "wangeCHIALOis@1",
"passwordconfirm": "pagaword"
}
self.empty_email = {
"email": "",
"firstname": "Alois",
"lastname": "Blue",
"othername": "Success",
"phonenumber": "0778082345",
"passporturl": "jdkjfld.com",
"password": "wangeCHIALOis@1",
"passwordconfirm": "wangeCHIALOis@1"
}
self.empty_firstname = {
"email": "[email protected]",
"firstname": "",
"lastname": "Blue",
"othername": "Success",
"phonenumber": "0778082345",
"passporturl": "jdkjfld.com",
"password": "wangeCHIALOis@1",
"passwordconfirm": "wangeCHIALOis@1"
}
self.empty_lastname = {
"email": "[email protected]",
"firstname": "Alois",
"lastname": "",
"othername": "Success",
"phonenumber": "0778082345",
"passporturl": "jdkjfld.com",
"password": "wangeCHIALOis@1",
"passwordconfirm": "wangeCHIALOis@1"
}
self.empty_othername = {
"email": "[email protected]",
"firstname": "Alois",
"lastname": "Blue",
"othername": "",
"phonenumber": "0778082345",
"passporturl": "jdkjfld.com",
"password": "wangeCHIALOis@1",
"passwordconfirm": "wangeCHIALOis@1"
}
self.empty_phonenumber = {
"email": "[email protected]",
"firstname": "Alois",
"lastname": "Blue",
"othername": "Success",
"phonenumber": "",
"passporturl": "jdkjfld.com",
"password": "wangeCHIALOis@1",
"passwordconfirm": "wangeCHIALOis@1"
}
self.empty_passport_url = {
"email": "[email protected]",
"firstname": "Alois",
"lastname": "Blue",
"othername": "Success",
"phonenumber": "0778082345",
"passporturl": "",
"password": "wangeCHIALOis@1",
"passwordconfirm": "wangeCHIALOis@1"
}
self.empty_password = {
"email": "[email protected]",
"firstname": "Alois",
"lastname": "Blue",
"othername": "Success",
"phonenumber": "0778082345",
"passporturl": "jdkjfld.com",
"password": "",
"passwordconfirm": "wangeCHIALOis@1"
}
self.empty_password_confirm = {
"email": "[email protected]",
"firstname": "Alois",
"lastname": "Blue",
"othername": "Success",
"phonenumber": "0778082345",
"passporturl": "jdkjfld.com",
"password": "wangeCHIALOis@1",
"passwordconfirm": ""
}
# login variables
self.login = {
"email": "[email protected]",
"password": "wangeCHIALOis@1"
}
self.bad_email_format = {
"email": "aloismburu.com",
"password": "wangeCHIALOis@1"
}
self.login_empty_email = {
"email": "",
"password": "wangeCHIALOis@1"
}
self.login_empty_password = {
"email": "[email protected]",
"password": ""
}
with self.app.app_context():
# init db creating tables
database_init()
# user signs up
def test_user_signsup(self):
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.user_signs),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "User with such email already exists.")
self.assertEqual(409, response.status_code)
def test_invalid_url(self):
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.invalid_url),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "The passport URL is invalid")
self.assertEqual(400, response.status_code)
def test_user_exists(self):
resp = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.user_signs),
content_type='application/json')
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.user_signs),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "User with such email already exists.")
self.assertEqual(409, response.status_code)
def test_bad_email_format(self):
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.sign_bad_email_format),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "Email format not correct")
self.assertEqual(400, response.status_code)
def test_password_lessthan_8(self):
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.password_less),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(400, response.status_code)
def test_password_match(self):
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.password_match),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "Passwords must match")
self.assertEqual(400, response.status_code)
def test_empty_email(self):
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.empty_email),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "Email cannot be empty")
self.assertEqual(400, response.status_code)
def test_empty_firstname(self):
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.empty_firstname),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "Firstname cannot be empty")
self.assertEqual(400, response.status_code)
def test_empty_lastname(self):
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.empty_lastname),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "Lastname cannot be empty")
self.assertEqual(400, response.status_code)
def test_empty_othername(self):
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.empty_othername),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "Othername cannot be empty")
self.assertEqual(400, response.status_code)
def test_empty_phonenumber(self):
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.empty_phonenumber),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "You must provide a phone number")
self.assertEqual(400, response.status_code)
def test_empty_passport_url(self):
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.empty_passport_url),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "Passport url is needed")
self.assertEqual(400, response.status_code)
def test_empty_password(self):
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.empty_password),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "Password is empty")
self.assertEqual(400, response.status_code)
def test_empty_password_confirm(self):
response = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.empty_password_confirm),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "Password confirm is empty")
self.assertEqual(400, response.status_code)
# login tests
def test_login(self):
resp = self.Client.post('/api/v2/auth/signup',
data=json.dumps(self.user_signs),
content_type='application/json')
response = self.Client.post('/api/v2/auth/login',
data=json.dumps(self.login),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "Logged in as {}".format(self.login['email']))
self.assertEqual(200, response.status_code)
def test_login_empty_email(self):
response = self.Client.post('/api/v2/auth/login',
data=json.dumps(self.login_empty_email),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "You must provide an email")
self.assertEqual(400, response.status_code)
def test_login_empty_password(self):
response = self.Client.post('/api/v2/auth/login',
data=json.dumps(self.login_empty_password),
content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(result['Message'], "You must provide a password")
self.assertEqual(400, response.status_code)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
7b2a2326f0408669dc1abe5b9889c58fc52e95ec | 451ec277c9a485721ac92c73a5a227bd9ae26b75 | /lofar/spam/flag.py | 0157e5b500cdb446d3e29088f274a50d18ac54f4 | [] | no_license | kcavagnolo/astro-projects | 2bad2c1760d85c95708ef76f8fe43bdbbdda2875 | 4cbfc4e8f95f54e85208352c5acdb7188f494536 | refs/heads/main | 2023-08-14T00:40:44.732673 | 2021-10-01T17:22:40 | 2021-10-01T17:22:40 | 412,538,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,445 | py | ###############################################################################
# import Python modules
from sys import *
from os import *
from datetime import *
from math import *
# import user modules
from files import *
from aips import *
from acalc import *
from parameter import *
from mpfit import *
from solutions import *
from plot import *
from error import *
from image import *
###############################################################################
def flag_uv_data( uv, flag_params = [], keep_solutions = True, flag_version = 0 ):
if ( flag_version > 0 ):
fl_version = flag_version
else:
fl_version = uv.table_highver( 'FG' ) + 1
for uvflg_params in flag_params:
call_aips_task( 'UVFLG', indata = uv, outfgver = fl_version, opcode = 'FLAG',
**uvflg_params )
# apply flag table
flag_uv = apply_flag_table( uv, version = flag_version,
keep_solutions = keep_solutions )
return flag_uv
###############################################################################
def make_time_images( uv, image_size = 1024, apply_solutions = True, solution_version = 0,
imagr_params = {}, apply_flags = True, flag_version = 0, print_info = False, epsilon = 1.e-8,
keep_images = False, time_step = 10. ):
# works best if source model is subtracted from uv
if apply_solutions:
if table_exists( uv, 'SN', solution_version ):
docalib = 100
gainuse = solution_version
else:
docalib = -1
gainuse = -1
else:
docalib = -1
gainuse = -1
nchav = get_channel_count( uv )
cell_size = restore_parameter( uv, 'cell_size' )
uv_size = restore_parameter( uv, 'pb_image_size' )
time_array = array( get_time_list( uv ) )
# dtime = time_step / ( 60. * 24. )
# apply previous flags
if ( apply_flags and table_exists( uv, 'FG', flag_version ) ):
flag_uv = apply_flag_table( uv, version = flag_version, keep_solutions = True )
else:
flag_uv = uv
# get reference noise
temp_image = get_aips_file( uv.disk, 'T0000', 'IIM001', -1, 'MA' )
temp_beam = get_facet_beam( temp_image )
call_aips_task( 'IMAGR', indata = flag_uv, nchav = nchav, nfield = 1, niter = 0,
cellsize = [ cell_size, cell_size ], do3dimag = 1, outdisk = temp_image.disk,
outname = temp_image.name, outseq = temp_image.seq, docalib = docalib,
imsize = [ image_size, image_size ], gainuse = gainuse, flagver = -1,
dotv = 0, allok = 0, uvsize = [ uv_size, uv_size ], **imagr_params )
fill_facet( temp_image, do_edge_circle = True )
[ temp_avg, temp_noise ] = call_aips_task( 'IMEAN', indata = temp_image,
pixavg = 0., pixstd = 0., outputs = [ 'pixavg', 'pixstd' ] )
[ temp_avg, temp_noise ] = call_aips_task( 'IMEAN', indata = temp_image,
pixavg = temp_avg, pixstd = temp_noise, pixrange = [ - 10. * temp_noise,
10. * temp_noise ], outputs = [ 'pixavg', 'pixstd' ] )
if ( temp_noise == 0. ):
temp_noise = get_image_rms( temp_image )
if ( not keep_images ):
temp_image.zap()
temp_beam.zap()
reference_noise = temp_noise
if print_info:
print '... reference noise = %s' % ( repr( reference_noise ) )
# loop over time
noise_list = []
timerang_list = []
t = 0
# time = time_array[ 0 ]
int_time = restore_parameter( uv, 'integration_time' )
n = 0
dn = int( ceil( time_step * 60. / int_time ) )
# while ( time < time_array[ -1 ] ):
while ( n < len( time_array ) ):
# sel = awhere( ( time_array > time ) & ( time_array < time + dtime ) )
# if ( len( sel ) == 0 ):
# time = time + dtime
# continue
# sel = arange( n, n + dn ).reshape( dn, 1 )
t = t + 1
time_low = time_to_dhms( time_array[ n ] - 0.5 * int_time / ( 24. * 3600. ) )
time_high = time_to_dhms( time_array[ min( n + dn, len( time_array ) ) - 1 ] +
0.5 * int_time / ( 24. * 3600. ) )
timerang_list.append( time_low + time_high )
# time = time + dtime
n = n + dn
if print_info:
print '... time range %s = %s - %s' % ( repr( t ), repr( time_low ),
repr( time_high ) )
call_aips_task( 'UVFLG', indata = flag_uv, outfgver = 0, opcode = 'FLAG',
reason = 'test', timerang = time_low + time_high )
flagver = flag_uv.table_highver( 'FG' )
temp_image = get_aips_file( uv.disk, 'T%04d' % ( t ), 'IIM001', -1, 'MA' )
temp_beam = get_facet_beam( temp_image )
call_aips_task( 'IMAGR', indata = flag_uv, nchav = nchav, nfield = 1, niter = 0,
cellsize = [ cell_size, cell_size ], uvsize = [ uv_size, uv_size ],
outdisk = temp_image.disk, outname = temp_image.name, outseq = temp_image.seq,
imsize = [ image_size, image_size ], do3dimag = 1, docalib = docalib,
gainuse = gainuse, flagver = flagver, dotv = 0, allok = 0, **imagr_params )
fill_facet( temp_image, do_edge_circle = True )
[ temp_avg, temp_noise ] = call_aips_task( 'IMEAN', indata = temp_image,
pixavg = 0., pixstd = 0., outputs = [ 'pixavg', 'pixstd' ] )
[ temp_avg, temp_noise ] = call_aips_task( 'IMEAN', indata = temp_image,
pixavg = temp_avg, pixstd = temp_noise, pixrange = [ -10. * temp_noise,
10. * temp_noise ], outputs = [ 'pixavg', 'pixstd' ] )
flag_uv.zap_table( 'FG', flagver )
if ( temp_noise == 0. ):
temp_noise = get_image_rms( temp_image )
if ( not keep_images ):
temp_image.zap()
temp_beam.zap()
if ( temp_noise == reference_noise ):
noise_list.append( 1.e9 )
else:
noise_list.append( temp_noise )
min_t = 1 + noise_list.index( min( noise_list ) )
min_timerang = timerang_list[ min_t - 1 ]
min_noise = noise_list[ min_t - 1 ]
if print_info:
print '... minimum noise time range %s (%s)= %s' % ( repr( min_t ), repr( min_timerang ),
repr( min_noise ) )
if ( apply_flags and table_exists( uv, 'FG', flag_version ) ):
flag_uv.zap()
return noise_list
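# Interpretation note (added): each noise_list entry is the residual image noise
# measured with that time slice flagged out, so the slice reported above as giving
# the minimum noise is the one whose removal improves the image most, i.e. the
# strongest candidate for RFI flagging.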
###############################################################################
def make_antenna_images( uv, apply_solutions = True, solution_version = 0,
imagr_params = {}, apply_flags = True, flag_version = 0, print_info = False,
epsilon = 1.e-8, skip_antennas = [], keep_images = False, max_facets = 0,
facet_list = [] ):
# works best if source model is subtracted from uv
# extract beam size
try:
beam = [ imagr_params[ 'bmaj' ], imagr_params[ 'bmin' ], imagr_params[ 'bpa' ] ]
except:
beam = []
# apply flags and solutions
if ( apply_flags and table_exists( uv, 'FG', flag_version ) ):
flag_uv = apply_flag_table( uv, version = flag_version, keep_solutions = True )
else:
flag_uv = uv
if ( apply_solutions and table_exists( uv, 'SN', solution_version ) ):
cal_uv = apply_solution_table( flag_uv, version = solution_version )
else:
cal_uv = flag_uv
if ( ( flag_uv != uv ) and ( flag_uv != cal_uv ) ):
flag_uv.zap()
if ( len( facet_list ) > 0 ):
facet_file_name = restore_parameter( cal_uv, 'pb_facet_file_name' )
temp_facet_file_name = facet_file_name + '.TEMP'
extract_facet_definitions( facet_file_name, facet_list, temp_facet_file_name )
cpb_count = restore_parameter( cal_uv, 'cpb_facet_count' )
store_parameter( cal_uv, 'cpb_facet_count', len( facet_list ) )
store_parameter( cal_uv, 'pb_facet_file_name', temp_facet_file_name )
elif ( max_facets > 0 ):
cpb_count = restore_parameter( cal_uv, 'cpb_facet_count' )
store_parameter( cal_uv, 'cpb_facet_count', max_facets )
# get reference noise
noise_list = []
i_params = imagr_params.copy()
cpb_facets = image_cpb_facets( cal_uv, imagr_params = i_params )
reference_noise = measure_cpb_noise( cal_uv, cpb_facets, keep_image = keep_images )
remove_facets( cpb_facets )
if print_info:
print '... reference noise = %s' % ( repr( reference_noise ) )
noise_list.append( reference_noise )
if keep_images:
cpb_image = get_aips_file( uv.disk, 'CPB', 'FLATN', 0, 'MA' )
new_image = get_aips_file( uv.disk, 'CPB', 'A00', -1, 'MA' )
cpb_image.rename( name = new_image.name, klass = new_image.klass,
seq = new_image.seq )
# loop over antennas
antenna_count = len( cal_uv.antennas )
for a in range( 1, 1 + antenna_count ):
if ( a in skip_antennas ):
noise_list.append( 1.e9 )
continue
if print_info:
print '... antenna %s' % ( repr( a ) )
i_params[ 'antennas' ] = [ - a ]
cpb_facets = image_cpb_facets( cal_uv, imagr_params = i_params )
temp_noise = measure_cpb_noise( cal_uv, cpb_facets, keep_image = keep_images )
remove_facets( cpb_facets )
if ( abs( temp_noise - reference_noise ) / reference_noise > epsilon ):
noise_list.append( temp_noise )
else:
noise_list.append( 1.e9 )
if keep_images:
cpb_image = get_aips_file( uv.disk, 'CPB', 'FLATN', 0, 'MA' )
new_image = get_aips_file( uv.disk, 'CPB', 'A%02d' % ( a ), -1, 'MA' )
cpb_image.rename( name = new_image.name, klass = new_image.klass,
seq = new_image.seq )
min_a = noise_list.index( min( noise_list ) )
min_noise = noise_list[ min_a ]
if print_info:
print '... minimum noise antenna %s = %s' % ( repr( min_a ), repr( min_noise ) )
# cleanup
if ( cal_uv != uv ):
cal_uv.zap()
  elif ( len( facet_list ) > 0 ):
    store_parameter( uv, 'cpb_facet_count', cpb_count )
    store_parameter( uv, 'pb_facet_file_name', facet_file_name )
  elif ( max_facets > 0 ) :
    store_parameter( uv, 'cpb_facet_count', cpb_count )
return noise_list
###############################################################################
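# Example (illustrative sketch, not part of the original module): selecting
# candidate bad antennas from the noise list returned by make_antenna_images().
# The fractional improvement cutoff is an assumption used only for illustration.
def find_bad_antennas( uv, cutoff = 0.05, **keywords ):
  # index 0 holds the reference noise, one entry per antenna follows
  noise_list = make_antenna_images( uv, **keywords )
  reference_noise = noise_list[ 0 ]
  bad_antennas = []
  for a in range( 1, len( noise_list ) ):
    # skipped or unchanged antennas were stored as 1.e9
    if ( noise_list[ a ] > 1.e6 ):
      continue
    # excluding this antenna lowered the noise by more than the cutoff fraction
    if ( 1. - ( noise_list[ a ] / reference_noise ) > cutoff ):
      bad_antennas.append( a )
  return bad_antennas
###############################################################################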
def make_baseline_images( uv, antenna, apply_solutions = True, solution_version = 0,
imagr_params = {}, apply_flags = True, flag_version = 0, print_info = False,
epsilon = 1.e-8, skip_antennas = [], keep_images = False, max_facets = 0,
facet_list = [] ):
# works best if source model is subtracted from uv
# extract beam size
try:
beam = [ imagr_params[ 'bmaj' ], imagr_params[ 'bmin' ], imagr_params[ 'bpa' ] ]
except:
beam = []
# apply flags and solutions
if ( apply_flags and table_exists( uv, 'FG', flag_version ) ):
flag_uv = apply_flag_table( uv, version = flag_version, keep_solutions = True )
else:
flag_uv = uv
if ( apply_solutions and table_exists( uv, 'SN', solution_version ) ):
cal_uv = apply_solution_table( flag_uv, version = solution_version )
else:
cal_uv = flag_uv
if ( ( flag_uv != uv ) and ( flag_uv != cal_uv ) ):
flag_uv.zap()
if ( len( facet_list ) > 0 ):
facet_file_name = restore_parameter( cal_uv, 'pb_facet_file_name' )
temp_facet_file_name = facet_file_name + '.TEMP'
extract_facet_definitions( facet_file_name, facet_list, temp_facet_file_name )
cpb_count = restore_parameter( cal_uv, 'cpb_facet_count' )
store_parameter( cal_uv, 'cpb_facet_count', len( facet_list ) )
store_parameter( cal_uv, 'pb_facet_file_name', temp_facet_file_name )
elif ( max_facets > 0 ):
cpb_count = restore_parameter( cal_uv, 'cpb_facet_count' )
store_parameter( cal_uv, 'cpb_facet_count', max_facets )
# get reference noise
noise_list = []
i_params = imagr_params.copy()
cpb_facets = image_cpb_facets( cal_uv, imagr_params = i_params )
reference_noise = measure_cpb_noise( cal_uv, cpb_facets, keep_image = keep_images )
remove_facets( cpb_facets )
if print_info:
print '... reference noise = %s' % ( repr( reference_noise ) )
noise_list.append( reference_noise )
if keep_images:
cpb_image = get_aips_file( uv.disk, 'CPB', 'FLATN', 0, 'MA' )
new_image = get_aips_file( uv.disk, 'CPB', 'B0000', -1, 'MA' )
cpb_image.rename( name = new_image.name, klass = new_image.klass,
seq = new_image.seq )
# loop over antennas
antenna_count = len( cal_uv.antennas )
i_params[ 'antennas' ] = [ - antenna ]
for a in range( 1, 1 + antenna_count ):
if ( ( a in skip_antennas ) or ( a == antenna ) ):
noise_list.append( 1.e9 )
continue
if print_info:
print '... baseline %s-%s' % ( repr( antenna ), repr( a ) )
i_params[ 'baseline' ] = [ -a ]
cpb_facets = image_cpb_facets( cal_uv, imagr_params = i_params )
temp_noise = measure_cpb_noise( cal_uv, cpb_facets, keep_image = keep_images )
remove_facets( cpb_facets )
if ( abs( temp_noise - reference_noise ) / reference_noise > epsilon ):
noise_list.append( temp_noise )
else:
noise_list.append( 1.e9 )
if keep_images:
cpb_image = get_aips_file( uv.disk, 'CPB', 'FLATN', 0, 'MA' )
new_image = get_aips_file( uv.disk, 'CPB', 'B%02d%02d' % ( antenna, a ),
-1, 'MA' )
cpb_image.rename( name = new_image.name, klass = new_image.klass,
seq = new_image.seq )
min_a = noise_list.index( min( noise_list ) )
min_noise = noise_list[ min_a ]
if print_info:
print '... minimum noise baseline %s-%s = %s' % ( repr( antenna ),
repr( min_a ), repr( min_noise ) )
# cleanup
if ( cal_uv != uv ):
cal_uv.zap()
elif ( len( facet_list ) > 0 ):
store_parameter( uv, 'cpb_facet_count', cpb_count )
store_parameter( uv, 'pb_facet_file_name', facet_file_name )
elif ( max_facets > 0 ) :
store_parameter( uv, 'cpb_facet_count', cpb_count )
return noise_list
###############################################################################
def remove_antenna_images( uv ):
antenna_count = len( uv.antennas )
for a in range( antenna_count + 1 ):
try:
image = get_aips_file( uv.disk, 'CPB', 'A%02d' % ( a ), 0, 'MA' )
while image.exists():
image.zap()
image = get_aips_file( uv.disk, 'CPB', 'A%02d' % ( a ), 0, 'MA' )
except:
continue
return
###############################################################################
def remove_baseline_images( uv ):
antenna_count = len( uv.antennas )
for a in range( antenna_count + 1 ):
for b in range( antenna_count + 1 ):
try:
image = get_aips_file( uv.disk, 'CPB', 'B%02d%02d' % ( a, b ), 0, 'MA' )
while image.exists():
image.zap()
image = get_aips_file( uv.disk, 'CPB', 'B%02d%02d' % ( a, b ), 0, 'MA' )
except:
continue
return
###############################################################################
def flag_undulations( uv, image, cutoff = 6., extend = 1, print_info = False ):
# cut image to nearest power of two
image_size = get_image_size( image )
new_image_size = [ int( 2.**floor( log( image_size[ 0 ] ) / log( 2. ) ) ),
int( 2.**floor( log( image_size[ 1 ] ) / log( 2. ) ) ) ]
xinc = 1
yinc = 1
if ( new_image_size[ 0 ] != new_image_size[ 1 ] ):
if ( new_image_size[ 0 ] > new_image_size[ 1 ] ):
xinc = new_image_size[ 0 ] / new_image_size[ 1 ]
else:
yinc = new_image_size[ 1 ] / new_image_size[ 0 ]
blc = [ 1 + int( ceil( float( image_size[ 0 ] - xinc * new_image_size[ 0 ] ) / 2. ) ),
1 + int( floor( float( image_size[ 1 ] - yinc * new_image_size[ 1 ] ) / 2. ) ) ]
trc = [ int( ceil( float( image_size[ 0 ] + xinc * new_image_size[ 0 ] ) / 2. ) ),
int( floor( float( image_size[ 1 ] + yinc * new_image_size[ 1 ] ) / 2. ) ) ]
sub_image = get_aips_file( image.disk, image.name, 'SUB', -1, 'MA' )
call_aips_task( 'SUBIM', indata = image, outdata = sub_image,
blc = blc, trc = trc, xinc = xinc, yinc = yinc )
# remove tables to speed up things
cc_max = sub_image.table_highver( 'CC' )
for i in range( 1, 1 + cc_max ):
if table_exists( sub_image, 'CC', i ):
sub_image.zap_table( 'CC', i )
sn_max = sub_image.table_highver( 'SN' )
for i in range( 1, 1 + sn_max ):
if table_exists( sub_image, 'SN', i ):
sub_image.zap_table( 'SN', i )
# clip flux above 5 sigma
im_rms = get_image_rms( sub_image )
im_max = abs( get_image_extremum( sub_image )[ 0 ] )
while ( im_max > 5. * im_rms ):
clip_image( sub_image, [ -5. * im_rms, 5. * im_rms ],
[ get_aips_magic_value(), get_aips_magic_value() ] )
im_rms = get_image_rms( sub_image )
im_max = abs( get_image_extremum( sub_image )[ 0 ] )
# replace image blanks by zeros
fft_image = get_aips_file( image.disk, image.name, 'FFT', -1, 'MA' )
call_aips_task( 'REMAG', indata = sub_image, outdata = fft_image,
pixval = 0. )
sub_image.zap()
# FFT image
fft_real = get_aips_file( image.disk, image.name, 'UVREAL', -1, 'MA' )
fft_imag = get_aips_file( image.disk, image.name, 'UVIMAG', -1, 'MA' )
outseq = max( fft_real.seq, fft_imag.seq )
call_aips_task( 'FFT', indata = fft_image, opcode = 'MARE',
outdisk = fft_real.disk, outname = fft_real.name, outseq = outseq )
fft_image.zap()
fft_real = get_aips_file( image.disk, image.name, 'UVREAL', outseq, 'MA' )
fft_imag = get_aips_file( image.disk, image.name, 'UVIMAG', outseq, 'MA' )
# combine FFT results into amplitude and phase images
fft_amp = get_aips_file( image.disk, image.name, 'FFTA', -1, 'MA' )
call_aips_task( 'COMB', indata = fft_real, in2data = fft_imag,
outdata = fft_amp, opcode = 'POLI', aparm = [ 1., 1., 0., 0. ] )
# fft_phs = get_aips_file( image.disk, image.name, 'FFTP', -1, 'MA' )
# call_aips_task( 'COMB', indata = fft_real, in2data = fft_imag,
# outdata = fft_phs, opcode = 'POLA', aparm = [ 1., 0., 0., 0. ] )
fft_real.zap()
fft_imag.zap()
# get FFT image info
pix = get_image_pixels( fft_amp, flip = False ) - 1.
for ctype in fft_amp.header.ctype:
if ( ctype.find( 'UU' ) != - 1 ):
u_index = fft_amp.header.ctype.index( ctype )
if ( ctype.find( 'VV' ) != - 1 ):
v_index = fft_amp.header.ctype.index( ctype )
uv_ref = [ int( round( fft_amp.header.crpix[ u_index ] ) ) - 1,
int( round( fft_amp.header.crpix[ v_index ] ) ) - 1 ]
uv_size = [ fft_amp.header.cdelt[ u_index ], fft_amp.header.cdelt[ v_index ] ]
factor = get_frequency( uv ) / get_frequency( image )
du = factor * uv_size[ 0 ]
dv = factor * uv_size[ 1 ]
fft_amp.zap()
# apply UV track mask
mask = azeros( pix )
sel = []
group_count = 0
for group in wizardry( uv ):
group_count = group_count + 1
uvw = group.uvw
du = int( round( uvw[ 0 ] / ( uv_size[ 0 ] * factor ) ) )
dv = int( round( uvw[ 1 ] / ( uv_size[ 1 ] * factor ) ) )
u1 = uv_ref[ 0 ] + du - extend
u2 = uv_ref[ 0 ] + du + extend + 1
u3 = uv_ref[ 0 ] - du - extend
u4 = uv_ref[ 0 ] - du + extend + 1
v1 = uv_ref[ 1 ] + dv - extend
v2 = uv_ref[ 1 ] + dv + extend + 1
v3 = uv_ref[ 1 ] - dv - extend
v4 = uv_ref[ 1 ] - dv + extend + 1
mask[ u1 : u2, v1 : v2 ] = 1.
mask[ u3 : u4, v3 : v4 ] = 1.
# mask[ uv_ref[ 0 ] + du, uv_ref[ 1 ] + dv ] = 2.
# mask[ uv_ref[ 0 ] - du, uv_ref[ 1 ] - dv ] = 2.
# pix[ u1 : u2, v1 : v2 ] *= - asign( pix[ u1 : u2, v1 : v2 ] )
# pix[ u3 : u4, v3 : v4 ] *= - asign( pix[ u3 : u4, v3 : v4 ] )
pix = pix * mask
# identify amplitude outliers
sel = array( awhere( pix > 0. ), dtype = float32 )
data_list = []
for s in sel:
u = factor * ( s[ 0 ] - uv_ref[ 0 ] ) * uv_size[ 0 ]
v = factor * ( s[ 1 ] - uv_ref[ 1 ] ) * uv_size[ 1 ]
data_list.append( [ s[ 0 ], s[ 1 ], u, v, u**2 + v**2,
pix[ s[ 0 ], s[ 1 ] ] ] )
data_list.sort( cmp = lambda a, b: cmp( a[ 4 ], b[ 4 ] ) )
bins = int( floor( sqrt( float( len( data_list ) ) ) ) )
for i in range( bins ):
bin = data_list[ i * bins : min( ( i + 1 ) * bins, len( data_list ) ) ]
data = array( bin )
rms = sqrt( mean( data[ : , 5 ]**2 ) )
sel = awhere( data[ : , 5 ] > cutoff * rms )
while ( len( sel ) > 0 ):
bin2 = [ [ y for y in x ] for x in bin ]
for j in range( len( sel ) ):
k = sel[ j, 0 ]
d = data[ k ]
u1 = int( d[ 0 ] ) - extend
u2 = int( d[ 0 ] ) + extend + 1
u3 = 2 * uv_ref[ 0 ] - int( d[ 0 ] ) - extend
u4 = 2 * uv_ref[ 0 ] - int( d[ 0 ] ) + extend + 1
v1 = int( d[ 1 ] ) - extend
v2 = int( d[ 1 ] ) + extend + 1
v3 = 2 * uv_ref[ 1 ] - int( d[ 1 ] ) - extend
v4 = 2 * uv_ref[ 1 ] - int( d[ 1 ] ) + extend + 1
pix[ u1 : u2, v1 : v2 ] *= -asign( pix[ u1 : u2, v1 : v2 ] )
pix[ u3 : u4, v3 : v4 ] *= -asign( pix[ u3 : u4, v3 : v4 ] )
bin.remove( bin2[ k ] )
data = array( bin )
rms = sqrt( mean( data[ : , 5 ]**2 ) )
sel = awhere( data[ : , 5 ] > cutoff * rms )
# convert pixels to UV ranges and flag
# TODO: incorporate xinc & yinc
flag_uv = get_aips_file( uv.disk, uv.name, 'UFLAG', -1, 'UV' )
sel = awhere( pix[ 0 : uv_ref[ 0 ] + 1 ] < 0. )
if ( len( sel ) == 0 ):
call_aips_task( 'MOVE', indata = uv, outdata = flag_uv,
userid = get_aips_userid() )
return flag_uv
size = len( pix )
adu = abs( factor * 0.51 * uv_size[ 0 ] )
adv = abs( factor * 0.51 * uv_size[ 1 ] )
for s in sel:
au = abs( factor * float( s[ 0 ] - uv_ref[ 0 ] ) * uv_size[ 0 ] )
av = abs( factor * float( s[ 1 ] - uv_ref[ 1 ] ) * uv_size[ 1 ] )
if ( not flag_uv.exists() ):
call_aips_task( 'UVWAX', indata = uv, outdata = flag_uv,
aparm = [ au-adu, au+adu, av-adv, av+adv, 0, 0 ] )
else:
dummy_uv = get_aips_file( uv.disk, uv.name, 'DUMMY', -1, 'UV' )
while table_exists( flag_uv, 'SN', 0 ):
flag_uv.zap_table( 'SN', 0 )
while table_exists( flag_uv, 'NI', 0 ):
flag_uv.zap_table( 'NI', 0 )
while table_exists( flag_uv, 'OB', 0 ):
flag_uv.zap_table( 'OB', 0 )
call_aips_task( 'UVWAX', indata = flag_uv, outdata = dummy_uv,
aparm = [ au-adu, au+adu, av-adv, av+adv, 0, 0 ] )
flag_uv.zap()
dummy_uv.rename( name = flag_uv.name, klass = flag_uv.klass,
seq = flag_uv.seq )
if flag_uv.exists():
# copy tables back
if ( table_exists( uv, 'SN', 0 ) and ( not table_exists( flag_uv, 'SN', 0 ) ) ):
high = uv.table_highver( 'SN' )
for i in range( 1, 1 + high ):
if table_exists( uv, 'SN', i ):
call_aips_task( 'TACOP', indata = uv, outdata = flag_uv, inext = 'SN',
invers = i, outvers = i, ncount = 1 )
if ( table_exists( uv, 'NI', 0 ) and ( not table_exists( flag_uv, 'NI', 0 ) ) ):
high = uv.table_highver( 'NI' )
for i in range( 1, 1 + high ):
if table_exists( uv, 'NI', i ):
call_aips_task( 'TACOP', indata = uv, outdata = flag_uv, inext = 'NI',
invers = i, outvers = i, ncount = 1 )
if ( table_exists( uv, 'OB', 0 ) and ( not table_exists( flag_uv, 'OB', 0 ) ) ):
high = uv.table_highver( 'OB' )
for i in range( 1, 1 + high ):
if table_exists( uv, 'OB', i ):
call_aips_task( 'TACOP', indata = uv, outdata = flag_uv, inext = 'OB',
invers = i, outvers = i, ncount = 1 )
else:
call_aips_task( 'MOVE', indata = uv, outdata = flag_uv,
userid = get_aips_userid() )
if print_info:
flagged = 2 * len( sel )
pixels = len( data_list )
fraction = float( flagged ) / float( pixels )
print '... flagged %s percent of data' % ( repr( 100. * fraction ) )
return flag_uv
###############################################################################
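# Example (illustrative sketch, not part of the original module): a minimal way
# flag_undulations() might be applied; the AIPS disk, names and classes below
# are assumptions for illustration only.
def example_flag_undulations():
  uv = get_aips_file( 1, 'TARGET', 'UV', 0, 'UV' )
  image = get_aips_file( 1, 'TARGET', 'ICL001', 0, 'MA' )
  # returns a copy of the UV data with the offending UV ranges excluded
  flag_uv = flag_undulations( uv, image, cutoff = 6., extend = 1,
      print_info = True )
  return flag_uv
###############################################################################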
def make_baseline_flags( uv, apply_solutions = True, solution_version = 0,
max_facets = 0, imagr_params = {}, print_info = False, # improvement = 0., # 0.0025,
measure_final_noise = True, keep_final_image = False, flag_version = 0,
fix_beam = False, antennas = [] ):
if ( ( flag_version == 0 ) or ( not table_exists( uv, 'FG', flag_version ) ) ) :
flagver = uv.table_highver( 'FG' ) + 1
else:
flagver = flag_version
baseline_list = []
# filter imagr params
rmss = make_antenna_images( uv, apply_solutions = apply_solutions,
solution_version = solution_version, imagr_params = imagr_params,
apply_flags = True, flag_version = flagver, print_info = print_info,
epsilon = 1e-8, skip_antennas = range( 1000 ), keep_images = True,
max_facets = max_facets )
ref_im = get_aips_file( uv.disk, 'CPB', 'A00', 0, 'MA' )
[ bmaj, bmin, bpa ] = get_beam_size( ref_im )
ref_im.zap()
i_params = imagr_params.copy()
if fix_beam:
i_params[ 'bmaj' ] = bmaj
i_params[ 'bmin' ] = bmin
i_params[ 'bpa' ] = bpa
rmss = make_antenna_images( uv, apply_solutions = apply_solutions,
solution_version = solution_version, imagr_params = i_params,
apply_flags = True, flag_version = flagver, print_info = print_info,
epsilon = 1e-8, skip_antennas = [], keep_images = True,
max_facets = max_facets )
rmss = array( rmss )
dead_antennas = awhere( rmss > 1.e6 ).ravel().tolist()
# sel = awhere( 1. - ( rmss / rmss[ 0 ] ) > improvement )
# if ( len( sel ) == 0 ):
# return baseline_list
# antennas = sel.ravel().tolist()
# rmss = aget( rmss, sel ).tolist()
# ar_list = []
# for i in range( len( antennas ) ):
# ar_list.append( [ antennas[ i ], rmss[ i ] ] )
# ar_list.sort( cmp = lambda a, b: cmp( a[ 1 ], b[ 1 ] ) )
# antennas = [ x[ 0 ] for x in ar_list ]
antenna_count = len( rmss ) - 1
ref_im = get_aips_file( uv.disk, 'CPB', 'A00', 0, 'MA' )
for a in range( 1, 1 + antenna_count ):
im = get_aips_file( uv.disk, 'CPB', 'A%02d' % ( a ), 0, 'MA' )
rms = 0.
if ( not a in dead_antennas ):
res_im = get_aips_file( uv.disk, 'CPB', 'M%02d' % ( a ), -1, 'MA' )
call_aips_task( 'COMB', indata = im, in2data = ref_im, outdata = res_im,
opcode = 'SUM', aparm = [ 1., -1. ] )
rms = get_image_rms( res_im )
res_im.zap()
im.zap()
rmss[ a ] = rms
ref_im.zap()
sel = awhere( rmss[ 1 : ] > 0. )
if ( len( sel ) == 0 ):
return baseline_list
sel = sel + 1
ants = sel.ravel().tolist()
rmss = aget( rmss, sel ).tolist()
ar_list = []
for i in range( len( ants ) ):
ar_list.append( [ ants[ i ], rmss[ i ] ] )
ar_list.sort( cmp = lambda a, b: cmp( b[ 1 ], a[ 1 ] ) )
ants = [ x[ 0 ] for x in ar_list ]
if print_info:
print '... processing antennas in the following order: '
print repr( ants )
for i in range( len( ants ) ):
if ( len( antennas ) > 0 ):
if ( not i in antennas ):
continue
if print_info:
print '... processing antenna ' + repr( ants[ i ] )
rmss = make_baseline_images( uv, ants[ i ], imagr_params = i_params,
apply_solutions = apply_solutions, solution_version = solution_version,
apply_flags = True, flag_version = flagver, print_info = print_info,
epsilon = 1e-8, skip_antennas = dead_antennas + ants[ 0 : i ],
keep_images = False, max_facets = max_facets )
rmss = array( rmss )
sel = awhere( 1. - ( rmss / rmss[ 0 ] ) > 0. )
if ( len( sel ) == 0 ):
# continue
break
baselines = sel.ravel().tolist()
if ( len( baselines ) > 0 ):
if print_info:
        print '... flagging baselines %s - %s' % ( repr( ants[ i ] ),
repr( baselines ) )
call_aips_task( 'UVFLG', indata = uv, outfgver = flagver,
antennas = [ ants[ i ] ], baseline = baselines, reason = 'ripples' )
for b in baselines:
baseline_list.append( [ ants[ i ], b ] )
if measure_final_noise:
rmss = make_antenna_images( uv, apply_solutions = apply_solutions,
solution_version = solution_version, imagr_params = imagr_params,
apply_flags = True, flag_version = flagver, print_info = print_info,
epsilon = 1e-8, skip_antennas = range( 1000 ), keep_images = keep_final_image,
max_facets = max_facets )
return baseline_list
###############################################################################
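# Example (illustrative sketch, not part of the original module): a possible
# driver for the baseline ripple search above.  Disk, source name and IMAGR
# parameters are assumptions for illustration only.
def example_make_baseline_flags():
  uv = get_aips_file( 1, 'TARGET', 'UV', 0, 'UV' )
  # search for baselines that imprint ripples on the central image and flag them
  baseline_list = make_baseline_flags( uv, apply_solutions = True,
      solution_version = 0, max_facets = 16, imagr_params = { 'robust' : -1. },
      print_info = True, fix_beam = True )
  return baseline_list
###############################################################################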
| [
"[email protected]"
] | |
9d11e71efb57cb7492f8693d7711bcf512766ba5 | 7370306657f8e66018ae88b1c3db323bfd466710 | /app/migrations/0007_auto_20141125_0153.py | 905494f8e7b462ddbb1f84006d4542a3c6f6e3b2 | [] | no_license | bradenneufeld/Nectere | a3c824902a6f3429571c2baa034335c082475c0c | 870eac23452a9e1195028407f7f3669dde59d161 | refs/heads/master | 2021-01-23T15:42:39.085553 | 2015-09-04T22:16:40 | 2015-09-04T22:16:40 | 41,940,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0006_auto_20141125_0032'),
]
operations = [
migrations.AddField(
model_name='mfilteroptions',
name='match_id',
field=models.IntegerField(default=0),
preserve_default=True,
),
migrations.AddField(
model_name='mfilteroptions',
name='users',
field=models.ManyToManyField(to='app.User'),
preserve_default=True,
),
]
| [
"[email protected]"
] | |
ad2ea15cef4301d7f8b5227179b46d14b2ca82ce | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/446/usersdata/343/104747/submittedfiles/diagonaldominante.py | b75485db6ccbdd3657c717e4231a0d344b9c22fa | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | # -*- coding: utf-8 -*-
n = int(input('número de linhas e colunas da matriz: '))
matriz = []
for i in range(n) :
elementos = []
for j in range(n) :
elementos.append(int(input('elementos da matriz: ')))
matriz.append(elementos)
linhas = 0
linhas = linhas + matriz[0][i+1]
print(linhas) | [
"[email protected]"
] | |
6cd04255453986f590c57c17644345ce8df51c16 | 263341a413ab36b75a2bcd20003037d7bf71122d | /ready_data.py | a8fd025119eaedaa588153554050e68928ab5a7a | [] | no_license | astrofizkid/SciPyNOAA | 84f533c21e240cfa38b823ee6fd85560133fcbfc | 0aa6ae754cd59d8d52b021699fbb660bff60dc55 | refs/heads/master | 2021-05-19T08:53:15.959513 | 2020-04-01T17:33:09 | 2020-04-01T17:33:09 | 251,613,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | py | """
This code reads in GHCN CSV data for a specified number of years and calculates
various aggregations of precipitation and temperature data.
"""
import ftplib
import numpy as np
import re
import pandas as pd
def tidy_frame(obs_df):
"""
Return a tidy dataframe of GHCN data.
Args:
obs_df: GHCN dataframe to tidy
Returns (pandas._DataFrame): Tidy DataFrame
"""
obs_df.columns = ['id', 'date', 'obs_element', 'value', 'm_flag', 'q-flag', 's-flag', 'obs-time']
pivot_table = obs_df.pivot_table(index=['id', 'date'], columns='obs_element', values='value')
return pivot_table.reset_index()
def read_years(year_begin, year_end):
"""
Read in CSV GHCN data from its FTP endpoint.
Args:
        year_begin: first year of data to download (inclusive)
        year_end: last year of data to download (inclusive)
    Returns (pandas.DataFrame): tidy frame covering the requested years
"""
ftp_session = ftplib.FTP('ftp.ncdc.noaa.gov')
ftp_session.login()
ftp_session.cwd('/pub/data/ghcn/daily/by_year/')
year_range = np.arange(year_begin, year_end + 1)
file_listing = ftp_session.nlst('*.csv.gz')
valid_dfs = []
for csv_file in file_listing:
        csv_year = int(re.match(r'\d{4}', csv_file).group(0))
if csv_year in year_range:
valid_dfs.append(
pd.read_csv(
'ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily/by_year/{}'.format(csv_file),
header=None,
)
)
ftp_session.close()
valid_df = pd.concat(valid_dfs, axis=0)
return tidy_frame(valid_df)
def main():
"""
Main Function
Returns: None
"""
ghcn_df = read_years(1880, 1881)
print(ghcn_df.head())
if __name__ == '__main__':
main()
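# Example (illustrative sketch, not part of the original script): the module
# docstring mentions aggregations, so this shows one way the tidy frame could
# be aggregated.  The TMAX/PRCP column names follow GHCN element codes and are
# an assumption about which elements are present in the downloaded years.
def monthly_summary(ghcn_df):
    """Return station-by-month mean TMAX and total PRCP from a tidy GHCN frame."""
    df = ghcn_df.copy()
    # GHCN 'date' values are YYYYMMDD integers; keep only the YYYYMM part
    df['month'] = df['date'].astype(str).str[:6]
    grouped = df.groupby(['id', 'month']).agg(
        mean_tmax=('TMAX', 'mean'),
        total_prcp=('PRCP', 'sum'),
    )
    return grouped.reset_index()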
| [
"[email protected]"
] | |
1c751310823ef36d67d4d6004b8ab34d800c2825 | ed7d812310cbe6c8a7a5863f422f4aaa4f0de94c | /models.py | 363b6b17eb11c161555f1f73d3f2aab6e60837cf | [] | no_license | NoonWood/MyFlask | 3c0d3876e07179a56ce2203c91af1d36ebbd6f55 | 588192b37428d0b089bfb88482e366c1b8698759 | refs/heads/master | 2021-08-23T22:22:41.506816 | 2017-12-06T21:25:19 | 2017-12-06T21:25:19 | 112,068,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,374 | py |
from app import db
from datetime import datetime
import re
from flask_security import UserMixin, RoleMixin
from sqlalchemy import event
from sqlalchemy.orm import *
#ROLE_USER = 0
#ROLE_ADMIN = 1
def slugify(s): # generate a slug from the title
pattern = r'[^\w+]'
return re.sub(pattern, '-', s)
post_tags = db.Table('post_tags',
    db.Column('post_id', # column name
db.Integer,
        db.ForeignKey('post.id')), # post.id: 'post' is the model's table, 'id' is its column
db.Column('tag_id',
db.Integer,
db.ForeignKey('tag.id'))
)
####
authors_join = db.Table('authors_join',
    db.Column('post_id', # column name
db.Integer,
        db.ForeignKey('post.id')), # post.id: 'post' is the model's table, 'id' is its column
db.Column('author_id',
db.Integer,
db.ForeignKey('user.id'))
)
####
class Post(db.Model):
id = db.Column(db.Integer, primary_key = True)
title = db.Column(db.String(140), nullable=False)
slug = db.Column(db.String(140), unique=True, nullable=False)
body = db.Column(db.Text, nullable=False)
timestamp = db.Column(db.DateTime, default=datetime.utcnow)
authors = db.relationship('User', secondary=authors_join, lazy='joined',
backref=db.backref('posts',
lazy='dynamic'))
####
tags = db.relationship('Tag', secondary=post_tags,
backref= db.backref('posts',
            lazy='dynamic')) # define the type of the returned data
def __init__(self, *args, **kwargs):
super(Post, self).__init__(*args,**kwargs)
self.generate_slug()
def generate_slug(self):
if self.title:
            self.slug = slugify(self.title) # generate the slug from the title
def __repr__(self):
return '<Post id: {}, title: {}>'.format(self.id, self.title)
class Tag(db.Model):
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(100), nullable=False)
slug = db.Column(db.String(100), nullable=False)
# posts = db.relationship('Post', secondary=post_tags,
# backref=db.backref('tag',
# lazy='dynamic'))
def __init__(self,*args,**kwargs):
super(Tag, self).__init__(*args,**kwargs)
self.generate_slug()
def __repr__(self):
return '{}'.format(self.name)
def generate_slug(self):
if self.name:
self.slug = slugify(self.name)
### Flask security
roles_users = db.Table('roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id'), default= 2)
)
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(64), index=True, unique=True, nullable=False)
email = db.Column(db.String(100), unique=True, nullable=False)
password = db.Column(db.String(255),nullable=False)
active = db.Column(db.Boolean())
#articles = db.relationship('Post', lazy=True, secondary=authors_join, backref=db.backref('author', lazy='dynamic'))
roles = db.relationship('Role', secondary=roles_users, backref=db.backref('user', lazy='dynamic'))
    def __repr__(self): # used for debugging (visible from the console)
return '<User {}>'.format(self.nickname)
class Role(db.Model, RoleMixin):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=False, nullable=False)
description = db.Column(db.String(255))
def __repr__(self):
return 'Role {0}'.format(self.name)
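# Example (illustrative sketch, not part of the original models): how a Post
# with its Tag and author might be created.  The import of `app` and the use
# of an application context are assumptions about how this project wires up
# Flask-SQLAlchemy.
def create_example_post():
    from app import app  # assumed Flask application object
    with app.app_context():
        db.create_all()
        author = User(nickname='demo', email='[email protected]', password='hash', active=True)
        tag = Tag(name='general')
        post = Post(title='Hello world', body='First post', authors=[author], tags=[tag])
        db.session.add_all([author, tag, post])
        db.session.commit()
        # the slug was generated from the title by generate_slug() in __init__
        return post.slug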
| [
"[email protected]"
] | |
03a702f1e6f7a7de77c91a5fd895268781c8cfcf | 8f272d4ef56d2e778025cf31fc8aa4489a880242 | /ch5/train_neuralnet.py | 44be541bc261f8e464619718153831502e4b361c | [] | no_license | takashi7/zero_deep_learning | 0653be912a2dcd01d38ec0321085517a2de66cf7 | 8eee9194b42c9e236e6c2482a54d2cc23f351ae9 | refs/heads/master | 2021-04-15T12:48:47.609880 | 2018-04-04T23:24:29 | 2018-04-04T23:24:29 | 126,666,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | import sys, os
sys.path.append(os.pardir)
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
train_loss_list = []
train_acc_list = []
test_acc_list = []
iter_per_epoch = max(train_size / batch_size, 1)
for i in range(iters_num):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
#grad = network.numerical_gradient(x_batch, t_batch)
grad = network.gradient(x_batch, t_batch)
for key in ('W1', 'b1', 'W2', 'b2'):
network.params[key] -= learning_rate * grad[key]
loss = network.loss(x_batch, t_batch)
train_loss_list.append(loss)
if i % iter_per_epoch == 0:
train_acc = network.accuracy(x_train, t_train)
test_acc = network.accuracy(x_test, t_test)
train_acc_list.append(train_acc)
test_acc_list.append(test_acc)
print("train acc, test acc :" + str(train_acc) + ", " + str(test_acc))
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, label='train acc')
plt.plot(x, test_acc_list, label='test acc', linestyle='--')
plt.xlabel("epochs")
plt.ylabel("accracy")
plt.ylim(0,1.0)
plt.legend(loc='lower right')
plt.show() | [
"[email protected]"
] | |
d01f20f969e7226cc85bb79c0708bde5b96a9abc | a9a3e931e1c5cf3dca8d9c9992e6966e3070f7ec | /Commands/server.py | 5ab168a6033ccdf3167fd02ad2537b8ed475b25c | [] | no_license | Sjc1000/PyRC | 3e92f3090deca1d2f70299b63e8099d25a127ab7 | 35c82327c56d59e359d865ad8f00ee95e1174709 | refs/heads/master | 2020-06-08T03:00:48.174018 | 2015-11-19T00:00:17 | 2015-11-19T00:00:17 | 42,507,288 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | #!/usr/bin/env python3
def run(c, server, nickname='PyRC', port=6667, password=None):
if server in c['MainWindow'].servers:
return None
c['MainWindow'].global_action('add_server', server)
    connection = c['Connection'](server, port, nickname, password=password)
    c['MainWindow'].ui_plugins['ServerList'].servers[server]['connection'] = connection
    handler = c['Handler']
    connection.MainWindow = c['MainWindow']
    connection.event_handler = handler.EventHandler(c['MainWindow'], connection)
    connection.connect()
c['MainWindow'].global_action('activate_path', server, None)
c['MainWindow'].global_action('change_nickname', server, nickname)
return None | [
"[email protected]"
] | |
7f704fa66bc3954c55c21eefc29eede10d79bbe7 | 62bd59705a3d23971ce1451e32cc6461d1ec9f4e | /Structural Analysis/Buckling_analysis.py | 0ff5b8c8c7029eff6c2376c07bbcf2a9ca7b7a51 | [] | no_license | mitta64/WeFlyCycle | ae22f64ec86f9aa853661d5b0502c14f16a87996 | 7c694a5d04cd5995d5dc0acfb2f6d7ce6e08fd6f | refs/heads/master | 2022-10-07T15:53:24.957648 | 2020-06-09T08:45:59 | 2020-06-09T08:45:59 | 259,890,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,074 | py | import numpy as np
import matplotlib.pyplot as plt
#Input parameters
stringer_type = 2 #value 0 means hat, 1 means I, 2 means z, 3 means t (that is with the horizontal part of t not attached to skin)
E = 70000.0 #N/mm^2
v = 0.3 #- Poisson ratio
k_ss = 4.0 #value for simply supported plate
k_ssf = 0.43 #value for 3 sides simply supported and one edge free
t_skin = 2.5 #mm
t_stiff = 2.5 #mm
h_stiff = 50 #mm height of the stiffener
w_stiff = 30 #mm (please note that this is half the total width for z stiffener and only includes the top (of hat) width for hat stiffener)
d_stiff = 5 #mm (set this 0 for everything except hat, for hat it is the small part of the hat stiffener being in touch with the skin)
a = 1000 #mm distance between two ribs
b = 120 #mm (stringer spacing)
c = 3.0 #value 4 for bolt (HiLok/Jobolts), 3.5 for spot welds, 3 for universal head rivets, 1.5 for countersunk rivets (tension head) and 1 for countersunk rivets (shear head)
s = 90 #mm (spacing between rivets)
Limit_load = 600 #N/mm
Ultimate_load = 900 #N/mm
#first failure -> Local skin buckling
sigma_crit_skin = (np.pi**2 * k_ss * E) / (12 * (1 - v**2)) * (t_skin/b)**2
#Second failure -> Local stiffener buckling
if stringer_type == 0: #for hat stringer
sigma_cr_baseflange = (np.pi**2 * k_ssf * E) / (12*(1-v**2)) * (t_stiff/d_stiff)**2
sigma_cr_web = (np.pi**2 * k_ss * E) / (12*(1-v**2)) * (t_stiff/h_stiff)**2
sigma_cr_topflange = (np.pi**2 * k_ss * E) / (12*(1-v**2)) * (t_stiff/w_stiff)**2
sigma_cr_stiff = min(sigma_cr_baseflange, sigma_cr_web, sigma_cr_topflange)
elif stringer_type == 1: #for I stringer
sigma_cr_flange = (np.pi**2 * k_ssf * E) / (12*(1-v**2)) * (t_stiff/(w_stiff/2))**2
sigma_cr_web = (np.pi**2 * k_ss * E) / (12*(1-v**2)) * (t_stiff/h_stiff)**2
sigma_cr_stiff = min(sigma_cr_flange, sigma_cr_web)
elif stringer_type == 2: #for Z-stringer
sigma_cr_flange = (np.pi**2 * k_ssf * E) / (12*(1-v**2)) * (t_stiff/(w_stiff))**2
sigma_cr_web = (np.pi**2 * k_ss * E) / (12*(1-v**2)) * (t_stiff/h_stiff)**2
sigma_cr_stiff = min(sigma_cr_flange, sigma_cr_web)
else: #for T_stringer
sigma_cr_flange = (np.pi**2 * k_ssf * E) / (12*(1-v**2)) * (t_stiff/(w_stiff/2))**2
sigma_cr_web = (np.pi**2 * k_ss * E) / (12*(1-v**2)) * (t_stiff/h_stiff)**2
sigma_cr_stiff = min(sigma_cr_flange, sigma_cr_web)
#Third failure -> Inter rivet buckling
sigma_rivet = (np.pi**2 * c * E) / (12*(1-v**2)) * (t_skin/s)**2
#Fourth failure -> Column stiffness buckling
if stringer_type == 0: #for hat stringer
A_skin = 30*t_skin*t_skin
y_skin = 0
A_str_flange_base = t_stiff * 2 * d_stiff
y_str_flange_base = 0
A_str_web = 2*t_stiff*h_stiff
y_str_web = 0.5*h_stiff
A_str_flange_top = t_stiff * w_stiff
y_str_flange_top = h_stiff
y_bar = (A_skin * y_skin + A_str_flange_base*y_str_flange_base + A_str_web*y_str_web + A_str_flange_top*y_str_flange_top) / (A_skin + A_str_flange_base + A_str_web + A_str_flange_top)
I_yy_skin = A_skin * (y_bar)**2
I_yy_str_flange_base = A_str_flange_base * (y_bar)**2
I_yy_str_web = 2*(t_stiff * h_stiff**3)/12 + A_str_web * (y_bar - y_str_web)**2
I_yy_str_flange_top = A_str_flange_top*(y_bar - y_str_flange_top)**2
I_yy = I_yy_skin + I_yy_str_flange_base + I_yy_str_web + I_yy_str_flange_top
A_stiff = A_str_flange_base + A_str_web + A_str_flange_top
elif stringer_type == 1: #for I stringer
A_skin = 30*t_skin*t_skin
y_skin = 0
A_str_flange_base = t_stiff * w_stiff
y_str_flange_base = 0
A_str_web = t_stiff*h_stiff
y_str_web = 0.5*h_stiff
A_str_flange_top = t_stiff * w_stiff
y_str_flange_top = h_stiff
y_bar = (A_skin * y_skin + A_str_flange_base*y_str_flange_base + A_str_web*y_str_web + A_str_flange_top*y_str_flange_top) / (A_skin + A_str_flange_base + A_str_web + A_str_flange_top)
I_yy_skin = A_skin * (y_bar)**2
I_yy_str_flange_base = A_str_flange_base * (y_bar)**2
I_yy_str_web = (t_stiff * h_stiff**3)/12 + A_str_web * (y_bar - y_str_web)**2
I_yy_str_flange_top = A_str_flange_top*(y_bar - y_str_flange_top)**2
I_yy = I_yy_skin + I_yy_str_flange_base + I_yy_str_web + I_yy_str_flange_top
A_stiff = A_str_flange_base + A_str_web + A_str_flange_top
elif stringer_type == 2: #for Z-stringer
A_skin = 30*t_skin*t_skin
y_skin = 0
A_str_flange_base = t_stiff * w_stiff
y_str_flange_base = 0
A_str_web = t_stiff*h_stiff
y_str_web = 0.5*h_stiff
A_str_flange_top = t_stiff * w_stiff
y_str_flange_top = h_stiff
y_bar = (A_skin * y_skin + A_str_flange_base*y_str_flange_base + A_str_web*y_str_web + A_str_flange_top*y_str_flange_top) / (A_skin + A_str_flange_base + A_str_web + A_str_flange_top)
I_yy_skin = A_skin * (y_bar)**2
I_yy_str_flange_base = A_str_flange_base * (y_bar)**2
I_yy_str_web = (t_stiff * h_stiff**3)/12 + A_str_web * (y_bar - y_str_web)**2
I_yy_str_flange_top = A_str_flange_top*(y_bar - y_str_flange_top)**2
I_yy = I_yy_skin + I_yy_str_flange_base + I_yy_str_web + I_yy_str_flange_top
A_stiff = A_str_flange_base + A_str_web + A_str_flange_top
else: #for T_stringer
A_skin = 30*t_skin*t_skin
y_skin = 0
A_str_web = t_stiff*h_stiff
y_str_web = 0.5*h_stiff
A_str_flange_top = t_stiff * w_stiff
y_str_flange_top = h_stiff
y_bar = (A_skin * y_skin + A_str_web*y_str_web + A_str_flange_top*y_str_flange_top) / (A_skin + A_str_web + A_str_flange_top)
I_yy_skin = A_skin * (y_bar)**2
I_yy_str_web = (t_stiff * h_stiff**3)/12 + A_str_web * (y_bar - y_str_web)**2
I_yy_str_flange_top = A_str_flange_top*(y_bar - y_str_flange_top)**2
I_yy = I_yy_skin + I_yy_str_web + I_yy_str_flange_top
A_stiff = A_str_web + A_str_flange_top
P_critital = np.pi**2 * E * I_yy / (a**2)
#Normalizing everything
t_star = (A_stiff/b) + t_skin
Load_crit_skin = sigma_crit_skin*t_star
Load_cr_stiff = sigma_cr_stiff*t_star
Load_rivet = sigma_rivet*t_star
Load_critical = P_critital/b
load_names = ('Skin critical', 'Stiffener critical', 'Rivet critical', 'Load critical')
y_pos = np.arange(len(load_names))
performance = [Load_crit_skin, Load_cr_stiff, Load_rivet, Load_critical]
plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.xticks(y_pos, load_names)
plt.xlabel('Different scenarios')
plt.ylabel('Load [N/mm]')
plt.hlines(y=Limit_load, xmin=-0.5, xmax=3.5, linestyles='dashed')
plt.text(3.5, Limit_load, 'Limit load', ha='left', va='center')
plt.hlines(y=Ultimate_load, xmin=-0.5, xmax=3.5, colors = 'r')
plt.text(3.5, Ultimate_load, 'Ultimate load', ha='left', va='center')
plt.title('Experienced load per scenario compared to allowable loads')
plt.show()
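#The analysis above reuses the classical plate buckling relation several times.
#As an illustration only (not used by the script), the same relation can be
#wrapped in a helper; k, t and width follow the meanings of the variables above.
def critical_buckling_stress(k, t, width, E_mod=E, poisson=v):
    #sigma_cr = (pi^2 * k * E) / (12 * (1 - v^2)) * (t / width)^2
    return (np.pi**2 * k * E_mod) / (12 * (1 - poisson**2)) * (t / width)**2
#e.g. critical_buckling_stress(k_ss, t_skin, b) reproduces sigma_crit_skin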
| [
"[email protected]"
] | |
dc4b04839183fdcac9afa091b4ca1f0d6db46600 | ac6d9ce7ecdf2f5b8ea2fa9764590bb74f37a07d | /ex3.py | 081ba6ffca533bf51bc38d242dffcf1b0ceed1f2 | [] | no_license | kellyspiano/lpthw | e50fc7de636ebfa19da25fd129a5ba12b838df4e | 081aef1b81107b2dd40f5cc55fef8e7ce184ad4b | refs/heads/master | 2021-01-20T10:32:55.079803 | 2012-04-25T12:00:14 | 2012-04-25T12:00:14 | 3,074,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | # Tells that I am about to count my chickens
print "I will now count my chickens:"
# Counts the hens by performing simple mathematics
print "Hens", 25.0 + 30.0 / 6.0
# Counts the Roosters by performing simple mathematics
print "Roosters", 100. - 25. * 3. % 4.
# Counts the eggs by performing simple mathematics
print "Now I will count the eggs:"
# Performs the calculations to count the eggs
print 3. + 2. + 1. - 5. + 4. % 2. - 1. / 4. + 6.
print "Is it true that 3 + 2 < 5 - 7?"
print 3. + 2. < 5. - 7.
print "What is 3 + 2?", 3. + 2.
print "What is 5 - 7?", 5. - 7.
print "Oh, that's why it's False."
print "How about some more."
print "Is it greater?", 5 > -2
print "Is it greater or equal?", 5 >= -2
print "Is it less or equal?", 5 <+ -2
| [
"[email protected]"
] | |
0a359a4eb4f15ff8ed1286876f73c095f98be8cd | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/DeleteSmarttagTemplateRequest.py | 39011a4150d00329b06acbe4ca69e10bfd270e99 | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 2,226 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmts.endpoint import endpoint_data
class DeleteSmarttagTemplateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Mts', '2014-06-18', 'DeleteSmarttagTemplate','mts')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_TemplateId(self):
return self.get_query_params().get('TemplateId')
def set_TemplateId(self,TemplateId):
self.add_query_param('TemplateId',TemplateId) | [
"[email protected]"
] | |
ace72b65df05227190bfa60dda1d6a36c1129d8e | 64d37bfa76f3e74d424c3e13797d4f19571d9bd0 | /hiretutor/hireapp/urls.py | b1256e27587d73c276c077b5d12543b9ddb67c2b | [] | no_license | BlackBoxSQL/ProjectSatisfaction | 00f4b5cc7135d7662c1db1f72aa4f66e409c369a | bc9082e2c55dd56a81414344ba019f84a34edf82 | refs/heads/master | 2020-04-26T04:36:38.280759 | 2019-03-01T13:57:10 | 2019-03-01T13:57:10 | 173,307,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | from django.urls import include, path
from .views import hireapp, tutor, guardian
urlpatterns = [
path('', hireapp.index, name='home'),
path('guardian/', include(([
path('guardianhomepage/', guardian.GuardianHomepage.as_view(),
name='guardian_homepage'),
path('guardianprofile/', guardian.GuardianProfile.as_view(),
name='guardian_profile'),
], 'hireapp'), namespace='guardian')),
path('tutor/', include(([
path('tutorhomepage/', tutor.TutorHomepage.as_view(),
name='tutor_homepage'),
path('tutorprofile/', tutor.TutorProfile.as_view(),
name='tutor_profile'),
], 'hireapp'), namespace='tutor')),
]
| [
"[email protected]"
] | |
445fab7edaf0a01e847dffeda3088b1a52bc6727 | 012de5d44c54a9e3c6c4cedf5a0a405967e9d6e9 | /web_scraping-python/irishamerica.py | b1b34af415257d86aa45a4afe2ab50a6bf937047 | [] | no_license | paisap/chat-online | 6717bf3c21863bb6ad07e9733784500b5a99a282 | bb0dfc527d1dedc97f2a3c57e5f0da4823da9396 | refs/heads/master | 2022-12-12T08:14:05.775725 | 2020-09-09T20:49:54 | 2020-09-09T20:49:54 | 282,641,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,671 | py | #!/usr/bin/python3
from bs4 import BeautifulSoup
from urllib.request import urlopen, Request
import json
def get_images(obj_soup):
""" obtener todas las imagenes """
images = [img['src'] for img in obj_soup.find_all('img')]
return images
def get_articles(obj_soup):
""" Obtiene todos los links de los articulos """
articles = []
for link in obj_soup.find_all('a'):
if 'y=' in link['href']:
articles.append(link['href'])
return articles
def get_title(obj_soup, id_title):
""" Obitiene el titulo con el id especifico """
title = obj_soup.find('h2', id=id_title).text
return title
def get_pie_imagen(obj_soup):
""" Obtiene todos los pie de las imagenes en orden """
texto = [pie.text for pie in obj_soup('div', {'class': 'ccfic'})]
return texto
def get_intro_noticia(obj_soup):
""" Obtiene todas las noticias que se ven """
list_news = [pie.find('p').text for pie in obj_soup('div', {'class': 'blog-entry'})]
return list_news
def tags_with_links(obj_soup):
""" obtiene los nombes y links y los devuelve en un json"""
all_tags = {}
count = 0
for obj in obj_soup('div', {'class': 'post-tags'}):
tags = {}
for i in obj.find_all('a'):
tags[i.text ] = i['href']
all_tags[count] = tags
count += 1
return all_tags
def links_to_news(obj_soup):
""" obtiene todos los links a las noticias """
articles = []
for link in obj_soup('div', {'class': 'blog-entry'}):
for i in link.find_all('a'):
articles.append(i['href'])
break
return articles
req = Request('https://irishamerica.com/category/blog/', headers={'User-Agent': 'Mozilla/5.0'})
irish = urlopen(req).read().decode("utf-8")
soup = BeautifulSoup(irish, "html.parser")
start_body = irish.find('<body ')
end_body = irish.find('</body>')
images = get_images(soup)
articles = get_articles(soup)
title = get_title(soup, "post-51484")
pies_imagen = get_pie_imagen(soup)
news = get_intro_noticia(soup)
t = tags_with_links(soup)
links_news = links_to_news(soup)
jsonOBJ = {
"Noticia 1": {
"title": title,
"imagen": images[1],
"pie de imagen": pies_imagen[0],
"intro noticia": news[0],
"tags con link": t[0],
"link noticia": links_news[0]
},
"Noticia 2": {
"title": get_title(soup, "post-51423"),
"imagen": images[2],
"pie de imagen": pies_imagen[1],
"intro noticia": news[1],
"tags con link": t[1],
"link noticia": links_news[1]
},
"links todos los articulos": articles
}
print(json.dumps(jsonOBJ, indent = 4)) | [
"[email protected]"
] | |
71832f29d29a577579d7bb02354e38f6e3f460aa | b036f05eb76aa199fe133b9b2e746f36f36330b2 | /migrations/versions/2854fc568b35_initial_migration.py | f873e94b9001de4cbd1d89ee3b1db99c6b96c263 | [] | no_license | kba977/flasky | be058662cc1c115b44f90aeaf3065b7ab40d92a0 | 3b2c202136356b447150a8f613bd31f509326aa0 | refs/heads/master | 2016-09-05T16:04:39.490412 | 2015-04-20T05:36:28 | 2015-04-20T05:36:28 | 33,992,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | """initial migration
Revision ID: 2854fc568b35
Revises: None
Create Date: 2015-04-15 18:51:53.074761
"""
# revision identifiers, used by Alembic.
revision = '2854fc568b35'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
| [
"[email protected]"
] | |
568331c7849b02404cac0440eb3cd6a9cc392935 | 335199b832de20a5a116df2d41c774fa72bfafd5 | /p12/p12.py | 151cc1248e27c3f0c8ec1b908db11b5720dd05df | [] | no_license | kefhifi/bili_7_days_learn_python | 5f57622c28f14cdee132cdfc3e3034501be3732e | a4ca5a9edd50fd2cc84a822fa1ca32d81f20adda | refs/heads/master | 2022-11-15T01:37:32.802239 | 2020-07-06T16:59:49 | 2020-07-06T16:59:49 | 266,197,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | # 匿名函数
def add(x,y):
return x+y
print(add(1,2))
f = lambda x,y: x+y
print(f(1,2))
# decorator
| [
"[email protected]"
] | |
295f4c731293a440f282a7410066edd807c3a327 | da90c63119a09219469d45b8064c804c0bc427c2 | /venv/bin/pip3 | aebd794abfbddf3e3911e73fd2141eeca7c8c9e5 | [] | no_license | tkoz0/Bonobot | d2e5d7e582636d73a050587cb051ea98c8b5b149 | a067d971fd6d03604fcfac0024669af642c29005 | refs/heads/master | 2022-04-17T16:52:56.007874 | 2020-04-14T18:14:38 | 2020-04-14T18:14:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | #!/Users/chrisjerrett/Desktop/Bonobot/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | ||
c97cdb54b1c74a96fdb488d7479ce27892839dbb | 78231eec1d52e71b1cc374b1d2d492232f4f8ddc | /tools/retrieve_data.py | 20a2e403a9b4834c06266c4434476c080a50871c | [] | no_license | wyhfrank/crypto-project | d9ba9f4dc062869d06af1a821f6759525c1dfa80 | 88e92cf50ad9c14837cd932c456615bf143efd66 | refs/heads/main | 2023-05-06T04:11:20.125892 | 2021-05-22T01:38:36 | 2021-05-22T01:38:36 | 367,174,778 | 0 | 0 | null | 2021-05-13T21:06:59 | 2021-05-13T21:06:58 | null | UTF-8 | Python | false | false | 1,926 | py | import os
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import pandas as pd
from cryptocompare import cryptocompare
from utils import percent_calc
def get_historical_price(crypto_name="BTC",
start=datetime.now() - relativedelta(months=6),
end=datetime.now(),
currency="JPY",
cache_path="cache"):
limit = (end - start).days * 24
fn = os.path.join(cache_path, f"{crypto_name}_{currency}_{start.date()}_{end.date()}.csv")
if os.path.exists(fn):
df = pd.read_csv(fn)
return df
if not os.path.exists(cache_path):
os.makedirs(cache_path)
values = []
while limit > 0:
amount = min(1920, limit)
tmp = cryptocompare.get_historical_price_hour(coin=crypto_name, currency=currency, limit=amount, toTs=end)
end -= timedelta(hours=amount)
limit -= amount
values.extend(tmp)
df = pd.DataFrame(values)
df.sort_values("time", inplace=True)
df.drop_duplicates(subset="time", inplace=True)
df["perc_val"] = df.apply(lambda row: percent_calc(row["close"], row["open"]), axis=1)
df["price"] = df["close"]
df.to_csv(fn, index=False)
return df
def test():
parameter_file = "market_patterns.xlsx"
# simulation_result_base = "simulations"
# result_file = os.path.join(simulation_result_base, f"result_{datetime.now().strftime('%Y%m%d%H%M%S')}.xlsx")
df_para = pd.read_excel(parameter_file)
for i, row in df_para.iterrows():
id = row["#"]
crypto_name = row["crypto_name"]
start = row["start"]
end = row["end"]
get_historical_price(crypto_name=crypto_name,
start=start,
end=end,
currency="USD")
if __name__ == '__main__':
test()
| [
"[email protected]"
] | |
99581de0640afbb2f1dcc630ac41dba1d2258a8c | 532c78bae210da33e1f81bd89c53f21633ac8c5a | /count words.py | e640485f6a21738a10b2c30f9f069d4a52f55d07 | [] | no_license | mariasanthosh0901/Santo | 756dda9773817febdb3203fdb44b922318d5dbfa | ec3a4e5ef322bcdbb32f942e5e19c14199fe1227 | refs/heads/master | 2020-05-28T07:57:51.395216 | 2019-11-08T11:09:10 | 2019-11-08T11:09:10 | 188,929,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | str1=input()
c=0
for i in str1:
if i==' ':
c+=1
print(c+1)
| [
"[email protected]"
] | |
9267075b848b3f45a6b213e1b9974734428fddca | 35b4ad652269d5569ab771b3c03cd70a80e1a45b | /firacademy_openapi/signature.py | 996be60d497b134e6d4b1b54b07a4e8121d68561 | [
"MIT"
] | permissive | hongshanxueyuan/openapi_sdk | 08b04b2318196cf48460eb46f24459888e825193 | 3a4945df67f7e8dd1948a24e1a9a5bb3b334fb84 | refs/heads/master | 2023-09-04T01:14:15.739638 | 2023-08-28T08:48:12 | 2023-08-28T08:48:12 | 394,182,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,268 | py | import base64
import hashlib
import hmac
try:
from base64 import encodestring as base64encode
except ImportError:
from base64 import encodebytes as base64encode
def build_sign_str(uri, method, headers, params=None, data=None):
lf = '\n'
string_to_sign = []
string_to_sign.append(method)
need_signed_headers = ['Accept', 'Content-MD5', 'Content-Type', 'Date']
for _header in need_signed_headers:
string_to_sign.append(lf)
_value = headers.get(_header)
if _value:
string_to_sign.append(_value)
string_to_sign.append(lf)
string_to_sign.append(_format_header(headers))
string_to_sign.append(_build_resource(uri, params, data))
return ''.join(string_to_sign)
def _build_resource(uri, params, data):
params = params or {}
data = data or {}
body = {}
if params:
body.update(params)
if data:
body.update(data)
resource = []
resource.append(uri)
if body:
resource.append('?')
param_list = sorted(body.keys())
first = True
for key in param_list:
if not first:
resource.append('&')
first = False
if body[key]:
resource.append(key)
resource.append('=')
resource.append(body[key])
else:
resource.append(key)
if resource is None:
return ''
return ''.join(str(x) for x in resource)
def _format_header(headers):
headers = headers or {}
lf = '\n'
temp_headers = []
if len(headers) > 0:
header_list = sorted(headers.keys())
signature_headers = []
for k in header_list:
if k.startswith('X-Ca-'):
temp_headers.append(k)
temp_headers.append(':')
temp_headers.append(str(headers[k]))
temp_headers.append(lf)
signature_headers.append(k)
headers['X-Ca-Signature-Headers'] = ','.join(signature_headers)
return ''.join(temp_headers)
def sign(source, secret):
key = bytes(secret, encoding='utf-8')
h = hmac.new(key, source.encode('utf-8'), hashlib.sha256)
signature = base64encode(h.digest()).strip()
return signature
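# Example (illustrative sketch, not part of the original module): how the two
# helpers above might be combined to sign a request.  The header names, path,
# query parameters and the demo secret are assumptions for illustration only.
def example_signature():
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/x-www-form-urlencoded',
        'X-Ca-Key': 'demo-app-key',
        'X-Ca-Timestamp': '1600000000000',
    }
    # build the canonical string (this also fills in X-Ca-Signature-Headers)
    string_to_sign = build_sign_str('/v1/courses', 'GET', headers,
                                    params={'page': 1, 'size': 20})
    # HMAC-SHA256 the canonical string with the app secret and base64-encode it
    return sign(string_to_sign, 'demo-app-secret')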
| [
"[email protected]"
] | |
006a39f5c5e48c5d9e25c2f81cf23dea32595cf4 | cea6bff1b615432d97ffceb3dbaec65738444856 | /stemmer.py | 41b22647b49f27a75cde61e3e6b9f2944b239628 | [] | no_license | Gontareva/help-learn-eng-bot-run | 53febc87190cad083a45a940015fd9ee577a7241 | 43e23fd1682e89f16752ac7aed79e88fc5740c08 | refs/heads/master | 2020-03-19T05:36:53.098331 | 2018-06-03T22:53:17 | 2018-06-03T22:53:17 | 135,948,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | import nltk
class Stemmer:
def __init__(self):
self.__stemmer = nltk.stem.porter.PorterStemmer()
def stemming_list(self, words):
stem_dictionary = {}
for word in words:
stem_word = self.stemming(word)
if stem_dictionary.get(stem_word):
stem_dictionary[stem_word].append(word)
else:
stem_dictionary[stem_word] = [word]
return stem_dictionary
def stemming(self, word):
return self.__stemmer.stem(word)
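# Example (illustrative sketch, not part of the original class): grouping an
# English word list by stem.  Requires the nltk package already used above.
if __name__ == '__main__':
    stemmer = Stemmer()
    words = ['learning', 'learner', 'learned', 'runs', 'running']
    # prints a dict mapping each stem to the original words that share it
    print(stemmer.stemming_list(words))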
| [
"[email protected]"
] | |
0020731b43290f3db8e21077a41ebc493a9a2ca3 | a8355427c05724b71256187ebf37ecda80deabc5 | /tsk6.py | da4a396ebf40c55760897208bdfad39cfd1f846c | [] | no_license | jshhh/projects | 79fbdb4c0ca27e0ca2c6490e7b9a5258f45c0eed | 5e25c2e79de5bab8d95c7035a5fa4031aa1d3dac | refs/heads/master | 2021-05-04T21:07:46.358771 | 2019-03-29T14:27:46 | 2019-03-29T14:27:46 | 119,881,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | from random import randint
#config
mode = 'AUT'
def digitInput(prompt='',bounds=(0,100)):
if mode=='AUTO':
out = randint(bounds[0],bounds[1])
else:
out = int(input(prompt))
print('INPUT',out)
return out
total_amount=digitInput()
first_number=digitInput()
amount_of_doubles=0
d=0
def amount_of_digits(number):
n = 0
while number > 0:
n += 1
number = number // 10
return n
n = amount_of_digits(first_number)
for i in range(total_amount-1):
current_number=digitInput(bounds=(0,100000))
d = amount_of_digits(current_number)
if d>n:
n = d
amount_of_doubles = 0
if d==n:
amount_of_doubles += 1
print(n, amount_of_doubles) | [
"[email protected]"
] | |
98b024718a8e8a23277d18d3e4b43665152dd24b | 852144258f9d6024331a16f8d51248dbc3e3b53b | /python_array/arithmetic_operators.py | 47109051aee6d6d3d0cbd6c7812892124f8b17e8 | [] | no_license | codewithbLanksS/workshop2 | 1e18c28df245d04299eaff7b89b41c104bcc3e8e | 07dd735f8279450d08ce24a0baaca3f73d9e59c4 | refs/heads/master | 2023-03-21T15:54:42.368703 | 2021-03-14T23:03:16 | 2021-03-14T23:03:16 | 328,321,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | x = 17
y = 2
print(x + y)
print(x - y)
print(x * y)
print(x / y)
print(x % y)
print(x ** y)
print(x // y)
# 19
# 15
# 34
# 8.5
# 1
# 289
# 8 floor division
| [
"[email protected]"
] | |
09709dcc36938f9cdc29535bbbd86dda3efd889b | 5c7b1cd64fe600401473e8bb11aa8c606a2b1799 | /lab2_2/main.py | b6847156391b23bc65a30886f2a3cad2753163a4 | [] | no_license | drA666/rts | a7c303fd1fe4731d49a0a2233da3a9ed5138a449 | 6daf761e53549a4a2a465d6bfda4d13224c23a2f | refs/heads/main | 2023-05-02T16:56:32.838573 | 2021-06-08T13:47:47 | 2021-06-08T13:47:47 | 364,647,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | from random_signals import signal_generator
from FFT import fft
import matplotlib.pyplot as plotter
from DFT import discrete_fourier_transform
from time import time
n = 14
w = 2000
N = 256
signal = signal_generator(n, w, N)
start1 = time()
spectre = abs(fft(signal))
end1 = time()
start2 = time()
dftsig = abs(discrete_fourier_transform(signal))
end2 = time()
fft_time = end1 - start1
dft_time = end2 - start2
print('FFT: {}'.format(fft_time))
print('DFT: {}'.format(dft_time))
# figure, axis = plotter.subplots(2, 1)
# plotter.subplots_adjust(left=0.1, top=0.9, bottom=0.1, right=0.99, hspace=0.5)
# axis[0].plot(range(N), signal)
# axis[0].set_title("Сигнал")
# axis[0].set(xlabel='Час', ylabel='Згенерований сигнал')
# axis[1].plot(range(N), spectre)
# axis[1].set_title("Швидке перетворення Фур'є")
# axis[1].set(xlabel='p', ylabel='F(p)')
# plotter.show()
plotter.stem([a - b for a, b in zip(spectre, dftsig)])
plotter.show()
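# Illustrative check (not part of the original lab code): numpy's FFT can be
# used as an independent reference for both implementations above; it assumes
# the custom fft() returns an array-like spectrum of the same length N.
import numpy as np
np_spectre = np.abs(np.fft.fft(signal))
# maximum deviation of the custom FFT magnitude from the numpy reference
print('max |FFT - numpy| = {}'.format(max(abs(np_spectre - spectre))))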
| [
"[email protected]"
] | |
cd66acb3301f32189da4bb68b39f499962460140 | 21a212056c86d41c9123b657084157f60ee4e161 | /adduser.py | b0b3f117a7d5038cc95df9a7d902ff0bd01fda8a | [] | no_license | renanqts/pinger | a106eb2f68b9a8f94eb8b6c3038fa4072952a963 | 5e0f96d72bae453fb7f2476e27dbd7755f067c42 | refs/heads/master | 2021-01-23T06:34:04.770552 | 2017-04-09T21:44:20 | 2017-04-09T21:44:20 | 86,373,804 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | #!/usr/bin/env python
def insertUser(args):
#load modules
from database import Database
import ConfigParser
#load configurations
config = ConfigParser.RawConfigParser()
config.read('config.properties')
#database configuration
db = Database(config)
#add user
if db.setUser(args):
print "\n Added!"
else:
print "\n I couldn't add!"
if __name__ == '__main__':
ans=True
while ans:
print ("""
1.Add commun user (Only read options)
2.Add admin user
3.Exit/Quit
""")
ans=raw_input("What would you like to do? ")
if ans=="1":
args = []
name = raw_input("Name: ")
args.append(name)
id = raw_input("Telegram id: ")
args.append(id)
args.append(2)
insertUser(args)
elif ans=="2":
args = []
name = raw_input("Name: ")
args.append(name)
id = raw_input("Telegram id: ")
args.append(id)
args.append(1)
insertUser(args)
elif ans=="3":
print("\n Goodbye :D\n")
ans = None
elif ans !="":
print("\n Not valid choice try again") | [
"[email protected]"
] | |
bfdf07b9e1a7e784e104b9757de014e1fc513768 | 1d49607268ff93aa6eadc92efee19c8521c8c3ec | /tensorflow_Application/tensorflow_Word2Vector_SkipGram_WithTSNE/main.py | 7de32309b1ccaf8b3b4a80b330e7821fc14e7230 | [] | no_license | ahn-github/Tensorflow_Advanced_Tutorials | 9e444795360a0f28aba41274dcb1788388c62ac0 | 1c6eb420bf8d87877d8279dd3173d160689cc3aa | refs/heads/master | 2020-03-25T22:25:52.496775 | 2018-08-09T16:45:29 | 2018-08-09T16:45:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | from model import Word2Vec
# optimizers_ selection = "Adam" or "RMSP" or "SGD"
# weight_selection chooses which embedding matrix is used: the encoder's or the decoder's
# with weight_sharing=True, the encoder's embedding_matrix is forced even if weight_selection="decoder"
Word2Vec(TEST=True, tSNE=True, model_name="Word2Vec", weight_selection="encoder", # encoder or decoder
vocabulary_size=50000, tSNE_plot=200, similarity_number=8,
         # similarity_number -> how many similar words to print
         # num_skips : generates num_skips training samples per sentence
validation_number=30, embedding_size=128, batch_size=128, num_skips=2, window_size=1,
negative_sampling=64, optimizer_selection="SGD", learning_rate=0.1, training_epochs=1000,
display_step=1, weight_sharing=False)
| [
"[email protected]"
] | |
01c6d4ffb46db2fa4d42682580e0b5090ef96ef3 | 4424337b21b7ea91f7e24bea8366d7244fda6158 | /week-03/day-01/18.py | cd541ddc93d5a9bc517f8eb03a65b5f82f95dffe | [] | no_license | oliviagardiner/green-fox-projects-oliviaisarobot | a9b0084160d3dfe5582dd798ffd25c9c666fec74 | 2b9a998a648036a76070a535326ed4b8985f2a24 | refs/heads/master | 2022-05-05T11:52:29.971584 | 2017-02-17T13:06:08 | 2017-02-17T13:06:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | r = [54, 23, 66, 12]
print(r[1] + r[2])
| [
"[email protected]"
] | |
9c0d368db62ebb19f27b251f87a47c3b9d1c5c66 | 45de3aa97525713e3a452c18dcabe61ac9cf0877 | /src/primaires/salle/masques/bonhomme_neige/__init__.py | a131702c6aeebce754231a0a2a6c118092257041 | [
"BSD-3-Clause"
] | permissive | stormi/tsunami | 95a6da188eadea3620c70f7028f32806ee2ec0d1 | bdc853229834b52b2ee8ed54a3161a1a3133d926 | refs/heads/master | 2020-12-26T04:27:13.578652 | 2015-11-17T21:32:38 | 2015-11-17T21:32:38 | 25,606,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,239 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""File containing the <bonhomme_neige> mask."""
from primaires.interpreteur.masque.masque import Masque
from primaires.interpreteur.masque.fonctions import *
from primaires.interpreteur.masque.exceptions.erreur_validation \
import ErreurValidation
from primaires.format.fonctions import supprimer_accents
class BonhommeNeige(Masque):
    """Mask <bonhomme_neige></bonhomme_neige>.
    A snowman model name is expected.
"""
nom = "bonhomme_neige"
nom_complet = "modèle"
    def init(self):
        """Initialize the attributes"""
self.modele = None
    def repartir(self, personnage, masques, commande):
        """Distribute the mask."""
lstrip(commande)
nom = liste_vers_chaine(commande)
if not nom:
raise ErreurValidation( \
"Précisez un nom de modèle.", False)
commande[:] = []
self.a_interpreter = nom
masques.append(self)
return True
    def valider(self, personnage, dic_masques):
        """Validate the mask"""
Masque.valider(self, personnage, dic_masques)
nom = supprimer_accents(self.a_interpreter).lower()
modele = None
for prototype in importeur.salle.bonhommes_neige.values():
if prototype.utilisable_joueurs and nom == \
supprimer_accents(prototype.nom):
modele = prototype
break
if modele is None:
raise ErreurValidation(
"Ce modèle est introuvable.", True)
self.modele = modele
return True
| [
"[email protected]"
] | |
17395668b258f165ea59612fb1b4df14fab387fa | 5e9dee7a655ba3862cc1d40424b829f357aa6a97 | /crawling/zigbang_ajax.py | 496c718c72b7382beba3582d90b57011cd901d82 | [] | no_license | soomok-lee/python_examples | 6aae35639d2f6d42893ea6fc9f8c6c9ccfac769c | 753f67cacc0e40fc68dd8bdc3bd144f5ff58c536 | refs/heads/main | 2023-02-12T03:28:02.580666 | 2021-01-05T20:35:57 | 2021-01-05T20:35:57 | 319,796,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,407 | py | import requests
import json
import pprint
import geohash2
q = "충정로"
serviceType = "아파트"
url = "https://apis.zigbang.com/v2/search?q={}&serviceType={}".format(q, serviceType)
r = requests.get(url)  # returns JSON
result = json.loads(r.text) # dict
# pprint.pprint(result)  # pretty-print
if result["success"]:
lat = result["items"][0]["lat"]
lng = result["items"][0]["lng"]
# geohash # https://en.wikipedia.org/wiki/Geohash
geohash = geohash2.encode(lat, lng, precision=5)
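    # precision=5 yields a geohash cell of roughly 4.9 km x 4.9 km around the coordinate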
    # URL that calls the API below with the geohash obtained above and passes the query (jeonse, monthly rent, etc.)
url = "https://apis.zigbang.com/v2/items?deposit_gteq=0&domain=zigbang&geohash={}&rent_gteq=0&sales_type_in=전세%7C월세&service_type_eq=원룸".format(geohash)
    r_items = requests.get(url).json()  # parse the JSON response
    # the items value holds the ids of the actual listing data
items = r_items.get("items")
# pprint.pprint(items)
    # store the JSON items list obtained above as a Python list
item_ids = []
for item in items:
item_ids.append(item.get("item_id"))
    items = {"item_ids": item_ids[:10]}  # keep only the first 10 ids under the "item_ids" key
# post - data = items
    results = requests.post('https://apis.zigbang.com/v2/items/list', data=items).json()  # parse the JSON response
# pprint.pprint(results)
    datas = results.get("items")  # the final result items
for d in datas:
address = "{}".format(d.get("address1"))
if d.get("address2") is not None:
address += " {}".format(d.get("address2"))
if d.get("address3") is not None:
address += " {}".format(d.get("address3"))
building_floor = d.get("building_floor")
floor = d.get("floor")
thumbnail = d.get("images_thumbnail")
item_id = d.get("item_id")
reg_date = d.get("reg_date")
sales_type = d.get("sales_type")
service_type = d.get("service_type")
size_m2 = d.get("size_m2")
title = d.get("title")
deposit = d.get("deposit")
rent = d.get("rent")
print("*" * 100)
print("{} [{}]".format(title, item_id))
print("보증금/월세: {}/{}".format(deposit, rent))
print("건물층/매물층: {}/{}".format(building_floor, floor))
print("등록일자: {}".format(reg_date))
print("서비스형태/매물형태: {}/{}".format(service_type, sales_type))
print("사이즈: {}".format(size_m2))
| [
"[email protected]"
] | |
22b1c70e956c395dea7ff6157671ff88b17a7096 | c7c6a29a36b1c9692aff30e585679d46be3a4051 | /mainapp/migrations/0004_mainmodel_ph_value.py | 901c60f6a759ffe5e33ff01944e16e9293936ce0 | [] | no_license | risubaba/pH-monitoring-dashboard | 158ae02d00d536f2156eb293874b6e0f768e755e | 85f5eea85b238d9dcc70720c2ef4d07d53f28471 | refs/heads/master | 2022-12-17T10:22:39.071302 | 2019-11-25T06:11:18 | 2019-11-25T06:11:18 | 223,783,006 | 0 | 0 | null | 2022-12-08T06:58:28 | 2019-11-24T17:30:27 | JavaScript | UTF-8 | Python | false | false | 419 | py | # Generated by Django 2.2.6 on 2019-11-24 16:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0003_mainmodel_power_5'),
]
operations = [
migrations.AddField(
model_name='mainmodel',
name='pH_value',
field=models.DecimalField(decimal_places=2, default=7, max_digits=4),
),
]
| [
"[email protected]"
] | |
c42b44c992bfa82a3831e519bb78eca2431a3b0e | 8d5dae48231357998b4c46eb2daf540520de1344 | /Trees.py | 669ddd25baaecc7361079665d35d1dc9c59a13de | [] | no_license | rohith-honnegowda/AlgosInPython | cddcb49e61fbbf9c8060fd500ed943e8f215d4a3 | c84c63e77c1d4f41f77a9d51330e770582851245 | refs/heads/master | 2021-05-05T08:11:52.973374 | 2018-02-15T06:16:28 | 2018-02-15T06:16:28 | 118,936,811 | 0 | 0 | null | 2018-02-15T06:16:29 | 2018-01-25T16:17:33 | Python | UTF-8 | Python | false | false | 1,705 | py | # Tree datastructure and its various operations
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def populateTree(root):
temproot = root
two = Node(2)
three = Node(3)
four = Node(4)
five = Node(5)
six= Node(6)
seven = Node(7)
eight = Node(8)
nine = Node(9)
temproot.left = two
temproot.right = three
temproot.left.right = four
temproot.right.left = five
temproot.right.right = six
temproot.right.left.left = seven
temproot.right.right.left = eight
temproot.right.right.right = nine
def printTree(root):
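    # level-order (BFS) print: emits one list per tree level, using '*' for missing children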
buf = []
output = []
if not root:
print('*')
else:
buf.append(root)
count = 1
nextCount = 0
while count > 0:
node = buf.pop(0)
if node:
output.append(node.data)
count -= 1
else:
output.append('*')
if node and node.left:
buf.append(node.left)
nextCount += 1
else:
buf.append(None)
if node and node.right:
buf.append(node.right)
nextCount += 1
else:
buf.append(None)
if count == 0:
print(output)
output = []
count = nextCount
nextCount = 0
        # print the remaining level of '*' placeholders for the missing leaf children
for i in range(len(buf)):
output.append('*')
print(output)
def main():
root = Node(11)
populateTree(root)
printTree(root)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
c9bd0e5a4c999a5ab7fbe45f813372d1e2dcd713 | 474f00cf9f517cd36dca15b9dcbc77c77e787578 | /ccwrapper/catcoin.py | f43272821dc7cb3be3998bd998235102c532b77c | [
"Apache-2.0"
] | permissive | duzive/ccwrapper | 5a994b5c7158d27f266ee308d6b4881a6e426e9c | 70369e076a8ba92d201a51c7991bc4a0d29823fc | refs/heads/master | 2022-11-15T02:12:11.915309 | 2020-07-17T20:55:57 | 2020-07-17T20:55:57 | 280,410,467 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,039 | py | from requests import get, post
from time import sleep
from error import CCError
class CatCoinWrapper(object):
__slots__ = ('__user_id', '__token', '__api_url')
def __init__(self, user_id, token):
'''
        :param user_id: VK (Vkontakte) ID used for the CatCoin API
        :param token: CatCoin API token
'''
self.__user_id = user_id
self.__token = token
        self.__api_url = 'https://likecoin2.ru/api/'  # requests needs a URL with an explicit scheme
def _send_request(self, params = None):
try:
response = post(self.__api_url, params = params, headers={})
except Exception as err:
raise CCError("CCWrapper: API server not available or connection problems", err)
        data = response.json()
        if 'error' in data:
            raise CCError("CCWrapper: Error when executing the request", data)
        return data['response']
    def get_transfer_history(self, tx=None):
params = {
"method": "tx",
"merchantId": self.__user_id,
"key": self.__token,
"tx": [tx]
}
return self._send_request(params = params)
def make_transfer(self, toid, amount, mark=0):
params = {
"method": "send",
"merchantId": self.__user_id,
"key": self.__token,
"toId": toid,
"amount": amount,
"markAsMerchant": mark
}
return self._send_request(params = params)
def get_users_score(self, user_ids):
params = {
"method": "score",
"merchantId": self.__user_id,
"key": self.__token,
"userIds": user_ids
}
return self._send_request(params = params)
def set_shop_name(self, new_name):
params = {
"method": "setName",
"merchantId": self.__user_id,
"key": self.__token,
"name": new_name
}
return self._send_request(params = params)
def set_callback(self, callback_url):
params = {
"method": "set",
"merchantId": self.__user_id,
"key": self.__token,
"callback": callback_url
}
return self._send_request(params = params)
def get_lost_transfer(self):
params = {
"method": "lost",
"merchantId": self.__user_id,
"key": self.__token
}
return self._send_request(params = params)
class CCPoll(object):
__slots__ = ('__api', '__LastTransactions')
def __init__(self, ApiObject):
self.__api = ApiObject
self.__LastTransactions = self.__api.get_transfer_history()[0]["id"]
    def listen(self, interval=3):
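        # polling generator: repeatedly fetch the transfer history and yield payments newer than the last seen id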
while True:
history = self.__api.get_transfer_history()
if history[0] != self.__LastTransactions:
for payment in history:
if payment['id'] > self.__LastTransactions:
yield payment
else:
self.__LastTransactions = self.__api.get_transfer_history()[0]["id"]
            sleep(interval) | [
"[email protected]"
] | |
88da7497807a799bf149384c35a374f1a5bc4c0d | 1811d37ed6474ab7eaeafff3c82d3bb7c0466e3d | /parts/zodiac/pyramid/tests/test_scripts/test_ptweens.py | e65af20323e76ac294ca706cb05db207f8cc75d0 | [] | no_license | bernatcortina/zodiac | ed384fe96f6739d841a3a777d10bad4b33fd0e78 | aa0ecb2c386fc5b54ff60ba94e0a1bc5a7493f17 | refs/heads/master | 2021-01-18T14:02:44.978553 | 2014-02-07T17:33:27 | 2014-02-07T17:33:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | /Users/Bernat/GitHub/zodiac/eggs/pyramid-1.4-py2.7.egg/pyramid/tests/test_scripts/test_ptweens.py | [
"[email protected]"
] | |
534c6b9603e2b3195e9f8d06e6b1ec02e3df20eb | d29738495f1efb53733ae3280c9c4c840762bc76 | /day7/t_shirt.py | 5ad9be942930869066b5d828b08e1730d6d004ee | [] | no_license | himanshuchelani/Python_course | e317fec168dd5952a0b31a594ae4c4d9ff0806af | e533aa63eee990364d151dc6f1712005696f29fc | refs/heads/master | 2020-05-14T01:54:51.833559 | 2019-08-25T13:10:48 | 2019-08-25T13:10:48 | 181,687,249 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,861 | py | """Q1. (Create a program that fulfills the following specification.)
tshirts.csv
T-Shirt Factory:
You own a clothing factory. You know how to make a T-shirt given the height and weight of a customer.
You want to standardize the production on three sizes: small, medium, and large. How would you figure out the actual size of these 3 types of shirt to better fit your customers?
Import the tshirts.csv file and perform Clustering on it to make sense out of the data as stated above.
"""
# import Library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#import dataset
df=pd.read_csv("tshirts.csv")
#Selecting Features
features=df.iloc[:,1:].values
plt.scatter(features[:,0],features[:,1])
#Finding no.of cluster
from sklearn.cluster import KMeans
wcss=[]
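# WCSS (within-cluster sum of squares, KMeans.inertia_) is recorded for k = 1..4; the "elbow" of this curve indicates a suitable cluster count (3 is used below)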
for i in range(1,5):
kmeans=KMeans(n_clusters=i,init='k-means++',random_state=0)
kmeans.fit(features)
wcss.append(kmeans.inertia_)
plt.plot(range(1,5),wcss)
plt.title('Elbow Method')
plt.xlabel('Number of Clusters')
plt.ylabel('WCSS')
plt.show()
kmeans = KMeans(n_clusters = 3, init = 'k-means++', random_state = 0)
pred_cluster = kmeans.fit_predict(features)
plt.scatter(features[pred_cluster==0,0],features[pred_cluster==0,1],color="orange",label="med")
plt.scatter(features[pred_cluster==1,0],features[pred_cluster==1,1],color="red",label="large")
plt.scatter(features[pred_cluster==2,0],features[pred_cluster==2,1],color="green",label="Small")
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], c = 'yellow', label = 'Centroids')
a=kmeans.cluster_centers_
print("height and weight for small size is",a[2])
print("height and weight for Medium is Size",a[0])
print("height and weight for Large size is",a[1])
plt.title('Clusters of datapoints')
plt.xlabel('Height')
plt.ylabel('Weight')
plt.legend()
plt.show()
| [
"[email protected]"
] | |
babfcdf3b882999af803cf1579a6ffc69c2f9480 | 296ef02d80108e6404fabbeee31fb1b26d7bf202 | /Python/find_missing.py | 510dfda8e9f082db5f567ad38b51938654d33f73 | [] | no_license | craigread77/CodeWars | cb00d6438ae51bd510766d84ac4072268fdf2c07 | de14bc8bc0f8bc53ea25bc97cb10cbac9b3bad97 | refs/heads/master | 2023-07-21T09:45:18.559137 | 2020-02-01T03:46:19 | 2020-02-01T03:46:19 | 209,141,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | def find_missing(sequence):
    pass  # TODO: return the missing element of the sequence
| [
"[email protected]"
] | |
057f17f474be14a66bb9937b475951cf147c8312 | e20aa172b8cbd1133597da4ca6ca79d2d3566be3 | /main.py | 2e0e4419978d62177c56efd073f625e7424bbc88 | [] | no_license | kirilldd2/currency_exchange_rate_predictor | 3071c47c07197be392e0a11161ae87a6e18845fd | 6c0b45f0e953299bbd3bf5cd0510a929b572498d | refs/heads/master | 2023-02-10T08:52:25.371201 | 2020-11-12T11:33:03 | 2020-11-12T11:33:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | import predictor as pd
from datetime import date
if __name__ == '__main__':
    # proxy pattern: DataVaultProxy stands in for the real DataVault
print('------------Proxy----------------')
real = pd.DataVault()
proxy = pd.DataVaultProxy(real)
result = proxy.get_data(('rub', 'usd'), date(2020, 1, 1), 'day', date(2020, 1, 31))
    # delegation: each Manager delegates the work to its Predictor and Visualizer
print('-----------Delegation-------------')
visualizers = [pd.Visualizer1(), pd.Visualizer2()]
predictors = [pd.Predictor1(), pd.Predictor2()]
manager1 = pd.Manager(predictors[0], visualizers[1])
manager2 = pd.Manager(predictors[1], visualizers[0])
manager1.predict(pd.Data())
manager1.show(pd.Data(), pd.Data())
manager2.predict(pd.Data())
manager2.show(pd.Data(), pd.Data())
| [
"[email protected]"
] | |
a88ffb6174ebdfd827e73bd766adc0b636593847 | f77d25b8c21edd450257e4a6c91464116021b7af | /string-compression.py | 492609f12d769f8a3a169e35f4859bf92ba310be | [] | no_license | kumarUjjawal/python_problems | 094d7641fefe19115e85c0d2ee78bd2be656c8b2 | dc28844a706f31619e646f30261435c8a829cade | refs/heads/master | 2023-03-02T00:54:21.069340 | 2021-02-07T13:13:18 | 2021-02-07T13:13:18 | 312,626,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | # Perform string compression using counts of repeated characters.
def compress_string(string):
compressed = []
counter = 0
for i in range(len(string)): # noqa
if i != 0 and string[i] != string[i - 1]:
compressed.append(string[i - 1] + str(counter))
counter = 0
counter += 1
# add last repeated character
if counter:
compressed.append(string[-1] + str(counter))
# returns original string if compressed string isn't smaller
return min(string, "".join(compressed), key=len)
string = 'aabbcc'
print(compress_string(string))  # prints 'aabbcc' because the compressed form is not shorter | [
"[email protected]"
] | |
5b3f0779c66036825c0fb1976a146b8afea55a86 | 175107e6b61ab499e22c74a8c3f05c3fbf533d0d | /src/adventofcode_2020/day_5.py | cc0f54d8607fec31e1cb885c48eaf4a6bc4f86a8 | [
"MIT"
] | permissive | Erik-vdg/adventofcode-2020 | 70427659e9ebc7c957e89951f14feabb98cfdfc0 | 72a6009786eb12eeb449243336f63d704f0eb053 | refs/heads/main | 2023-02-07T05:32:35.038002 | 2021-01-03T22:03:41 | 2021-01-03T22:03:41 | 318,713,126 | 0 | 0 | MIT | 2021-01-03T22:03:42 | 2020-12-05T05:51:14 | Python | UTF-8 | Python | false | false | 1,399 | py | from dataclasses import dataclass
@dataclass
class Seat:
row: int
column: int
@classmethod
def from_boarding_pass(cls, boarding_pass: str) -> "Seat":
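        # binary space partitioning: F/B repeatedly halves the 0-127 row range, L/R halves the 0-7 column range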
min_row = 0
max_row = 127
for char in boarding_pass[:-3]:
if char == "F":
max_row = int(max_row - (max_row - min_row + 1) / 2)
elif char == "B":
min_row = int(min_row + (max_row - min_row + 1) / 2)
min_col = 0
max_col = 7
for char in boarding_pass[-3:]:
if char == "L":
max_col = int(max_col - (max_col - min_col + 1) / 2)
elif char == "R":
min_col = int(min_col + (max_col - min_col + 1) / 2)
return cls(row=max_row, column=max_col)
@property
def id(self) -> int:
return self.row * 8 + self.column
if __name__ == "__main__":
with open("src/adventofcode_2020/input_data/day_5.txt", "r") as input_data:
boarding_passes = input_data.read().splitlines()
seats = [Seat.from_boarding_pass(p) for p in boarding_passes]
ids = sorted([seat.id for seat in seats])
highest_id = max(ids)
print(f"Part 1: {highest_id}")
for i in range(1, 128 * 8 - 1):
if i not in ids:
if i + 1 in ids and i - 1 in ids:
print(f"Part 2: {i}")
break
| [
"[email protected]"
] | |
4141188aff9e8bd74c836ce018b14b259eb7c4ce | ea4c4475a6c314d58e5e9272c18a52e083bf1d1d | /util/__init__.py | 2cc570983393ce016cadd08f8b01b5e06039ed2f | [
"Apache-2.0"
] | permissive | marshuang80/ml-scl | 389ae1535253986f563153a5f5a8dc95814ede11 | ca6a3cbd82a7afa4f3d1e5e3385ddea7376da653 | refs/heads/master | 2023-01-24T14:47:58.669346 | 2020-08-17T02:31:54 | 2020-08-17T02:31:54 | 278,994,835 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | from .image_util import *
from .eval_util import *
from .optim_util import * | [
"[email protected]"
] | |
c4df6dd3299a3317a4247bd9c0efab72463ce5c0 | b005e5cbd491aa3c3516677d72c2171db3904847 | /video_spliter.py | 33ec8c59460bb0b034816cbfcbcd2fbe6283cd57 | [] | no_license | Reina-a/scripts-python | 90e94971cfd0f768dfb7b1a454065bc35e1a424d | 583db410fdac09a408261447eeb6a9d517b9641f | refs/heads/master | 2022-11-26T00:25:02.645317 | 2020-08-01T11:52:38 | 2020-08-01T11:52:38 | 283,504,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | from cv2 import cv2
cap = cv2.VideoCapture('k3.mp4')
if cap.isOpened():
print('Open successfully!')
fps = cap.get(cv2.CAP_PROP_FPS)
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
print(frame_count)
print('fps: ', fps,'width: ',width,'height: ',height)
for i in range(int(frame_count)):
ret, frame = cap.read()
filename = 'out/' + 'frame' + "{:0>4d}".format(i) + '.jpg'
cv2.imwrite(filename, frame)
else:
print('Error occurred when opening the video.')
| [
"[email protected]"
] | |
70988cdaae8fe160d9ff4483ca11bfcc6337c3df | 3e323c72ebb497a4e9ea5fcc94ee11962e44c43d | /main/admin.py | cccdc791c0b1a9c82b74a4cb2fdf101e0eeb5ea6 | [] | no_license | skru/HealthApp | 6e50e6dd2ef1f8502414bee8f7896beeb6ea2537 | fefc37a6baa05fc2825ee6cd5683afcb010e3034 | refs/heads/master | 2022-12-12T19:00:17.161712 | 2020-05-01T12:06:35 | 2020-05-01T12:06:35 | 242,496,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,727 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from django.contrib.contenttypes.admin import GenericTabularInline
from .models import *
from chat.models import *
# Define an inline admin descriptor for Profile model
# which acts a bit like a singleton
class ProfileInline(admin.StackedInline):
model = Profile
fk_name = "user"
can_delete = False
verbose_name_plural = 'Profile'
# Define a new User admin
class UserAdmin(BaseUserAdmin):
inlines = (ProfileInline,)
list_filter = BaseUserAdmin.list_filter + ('profile__is_practitioner',)
# Re-register UserAdmin
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
class PractitionerAdmin(admin.ModelAdmin):
list_display = ['is_practitioner',]
can_delete = False
def get_queryset(self, request):
return super(PractitionerAdmin,self).get_queryset(request).filter(is_practitioner=True)
admin.site.register(Practitioner, PractitionerAdmin)
class PatientAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return super(PatientAdmin,self).get_queryset(request).filter(is_practitioner=False)
admin.site.register(Patient, PatientAdmin)
class MessageInline(admin.TabularInline):
model = Message
readonly_fields = ['author', 'content']
fieldsets = (
(None, {
'fields': ('author', 'content',)
}),
)
class ChatAdmin(admin.ModelAdmin):
inlines = [
MessageInline,
]
readonly_fields = ['chat_uuid', 'participants']
fieldsets = (
(None, {
'fields': ('chat_uuid', 'participants')
}),
)
admin.site.register(Chat, ChatAdmin)
| [
"[email protected]"
] | |
0a304e0a40a48f21f190293e60e6e9529aa92ad8 | 7d1b906d0c5bd68eceb904232bb16e705578fb5a | /news_center/admin.py | 1154a46efb99f8dc37024f96482e8760722b45a8 | [] | no_license | jesustr20/ejercicio_semana12 | a02bbcbbb4d8882674b40cabedadd9f60b031ed4 | 8e4c06965f06c33ae1392461a27475d163b32161 | refs/heads/master | 2023-04-27T11:32:13.941847 | 2019-10-30T22:48:51 | 2019-10-30T22:48:51 | 218,639,678 | 0 | 0 | null | 2023-04-21T20:39:51 | 2019-10-30T22:42:49 | Python | UTF-8 | Python | false | false | 113 | py | from django.contrib import admin
from .models import New
# Register your models here.
admin.site.register(New)
| [
"[email protected]"
] | |
af012a55a4d87f181d28bcb4732b0e1c344fcd66 | c33e8b3d524ea6faa3cc538b45921edd7786cde4 | /ANN.py | 8e97721b0fc2b88308bfc4a5c13edda65f9427ba | [] | no_license | ganashanth/Machine-learning-using-various-techniques | df7973639d97152c1ba9d40e4e691f0a0dd3495e | bd4f51f5fe16ca6c9f417fe43e1111cf1f842bf7 | refs/heads/master | 2020-04-14T17:24:18.998847 | 2019-01-09T09:52:49 | 2019-01-09T09:52:49 | 163,978,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,520 | py | from __future__ import print_function, division
import numpy as np
import pandas as pd
from scipy.io import arff
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.neural_network import MLPClassifier
"""This code applies the MLPClassifier (Multi-Layer Perceptron)
algorithm to the given dataset. We use train_test_split
to split the data into a 70/30 training and testing set. Hyperparameter
optimization is carried out using 10-fold cross validation on
the training dataset. Running this file prints the cross-validation
accuracy, the test accuracy and the confusion matrix for the test data."""
#load the data and convert it in DataFrame object
data,meta = arff.loadarff("training_dataset.arff")
data = pd.DataFrame(data)
#We need to replace all negative values with '2'
data = data.replace('-1', '2')
data = pd.get_dummies(data, columns = ['URL_Length','SSLfinal_State','having_Sub_Domain',\
'URL_of_Anchor','Links_in_tags','SFH', 'web_traffic',\
'Links_pointing_to_page'])
data = data.apply(pd.to_numeric)
#Creating predictors and target
labels = data.columns
X = data[labels[:-1]]
Y = data['Result']
#splitting into train/test set (70/30)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.3)
#Aritificial Neural Net
# We have two hyper parameter in MLPClassifier
#hidden layer size and learning rate
# we will optimise these using hyperparameter optimization
hidden_layer_values = [10, 25, 50, 100]
learning_rate_values = [0.0001, 0.001, 0.01, 0.1, 1]
cross_val_scores = []
for h in hidden_layer_values:
for l in learning_rate_values:
classifier = MLPClassifier(hidden_layer_sizes = h, learning_rate_init = l)
crossval_score = cross_val_score(classifier, X_train.values, Y_train.values, cv = 10)
cross_val_scores.append(crossval_score.mean())
max_score = max(cross_val_scores)
max_score_index = cross_val_scores.index(max_score)
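# the best (hidden_layer, learning_rate) pair corresponds to divmod(max_score_index, len(learning_rate_values)); the values below are hard-coded rather than read from it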
print("using hidden_layer_size = 50 and learning_rate = 0.1")
classifier = MLPClassifier(hidden_layer_sizes = 50, learning_rate_init = 0.1)
classifier.fit(X_train.values, Y_train.values)
predicted = classifier.predict(X_test.values)
testing_accuracy = accuracy_score(Y_test.values, predicted)
conf_matrix = confusion_matrix(Y_test.values, predicted)
print("Cross Validation Score is %f"%max_score)
print("testing accuracy is %f"%testing_accuracy)
print("confusion matrix is")
print(conf_matrix)
| [
"[email protected]"
] | |
c8a72f5a833ab58c631ccd20d56b9b4f4518c6b1 | 18d352b8e07337c345327c32f00ba8167731609f | /src/cm_api_tests/test_replication.py | b0099793550c7b448e6d0306569910b1df6efdf4 | [] | no_license | jbfavre/python-cmapi | f52797371c54bd250891008a117477ee0439e5ea | 9de308403836de23943c3a742514e279ed93ca30 | refs/heads/master | 2020-12-24T17:44:16.691145 | 2014-06-06T15:54:42 | 2014-06-06T15:54:42 | 20,562,437 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,228 | py | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import random
import unittest
try:
import json
except ImportError:
import simplejson as json
from cm_api.endpoints.types import *
from cm_api.endpoints.services import ApiService
from cm_api_tests import utils
class TestReplicationTypes(unittest.TestCase):
def test_hdfs_arguments(self):
RAW = '''{
"sourceService" : {
"peerName" : "vst2",
"clusterName" : "Cluster 1 - CDH4",
"serviceName" : "HDFS-1"
},
"sourcePath" : "/data",
"destinationPath" : "/copy/data2",
"mapreduceServiceName" : "MAPREDUCE-1",
"schedulerPoolName" : "medium",
"userName" : "systest",
"dryRun" : false,
"abortOnError" : true,
"removeMissingFiles" : false,
"preserveReplicationCount" : true,
"preserveBlockSize" : true,
"preservePermissions" : false
}'''
args = utils.deserialize(RAW, ApiHdfsReplicationArguments)
self.assertEquals('vst2', args.sourceService.peerName)
self.assertEquals('Cluster 1 - CDH4', args.sourceService.clusterName)
self.assertEquals('HDFS-1', args.sourceService.serviceName)
self.assertEquals('/data', args.sourcePath)
self.assertEquals('/copy/data2', args.destinationPath)
self.assertEquals('MAPREDUCE-1', args.mapreduceServiceName)
self.assertEquals('medium', args.schedulerPoolName)
self.assertEquals('systest', args.userName)
self.assertFalse(args.dryRun)
self.assertTrue(args.abortOnError)
self.assertFalse(args.removeMissingFiles)
self.assertTrue(args.preserveBlockSize)
self.assertFalse(args.preservePermissions)
self.assertTrue(args.preserveReplicationCount)
def test_hive_arguments(self):
RAW = '''{
"sourceService" : {
"peerName" : "vst2",
"clusterName" : "Cluster 1 - CDH4",
"serviceName" : "HIVE-1"
},
"force" : true,
"replicateData" : true,
"hdfsArguments" : {
"mapreduceServiceName" : "MAPREDUCE-1",
"dryRun" : false,
"abortOnError" : false,
"removeMissingFiles" : false,
"preserveReplicationCount" : false,
"preserveBlockSize" : false,
"preservePermissions" : false
},
"tableFilters" : [
{ "database" : "db1", "tableName" : "table1" }
],
"dryRun" : false
}'''
args = utils.deserialize(RAW, ApiHiveReplicationArguments)
self.assertEquals('vst2', args.sourceService.peerName)
self.assertEquals('Cluster 1 - CDH4', args.sourceService.clusterName)
self.assertEquals('HIVE-1', args.sourceService.serviceName)
self.assertTrue(args.force)
self.assertTrue(args.replicateData)
self.assertIsInstance(args.hdfsArguments, ApiHdfsReplicationArguments)
self.assertIsInstance(args.tableFilters, list)
self.assertEquals(1, len(args.tableFilters))
self.assertIsInstance(args.tableFilters[0], ApiHiveTable)
self.assertEquals("db1", args.tableFilters[0].database)
self.assertEquals("table1", args.tableFilters[0].tableName)
def test_schedule(self):
RAW = '''{
"id" : 39,
"startTime" : "2012-12-10T23:11:31.041Z",
"interval" : 1,
"intervalUnit" : "DAY",
"paused" : false,
"nextRun" : "2013-01-15T23:11:31.041Z",
"history" : [ {
"id" : 738,
"name" : "HiveReplicationCommand",
"startTime" : "2013-01-15T18:28:24.895Z",
"endTime" : "2013-01-15T18:30:49.446Z",
"active" : false,
"success" : true,
"resultMessage" : "Hive Replication Finished Successfully.",
"resultDataUrl" : "/cmf/command/738/download",
"serviceRef" : {
"clusterName" : "Cluster 1 - CDH4",
"serviceName" : "HIVE-1"
},
"hiveResult" : {
"tables" : [ {
"database" : "default",
"tableName" : "repl_test_1"
}, {
"database" : "default",
"tableName" : "sample_07"
}, {
"database" : "default",
"tableName" : "sample_08"
} ],
"errors" : [ ],
"dataReplicationResult" : {
"progress" : 100,
"numFilesCopied" : 0,
"numBytesCopied" : 0,
"numFilesSkipped" : 3,
"numBytesSkipped" : 92158,
"numFilesDeleted" : 0,
"numFilesCopyFailed" : 0,
"numBytesCopyFailed" : 0,
"dryRun" : false
},
"dryRun" : false
}
} ],
"alertOnStart" : false,
"alertOnSuccess" : false,
"alertOnFail" : false,
"alertOnAbort" : false,
"hiveArguments" : {
"sourceService" : {
"peerName" : "vst2",
"clusterName" : "Cluster 1 - CDH4",
"serviceName" : "HIVE-1"
},
"force" : true,
"replicateData" : true,
"hdfsArguments" : {
"mapreduceServiceName" : "MAPREDUCE-1",
"dryRun" : false,
"abortOnError" : false,
"removeMissingFiles" : false,
"preserveReplicationCount" : false,
"preserveBlockSize" : false,
"preservePermissions" : false
},
"dryRun" : false
}
}'''
sched = utils.deserialize(RAW, ApiReplicationSchedule)
self.assertEqual(39, sched.id)
self.assertEqual(self._parse_time("2012-12-10T23:11:31.041Z"), sched.startTime)
self.assertEqual('DAY', sched.intervalUnit)
self.assertEqual(1, sched.interval)
self.assertFalse(sched.paused)
self.assertEqual(self._parse_time("2013-01-15T23:11:31.041Z"), sched.nextRun)
self.assertFalse(sched.alertOnStart)
self.assertIsNotNone(sched.hiveArguments)
self.assertEqual(1, len(sched.history))
self.assertIsInstance(sched.history[0], ApiReplicationCommand)
self.assertEqual('default', sched.history[0].hiveResult.tables[0].database)
self.assertEqual(92158, sched.history[0].hiveResult.dataReplicationResult.numBytesSkipped)
def test_peers(self):
RAW = '''{
"name" : "peer1",
"url" : "http://peer1",
"username" : "user1",
"password" : "pwd"
}'''
peer = ApiCmPeer.from_json_dict(json.loads(RAW), None)
self.assertEquals("peer1", peer.name)
self.assertEquals("http://peer1", peer.url)
self.assertEquals("user1", peer.username)
self.assertEquals("pwd", peer.password)
def _parse_time(self, tstr):
return datetime.datetime.strptime(tstr, "%Y-%m-%dT%H:%M:%S.%fZ")
class TestReplicationRequests(unittest.TestCase):
def __init__(self, methodName):
super(TestReplicationRequests, self).__init__(methodName)
self.resource = utils.MockResource(self)
def test_replication_crud(self):
service = ApiService(self.resource, 'hdfs1', 'HDFS')
service.__dict__['clusterRef'] = ApiClusterRef(self.resource, clusterName='cluster1')
hdfs_args = ApiHdfsReplicationArguments(self.resource)
hdfs_args.sourceService = ApiServiceRef('cluster2', 'hdfs2')
hdfs_args.sourcePath = '/src'
hdfs_args.destinationPath = '/dst'
return_sched = ApiReplicationSchedule(self.resource,
interval=2, intervalUnit='DAY')
return_sched.hdfsArguments = hdfs_args
return_sched.__dict__['id'] = 1
return_list = ApiList([ return_sched ]).to_json_dict()
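    # each expect() call arms the mock resource with the HTTP method and path it should see and the canned payload to return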
self.resource.expect("POST",
"/clusters/cluster1/services/hdfs1/replications",
retdata=return_list)
sched = service.create_replication_schedule(
None, None, 'DAY', 2, True, hdfs_args, alert_on_fail=True)
self.assertEqual(return_sched.intervalUnit, sched.intervalUnit)
self.assertEqual(return_sched.interval, sched.interval)
self.assertIsInstance(sched.hdfsArguments, ApiHdfsReplicationArguments)
self.resource.expect("GET",
"/clusters/cluster1/services/hdfs1/replications",
retdata=return_list)
service.get_replication_schedules()
self.resource.expect("GET",
"/clusters/cluster1/services/hdfs1/replications/1",
retdata=return_sched.to_json_dict())
service.get_replication_schedule(1)
self.resource.expect("PUT",
"/clusters/cluster1/services/hdfs1/replications/1",
retdata=return_sched.to_json_dict())
service.update_replication_schedule(1, return_sched)
self.resource.expect("DELETE",
"/clusters/cluster1/services/hdfs1/replications/1",
retdata=return_sched.to_json_dict())
service.delete_replication_schedule(1)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
9bfcc14d955c900d812edf063bb26a835f8cc94e | a3ff056443bcbcf80a4f3e39a63abb8b546e2b69 | /Controller/AuteurController.py | 9489a1aeda6ca6316a50796444c7f5b301440662 | [
"Unlicense"
] | permissive | amillet90/Flask_crud_v1 | aef041b45c9283af9fd030d9f80b4d2b5f304443 | 4958c06027b3eab87ac66774bb200ee2f4552e29 | refs/heads/master | 2023-03-26T18:23:18.631532 | 2020-06-14T21:02:40 | 2020-06-14T21:02:40 | 261,981,554 | 0 | 0 | Unlicense | 2021-03-20T04:19:14 | 2020-05-07T07:34:08 | Python | UTF-8 | Python | false | false | 2,122 | py | import re
from flask import *
from Model import Auteur, Oeuvre
c = Blueprint('auteur', __name__, url_prefix='/auteur')
@c.route('/show')
def show():
return render_template('auteur/showAuteurs.html.jj2', auteurs=Auteur.list())
@c.route('/supprimer/<int:id>', methods=['GET'])
def supprimer(id):
auteur = Auteur.get(id)
if not auteur:
abort(404)
oeuvres = Oeuvre.find_by(id)
if oeuvres:
return render_template('auteur/ErrorDeleteAuteur.html.jj2', nombre=len(oeuvres)), 400
Auteur.delete(id)
return redirect(url_for('auteur.show'))
@c.route('/ajouter', methods=['GET', 'POST'])
def ajouter():
if request.method == 'GET':
return render_template('auteur/addAuteur.html.jj2', errors=dict())
valid, errors = valider_form()
if valid:
auteur = Auteur.insert(request.form['prenom'], request.form['nom'])
return redirect(url_for('auteur.show'))
else:
return render_template('auteur/addAuteur.html.jj2', errors=errors)
@c.route('/modifier/<int:id>', methods=['GET', 'POST'])
def modifier(id):
auteur = Auteur.get(id)
if not auteur:
abort(404)
if request.method == 'GET':
return render_template('auteur/editAuteur.html.jj2', auteur=auteur,
errors=dict())
valid, errors = valider_form()
if valid:
Auteur.update(id, request.form['prenom'], request.form['nom'])
return redirect(url_for('auteur.show'))
else:
return render_template('auteur/editAuteur.html.jj2', auteur=auteur,
errors=errors)
def valider_form():
valid = True
errors = dict()
if not re.match(r'\w{2,}', request.form['prenom']):
# flash('Prenom doit avoir au moins deux caractères')
errors['prenom'] = 'Prenom doit avoir au moins deux caractères'
valid = False
if not re.match(r'\w{2,}', request.form['nom']):
# flash('Nom doit avoir au moins deux caractères')
errors['nom'] = 'Nom doit avoir au moins deux caractères'
valid = False
return (valid, errors)
| [
"[email protected]"
] | |
b626e3121a2dd2b30ae1100cb4d69bdbaedc2a96 | 94c654a1ca30d94caa1e9e7d40e09015efa2e573 | /03-persistence/scorelib.py | 4c066e87a54bde0ed91fc7a57cd6efd25b23183a | [] | no_license | martinzilak/PV248 | e2787bfc895d55a73f5d01280034ed4c9200bbef | 518b1ada481a11a17f94d6969ccfa99ee839e0d4 | refs/heads/master | 2021-10-08T11:41:15.224871 | 2018-12-11T21:43:56 | 2018-12-11T21:43:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,135 | py | from enum import Enum
import re
Regex = {
'NUMBER': r'.*?(\d+)',
'ANYTHING_AFTER_COLON': r'.*?:(.*)',
'COMPOSER': r'(.+?)\(([^-]*)(-{1,2}|\+|\*)([^-]*)\)',
'Y': r'.*?(y)',
'COMPOSITION_YEAR': r'.*?(\d{3,})',
'VOICE': r'\s*(?:(\S+?)(-{2})(\S+?)(?:,|;)\s*){0,1}(.*)',
'EDITOR': r'((?:\.|\-|\w)+(?:\,)?(?:\s*)?(?:[^,])*)(?:\,\s*)?',
'VOICE_NUM': r'Voice\s*(\d+)\:.*'
}
class Composition:
    def __init__(self, name=None, incipit=None, key=None, genre=None, year=None, voices=None, authors=None):
        self.name = name
        self.incipit = incipit
        self.key = key
        self.genre = genre
        self.year = year
        self.voices = voices if voices is not None else []
        self.authors = authors if authors is not None else []
def format1(self):
if (len(self.authors) > 0 and len(self.formatAuthors()) > 0):
print('{}: {}'.format(Line.COMPOSER.value, self.formatAuthors()))
if (self.name):
print('{}: {}'.format(Line.TITLE.value, self.name))
if (self.genre):
print('{}: {}'.format(Line.GENRE.value, self.genre))
if (self.key):
print('{}: {}'.format(Line.KEY.value, self.key))
if (self.year):
print('{}: {}'.format(Line.COMPOSITION_YEAR.value, self.year))
def format2(self):
if (len(self.voices) > 0):
for v in self.voices:
if v.name or v.range:
print(v.formatted())
def formatAuthors(self):
formatted = ''
for a in self.authors:
formatted += a.formatted() + "; "
return formatted[:-2]
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return self.name == other.name and self.incipit == other.incipit and self.key == other.key and \
self.genre == other.genre and self.voices == other.voices and self.authors == other.authors
class Edition:
    def __init__(self, composition=None, authors=None, name=None):
        self.composition = composition if composition is not None else Composition()
        self.authors = authors if authors is not None else []
        self.name = name
def format(self):
self.composition.format1()
if (self.name):
print('{}: {}'.format(Line.EDITION.value, self.name))
if (len(self.authors) > 0):
print('{}: {}'.format(Line.EDITOR.value, self.formatAuthors()))
self.composition.format2()
def formatAuthors(self):
formatted = ''
for a in self.authors:
formatted += a.formatted() + ", "
return formatted[:-2]
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return self.composition == other.composition and self.authors == other.authors and self.name == other.name
class Print:
    def __init__(self, edition=None, print_id=-1, partiture=False):
        self.edition = edition if edition is not None else Edition()
        self.print_id = print_id
        self.partiture = partiture
def format(self):
print('{}: {}'.format(Line.PRINT_NUMBER.value, self.print_id))
self.edition.format()
print('{}: {}'.format(Line.PARTITURE.value, 'yes' if self.partiture else 'no'))
if (self.composition().incipit):
print('{}: {}'.format(Line.INCIPIT.value, self.composition().incipit))
print()
def composition(self):
return self.edition.composition
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return self.edition == other.edition and self.print_id == other.print_id and self.partiture == other.partiture
class Voice:
def __init__(self, name=None, range=None, number=-1):
self.name = name
self.range = range
self.number = number
def formatted(self):
if (self.range and self.name):
return ('{} {}: {}, {}'.format(Line.VOICE.value, self.number, self.range, self.name))
elif (self.range and not self.name):
return ('{} {}: {}'.format(Line.VOICE.value, self.number, self.range))
elif (self.name and not self.range):
return ('{} {}: {}'.format(Line.VOICE.value, self.number, self.name))
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return self.name == other.name and self.range == other.range and self.number == other.number
class Person:
def __init__(self, name=None, born=None, died=None):
self.name = name
self.born = born
self.died = died
def formatted(self):
formatted = self.name
if self.born or self.died:
formatted += ' ({}--{})'.format(self.born if self.born else '', self.died if self.died else '')
return formatted
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return self.name == other.name and self.born == other.born and self.died == other.died
class Line(Enum):
PRINT_NUMBER = 'Print Number'
COMPOSER = 'Composer'
TITLE = 'Title'
GENRE = 'Genre'
KEY = 'Key'
COMPOSITION_YEAR = 'Composition Year'
PUBLICATION_YEAR = 'Publication Year'
EDITION = 'Edition'
EDITOR = 'Editor'
VOICE = 'Voice'
PARTITURE = 'Partiture'
INCIPIT = 'Incipit'
def parseSimple(line, regex, group=1, defval=None, parseint=False):
r = re.compile(Regex[regex])
m = r.match(line)
if m:
m = m.group(group)
return m.strip() if not parseint else int(m.strip())
return defval
def isInt(s):
try:
int(s)
return True
except ValueError:
return False
def parseComposer(line):
if line == None:
return []
authors = []
for composer in line.split(';'):
composer = composer.strip()
person = Person()
r = re.compile(Regex['COMPOSER'])
m = r.match(composer)
if m and len(m.group(1).strip()) > 0:
person.name = m.group(1).strip()
if m.group(2) and isInt(m.group(2)):
person.born = int(m.group(2))
if m.group(4) and isInt(m.group(4)):
if m.group(3) == '*':
person.born = int(m.group(4))
else:
person.died = int(m.group(4))
else:
person.name = composer
authors.append(person)
return authors
def parseEdition(name):
if name == None:
return ''
return name
def parseEditor(line):
editors = []
if line == None:
return editors
r = re.compile(Regex['EDITOR'])
m = r.findall(line)
for name in m:
if len(name.strip()) > 0:
p = Person()
p.name = name.strip()
editors.append(p)
return editors
def parseVoice(line, num):
v = Voice()
if line == None:
return v
r = re.compile(Regex['VOICE'])
m = r.match(line.strip())
if m:
v.number = num
if not m.group(1) and not m.group(2) and not m.group(3) and '--' in m.group(4) and ',' not in m.group(4)\
and ';' not in m.group(4):
v.range = m.group(4)
v.name = None
else:
v.name = m.group(4)
range = ''
if m.group(1):
range += m.group(1)
range += '--'
if m.group(3):
range += m.group(3)
v.range = range if len(range) > 2 else None
return v
def parsePartiture(line):
return True if parseSimple(line, 'Y') else False
def starts(line, linetype):
return line.lower().startswith(linetype.value.lower())
def parse(_temp, line):
if starts(line, Line.PRINT_NUMBER):
_temp['print'].print_id = parseSimple(line, 'NUMBER', parseint=True)
elif starts(line, Line.COMPOSER):
_temp['composition'].authors = parseComposer(parseSimple(line, 'ANYTHING_AFTER_COLON'))
elif starts(line, Line.TITLE):
_temp['composition'].name = parseSimple(line, 'ANYTHING_AFTER_COLON')
elif starts(line, Line.GENRE):
_temp['composition'].genre = parseSimple(line, 'ANYTHING_AFTER_COLON')
elif starts(line, Line.KEY):
_temp['composition'].key = parseSimple(line, 'ANYTHING_AFTER_COLON')
elif starts(line, Line.COMPOSITION_YEAR):
_temp['composition'].year = parseSimple(line, 'COMPOSITION_YEAR', parseint=True)
elif starts(line, Line.PUBLICATION_YEAR):
pass
elif starts(line, Line.EDITION):
_temp['edition'].name = parseSimple(line, 'ANYTHING_AFTER_COLON')
elif starts(line, Line.EDITOR):
_temp['edition'].authors = parseEditor(parseSimple(line, 'ANYTHING_AFTER_COLON'))
elif starts(line, Line.VOICE):
_temp['voices'].append(parseVoice(parseSimple(line, 'ANYTHING_AFTER_COLON'), parseSimple(line, 'VOICE_NUM', parseint=True)))
elif starts(line, Line.PARTITURE):
_temp['print'].partiture = parsePartiture(line)
elif starts(line, Line.INCIPIT):
_temp['composition'].incipit = parseSimple(line, 'ANYTHING_AFTER_COLON')
def process(block):
_print = Print()
composition = Composition()
edition = Edition()
voices = []
_temp = {'print': _print, 'composition': composition, 'edition': edition, 'voices': voices}
for line in block:
parse(_temp, line)
composition.voices = voices
edition.composition = composition
_print.edition = edition
return _print
def load(filename):
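    # records are separated by blank lines; each blank-line-delimited block is parsed into one Print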
prints = []
blocks = []
reading = []
with open(filename, errors='ignore') as file:
for line in file:
if line != '\n':
reading.append(line)
else:
blocks.append(reading)
reading = []
blocks.append(reading)
for block in blocks:
prints.append(process(block))
return list(filter(lambda y: y.print_id >= 0, sorted(prints, key=lambda x: x.print_id)))
| [
"[email protected]"
] | |
fdd766ae19b5a40233294459af7e8d093e4b9f1c | c971cefa15391eb5bfc661b61791aa641089ff71 | /Term 2/14/1-open.py | 3be53fa519b0ca8ae5b84e4bbcc68bc2e9c951fb | [] | no_license | theseana/ajil | d8816be35b7ee56d9bc6899587b71c664a455f5f | 3fcda909dae50394120c7c05d530ad905b05da0c | refs/heads/master | 2023-02-23T23:28:23.529531 | 2021-01-26T10:07:11 | 2021-01-26T10:07:11 | 305,970,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | file = open('names.txt', 'a')
file.write('Kourosh\n')
file.close() | [
"[email protected]"
] | |
2be103b94a4b2915c4eba259acc1ebafba32b71f | 08305da9995cb69f8aca743ac217c896c98e9528 | /dirMake.py | 99cefdb18f0445c72afbc70970334dd19ffdee68 | [] | no_license | liuweistrong/DirsDemo | 99f198ca5772ba05dce6fc6b38ec66d9fcb6651a | b4fd71ebb90470bed959fe364027dda329fddd6f | refs/heads/master | 2021-09-06T04:01:53.815847 | 2018-02-02T08:33:14 | 2018-02-02T08:33:14 | 119,954,586 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,740 | py | import os
def makeDirs(PATH):
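    # os.walk yields (dirpath, dirnames, filenames) for every directory under PATH; each dirpath is recreated with 'C' replaced by 'D'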
for i in os.walk(PATH):
print(i)
dirs = i[0]
dir_str = ''.join(dirs)
os.makedirs(dir_str.replace('C','D'))
def filePathUpdate(filePath,size):
    # '''C:\Users\Administrator\Desktop\1\22.mp4 : maps the original file path to the path of the file to be generated'''
pathWith2 = os.path.splitext(filePath)
pathNew = ''.join(pathWith2[0]).replace('C','D') + ' ' + size + '-实际大小' +''.join(pathWith2[1])
return pathNew
def sizeHandle(filePath):
size = os.path.getsize(filePath)
if size >= 500000000:
sizeGB = round(size/1000000000,2)
return str(sizeGB) + 'GB'
else:
sizeMB = round(size / 1000000,2)
return str(sizeMB) + 'MB'
def write(PATH):
for i in os.walk(PATH):
dirs = i[0]
dir_str = ''.join(dirs)
files = i[2]
for file in files:
            #all the files of the original tree
filePath = dir_str + '\\' + ''.join(file)
size = sizeHandle(filePath)
fileFinal = filePathUpdate(filePath, size)
f = open(fileFinal,'w')
f.close()
if __name__ == '__main__':
PATH = r'C:\Users\Administrator\Desktop\1'
makeDirs(PATH)
write(PATH)
    '''1. Get the current working directory, i.e. the directory the current Python script runs in: os.getcwd()
    2. Return all files and directories under the given directory: os.listdir()
    3. Delete a single file: os.remove()
    4. Delete multiple (empty) directories: os.removedirs(r"c:\python")
    5. Check whether the given path is a file: os.path.isfile()
    6. Check whether the given path is a directory: os.path.isdir()
    7. Check whether a path is absolute: os.path.isabs()
    8. Check whether the given path really exists: os.path.exists()
    9. Split a path into its directory and file name: os.path.split() eg os.path.split('/home/swaroop/byte/code/poem.txt') result: ('/home/swaroop/byte/code', 'poem.txt')
    10. Split off the file extension: os.path.splitext()
    11. Get the directory part of a path: os.path.dirname()
    12. Get the file name part of a path: os.path.basename()
    13. Run a shell command: os.system()
    17. Rename a file: os.rename(old, new)
    18. Create nested directories: os.makedirs(r"c:\python\test")
    19. Create a single directory: os.mkdir("test")
    23. Get a file size: os.path.getsize(filename)'''
# print(os.getcwd())
# print(os.listdir(PATH))
# print(os.path.exists(PATH))
# print(os.getenv('pip'))
# print(os.name)
# # os.makedirs(r'C:\fuck\me')
# print(os.path.getsize(r'D:\p1Talking about men you like.mp4'))
    #create a file
# f = open(r'D:\tr.mp4','w')
#
# print(type(os.listdir(PATH)))
| [
"[email protected]"
] | |
289ec8ad7bb58019ac6246b30b4e26f72dfd62aa | 2ab208f3a517a494cdab576cd2dc67f3ccfe9405 | /log.py | 6c2d71abcd07ecb6582744ec59950520bd8f6002 | [
"MIT"
] | permissive | Irekean/DiRer_DiceRoller | dc56ad92556aae6a362d105d0adc7901375f7378 | 82a08c08b79580cd92588abc5a028000ccc4e483 | refs/heads/master | 2023-08-07T18:42:10.874786 | 2022-10-30T09:26:01 | 2022-10-30T09:26:01 | 232,535,165 | 0 | 0 | NOASSERTION | 2023-07-20T15:11:31 | 2020-01-08T10:12:30 | Python | UTF-8 | Python | false | false | 689 | py | import time
import datetime
def get_actual_time():
ts = time.time() # Just a seconds timestamp
st = datetime.datetime.fromtimestamp(ts).strftime(
'%Y-%m-%d %H:%M:%S') # Converting it to human time
return st
def log(value):
ts = time.time() # Just a seconds timestamp
st = datetime.datetime.fromtimestamp(ts).strftime(
'%Y-%m-%d %H:%M:%S') # Converting it to human time
print("[ {} ] : {}".format(st, value))
def print_actual_time():
ts = time.time() # Just a seconds timestamp
st = datetime.datetime.fromtimestamp(ts).strftime(
'%Y-%m-%d %H:%M:%S') # Converting it to human time
print("[ {} ] : ".format(st), end='')
| [
"[email protected]"
] | |
405cfad4df55eb5ca46c582509c94da0daa5a043 | a00e50ce437c33d3c3a1c3214f478be93101fd8b | /Module 2-Problem 4.py | c0e49eccc6bd2c02bca185dc1a7ee7c296e981a2 | [] | no_license | kinghoward63/hello-world | df6ccc2664cffdcafb6c5b42be4d7fea4b627728 | b68cfb8f5d843ab9f951d2e9ceca8bfd0010313f | refs/heads/master | 2020-04-21T13:52:39.354394 | 2019-05-28T17:59:33 | 2019-05-28T17:59:33 | 169,615,176 | 0 | 0 | null | 2019-02-07T18:07:11 | 2019-02-07T17:48:25 | null | UTF-8 | Python | false | false | 604 | py | #Dante Howard
#04/23/2019
#This program adds fruit to an inventory dictionary, as in a continuous Python interpreter session.
def add_fruit(inventory, fruit, quantity):
inventory[fruit] = quantity
new_inventory = {}
add_fruit(new_inventory, 'strawberries', 10)
if 'strawberries' in new_inventory:
print("yes. 'Strawberries' is a key")
if new_inventory ['strawberries'] == 10:
print("There are 10 Strawberries")
add_fruit(new_inventory, 'grapes', 25)
if 'grapes' in new_inventory:
print("yes. 'grapes' is a key")
if new_inventory ['grapes'] == 25:
print("There are 25 grapes.")
| [
"[email protected]"
] | |
b2543d4e49e0d671c02106ce72e558566098bd52 | 13b82912fd09a01301ed9444e8c4ed73f77ef86c | /week_number/models/sale_order.py | d434b8783b440dd4f097244d433c7ac321cb1ba1 | [] | no_license | lizethq/Mesadeayuda | f786ccd3e2b0f921cd66504ad7930f509104c00d | 647e26143d2ca6f52f44bb37a49685b7b7cf6739 | refs/heads/master | 2023-07-08T16:26:13.214240 | 2020-10-01T15:16:32 | 2020-11-19T20:42:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | from odoo import models, fields, api
from datetime import datetime
class SaleOrder(models.Model):
_inherit = 'sale.order'
week_number = fields.Integer(string = 'Closing per week')
@api.onchange('validity_date')
def _onchange_validity_date(self):
date = str(self.validity_date).replace('-','/')
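        # isocalendar() returns (ISO year, ISO week number, ISO weekday); index 1 is the week number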
self.week_number = datetime.strptime(date,'%Y/%m/%d').isocalendar()[1] | [
"[email protected]"
] | |
0b8179b97a3c91e4e38b85d89697ca710e3c0abf | 805e4298849abe03f16b6658b832a889b9cda718 | /teme1/isPalindrome.py | 02a3db3b710e97586b73fc89220a15f538f4d128 | [] | no_license | vladzoicas/exercises | 9ef96bc9ae7bfdbb6da130974b87dfa4f1f86046 | 142a1d5b44138535677265ce6377b94814412e0a | refs/heads/master | 2021-01-25T23:46:45.657987 | 2020-02-26T13:21:43 | 2020-02-26T13:21:43 | 243,229,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | userString = (input('Please enter a stringof characters '))
isPalindrome = True
i = 0
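# compare characters pairwise from both ends, moving inwards as far as the middle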
while i < len(userString) // 2:
if userString[-i-1] != userString[i]:
isPalindrome = False
break
else: i = i + 1
print('Is the string a palindrome? ', isPalindrome )
| [
"[email protected]"
] | |
4b6ccfb86e48819f6eee99a5cd18f91af08a52ab | cb1fbe170f0c2f5d06d5bd14cc7e4470e8f87203 | /timeWrapper.py | 2d2ce11a129f7f4ab3c1570ed1b6be01eb2a5bb1 | [] | no_license | chen19900307/helloworld | 3d7786ed89a9e1f32d139071ed9a3c9203226730 | 23ed54831bbcc252492435ee10375e9d19b5b2d9 | refs/heads/master | 2021-01-01T04:29:56.406399 | 2016-05-17T08:59:06 | 2016-05-17T08:59:06 | 59,006,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | import time
from functools import wraps
def timethis(func):
'''
Decorator that reports the execution time
'''
@wraps(func)
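    # functools.wraps copies func's __name__, __doc__ and __annotations__ onto wrapper (checked by the prints below)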
def wrapper(*args,**kwargs):
start = time.time()
result = func(*args,**kwargs)
end = time.time()
print(func.__name__,end-start)
return result
return wrapper
@timethis
def countdown(n:int):
'''
Counts down
'''
while n > 0:
n -= 1
if __name__=='__main__':
countdown(100000)
print (countdown.__name__)
print (countdown.__doc__)
print (countdown.__annotations__)
| [
"[email protected]"
] | |
28681e57d26ffad32790fd7272fac313b6a9c78a | 5502782c83398118b2095fe7abc525f05020bb9a | /src/main_classify.py | 6293fb377b95e5891e39cc2a715aef633a60a420 | [] | no_license | yashkhem1/rgb_classify_regress | 2768529ba91f91f8febaa47dbcccd96c2e953653 | 63acf95c164e95ee5a35ea9bdfc2db87cd38a560 | refs/heads/master | 2023-01-23T01:20:25.272821 | 2020-02-12T13:22:33 | 2020-02-12T13:22:33 | 235,770,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,091 | py | import sys
sys.path.append('./')
import torch
import torch.utils.data
import numpy as np
from opts import opts
from models.models_list import BackBone, PoseClassifier
from data_loader.h36m_classify import H36M
from common.logger import Logger
# from utils.utils import adjust_learning_rate
# def worker_init_fn(worker_id):
# np.random.seed(np.random.get_state()[1][0] + worker_id)
def main():
opt = opts().parse()
torch.manual_seed(opt.seed)
np.random.seed(opt.seed)
torch.cuda.manual_seed(opt.seed)
torch.backends.cudnn.deterministic = True
from training.train_test_classify import run_epoch
train_dataset = H36M(opt, split='train',
train_stats={},
allowed_subj_list=opt.sub_list_reg,
)
test_dataset = H36M(opt, split='test',
train_stats={'mean_3d': train_dataset.mean_3d,
'std_3d': train_dataset.std_3d,
},
allowed_subj_list=[9, 11])
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=opt.train_batch,
shuffle=True,
num_workers=1,
pin_memory=True,
drop_last=True
# worker_init_fn=worker_init_fn
)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=opt.train_batch,
shuffle=False,
num_workers=1,
# worker_init_fn=worker_init_fn
)
opt.n_bins_x = train_dataset.n_bins_x
opt.n_bins_y = train_dataset.n_bins_y
opt.n_bins_z = train_dataset.n_bins_z
opt.n_joints = train_dataset.n_joints
model = dict()
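    # the model is assembled from two parts: a feature backbone and a pose classifier head fed from it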
model['backbone'] = BackBone(opt, spatial_size=7)
model['classifier'] = PoseClassifier(opt, in_feat=model['backbone'].out_feats, h=model['backbone'].out_feat_h)
opt.bn_momentum = 0.1
opt.bn_decay = 0.9
model_dict=None
if opt.load_model != 'none':
model_dict = torch.load(opt.load_model)
model['backbone'].load_state_dict(model_dict['backbone'])
model['classifier'].load_state_dict(model_dict['pose'])
if opt.data_par is True:
print('Using data parallel')
model['backbone'] = torch.nn.DataParallel(model['backbone'])
model['backbone'].to(torch.device("cuda:0")) # change device here if we want
model['classifier'].to(torch.device("cuda:0"))
if opt.test is True:
run_epoch(1, opt, test_loader, model, optimizer=None, split='test')
exit(0)
optimizer = torch.optim.Adam(list(model['backbone'].parameters()) + list(model['classifier'].parameters()),
opt.lr,
betas=(0.9, 0.999), #can use beta from opt
weight_decay=0.00,
amsgrad=False,
)
if opt.resume is True:
assert model_dict is not None
optimizer.load_state_dict(model_dict['optimiser_emb'])
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opt.lr_step, gamma=0.1)
logger = Logger(opt.save_dir + '/logs')
opt_metric_val = 9999.0
opt.global_step = 0
for epoch in range(1, opt.n_epochs + 1):
loss_train = 0
nmpjpe_train = 0
mpjpe_train = 0
result_train = run_epoch(epoch, opt, train_loader, model, optimizer=optimizer,
split='train')
if epoch % opt.val_intervals == 0:
result_val = run_epoch(epoch, opt, test_loader, model, optimizer=None,
split='test')
logger.write('LTr {:.3f} AccTr {:.2f} AccXTr {:.2f} AccYTr {:.2f} AccZTr {:.2f} LVal {:.5f} '
'AccVal {:.2f} AccXVal {:.2f} AccYVal {:.2f} AccZVal {:.2f}\n'.format(result_train['loss_class'],
-result_train['acc'], -result_train['acc_x'], -result_train['acc_y'], -result_train['acc_z'],
result_val['loss_class'], -result_val['acc'],
-result_val['acc_x'], -result_val['acc_y'], -result_val['acc_z']))
print('Saving last epoch model')
save_last_model = dict()
if opt.data_par is True:
save_last_model['backbone'] = model['backbone'].module.state_dict()
else:
save_last_model['backbone'] = model['backbone'].state_dict()
save_last_model['classifier'] = model['classifier'].state_dict()
save_last_model['tr_stat'] = train_dataset.train_stats
save_last_model['optimiser'] = optimizer.state_dict()
torch.save(save_last_model, opt.save_dir + '/model_last.pth')
metric_val = result_val[opt.e_metric] # Metric value from opt
if opt_metric_val > metric_val:
print('Saving model best model')
logger.write('Saving best model\n')
save_best_model = dict()
if opt.data_par is True:
save_best_model['backbone'] = model['backbone'].module.state_dict()
else:
save_best_model['backbone'] = model['backbone'].state_dict()
save_best_model['classifier'] = model['classifier'].state_dict()
save_best_model['tr_stat'] = train_dataset.train_stats
torch.save(save_best_model, opt.save_dir + '/model_best.pth')
opt_metric_val = metric_val
else:
logger.write('LTr {:.3f} AccTr {:.2f} AccXTr {:.2f} AccYTr {:.2f} AccZTr {:.2f}\n'.format(
result_train['loss_class'], -result_train['acc'], -result_train['acc_x'], -result_train['acc_y'],
-result_train['acc_z']))
opt.global_step = opt.global_step + 1
if opt.global_step % opt.lr_step == 0:
print('Applying LR Decay')
scheduler.step()
# opt.bn_momentum = opt.bn_momentum * opt.bn_decay
logger.close()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
2eded4b579f1fa29c2fca781ce41275089ee6883 | 27ece9ab880a0bdba4b2c053eccda94602c716d5 | /.history/tf_regression_logistic_20181129224035.py | 042940152a06ad085508f84df2ea7b7c812b8fbf | [] | no_license | Symfomany/keras | 85e3ad0530837c00f63e14cee044b6a7d85c37b2 | 6cdb6e93dee86014346515a2017652c615bf9804 | refs/heads/master | 2020-04-08T20:21:35.991753 | 2018-11-30T08:23:36 | 2018-11-30T08:23:36 | 159,695,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,428 | py | import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os, argparse
"""
Any interaction with your filesystem to save persistent data in TF needs a Saver object and a Session object.
The Saver constructor allows you to control many things, among which one is important:
The var_list: Defaults to None; this is the list of variables you want to persist to your filesystem.
You can either choose to save all the variables, some variables or even a dictionary to give custom names to your variables.
The Session constructor allows you to control 3 things:
+ The target: This is used in case of a distributed architecture to handle computation. You can specify which TF server or ‘target’ you want to compute on.
+ The graph: the graph you want the Session to handle. The tricky thing for beginners is the fact that there is always a default Graph in TF where all operations are set by default, so you are always in a “default Graph scope”.
+ The config: You can use ConfigProto to configure TF. Check the linked source for more details.
The Saver can handle the saving and loading (called restoring) of your Graph metadata and your Variables data.
To do that, it adds operations inside the current Graph that will be evaluated within a session.
By default, the Saver will handle the default Graph and all its included Variables,
but you can create as much Savers as you want to control any graph or subgraph and their variables.
If you look at your folder, it actually creates 3 files per save call and a checkpoint file,
I’ll go into more details about this in the annexe.
You can go on just by understanding that weights are saved into .data files and your graph
and metadata are saved into the .meta file.
Note: You must be careful to use a Saver with a Session linked to the Graph containing all the variables the Saver is handling.😨
To restore a meta checkpoint, use the TF helper import_meta_graph:
import tensorflow as tf
# This function returns a Saver
saver = tf.train.import_meta_graph('results/model.ckpt-1000.meta')
graph = tf.get_default_graph()
# Finally we can retrieve tensors, operations, collections, etc.
global_step_tensor = graph.get_tensor_by_name('loss/global_step:0')
train_op = graph.get_operation_by_name('loss/train_op')
hyperparameters = tf.get_collection('hyperparameters')
with tf.Session() as sess:
# To initialize values with saved data
    saver.restore(sess, 'results/model.ckpt-1000')
print(sess.run(global_step_tensor)) # returns 1000
"""
dir = os.path.dirname(os.path.realpath(__file__))
def freeze_graph(model_dir, output_node_names):
"""Extract the sub graph defined by the output nodes and convert
all its variables into constant
Args:
model_dir: the root folder containing the checkpoint state file
output_node_names: a string, containing all the output node's names,
comma separated
"""
if not tf.gfile.Exists(model_dir):
raise AssertionError(
"Export directory doesn't exists. Please specify an export "
"directory: %s" % model_dir)
if not output_node_names:
print("You need to supply the name of a node to --output_node_names.")
return -1
# We retrieve our checkpoint fullpath
checkpoint = tf.train.get_checkpoint_state(model_dir)
input_checkpoint = checkpoint.model_checkpoint_path
# We precise the file fullname of our freezed graph
absolute_model_dir = "/".join(input_checkpoint.split('/')[:-1])
output_graph = absolute_model_dir + "/frozen_model.pb"
# We clear devices to allow TensorFlow to control on which device it will load operations
clear_devices = True
# We start a session using a temporary fresh Graph
with tf.Session(graph=tf.Graph()) as sess:
# We import the meta graph in the current default Graph
saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)
# We restore the weights
saver.restore(sess, input_checkpoint)
# We use a built-in TF helper to export variables to constants
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes
output_node_names.split(",") # The output node names are used to select the usefull nodes
)
# Finally we serialize and dump the output graph to the filesystem
with tf.gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
return output_graph_def
def get_dataset():
"""
Method used to generate the dataset
"""
# Numbers of row per class
row_per_class = 100
# Generate rows
sick = np.random.randn(row_per_class, 2) + np.array([-2, -2])
sick_2 = np.random.randn(row_per_class, 2) + np.array([2, 2])
healthy = np.random.randn(row_per_class, 2) + np.array([-2, 2])
healthy_2 = np.random.randn(row_per_class, 2) + np.array([2, -2])
features = np.vstack([sick, sick_2, healthy, healthy_2])
targets = np.concatenate((np.zeros(row_per_class * 2), np.zeros(row_per_class * 2) + 1))
targets = targets.reshape(-1, 1)
return features, targets
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", type=str, default="models", help="Model folder to export")
parser.add_argument("--output_node_names", type=str, default="tf_models", help="The name of the output nodes, comma separated.")
args = parser.parse_args()
features, targets = get_dataset()
# Plot points
#plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)
#plt.show()
tf_features = tf.placeholder(tf.float32, shape=[None, 2])
tf_targets = tf.placeholder(tf.float32, shape=[None, 1])
# First
w1 = tf.Variable(tf.random_normal([2, 3]))
b1 = tf.Variable(tf.zeros([3]))
# Operations
z1 = tf.matmul(tf_features, w1) + b1
a1 = tf.nn.sigmoid(z1)
# Output neuron
w2 = tf.Variable(tf.random_normal([3, 1]))
b2 = tf.Variable(tf.zeros([1]))
# Operations
z2 = tf.matmul(a1, w2) + b2
py = tf.nn.sigmoid(z2)
cost = tf.reduce_mean(tf.square(py - tf_targets))
correct_prediction = tf.equal(tf.round(py), tf_targets)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for e in range(100):
sess.run(train, feed_dict={
tf_features: features,
tf_targets: targets
})
print("accuracy =", sess.run(accuracy, feed_dict={
tf_features: features,
tf_targets: targets
}))
# We can check easily that we are indeed in the default graph
print(z1.graph == tf.get_default_graph())
# By default, the Saver handles every Variables related to the default graph
all_saver = tf.train.Saver()
all_saver.save(sess, args.model_dir + '/data')
# freeze_graph(args.model_dir, args.output_node_names)
| [
"[email protected]"
] | |
34b88ab5a4de0838ef54b4e88dd0efc0a23cc1bc | 1e177ebdcb470f738c058606ac0f86a36085f661 | /CPRG-104/Assign03/socket-client.03.02.py | 45997a0555e0949378f8f1c3c1bb727dbb771f66 | [] | no_license | robingreig/raspi-git | 5cbdd295c1048a0571aa2c2f8576438269439f07 | 7373bf94557d7a88c8f343362ba64f9cd19c8ce7 | refs/heads/master | 2023-08-31T03:16:17.286700 | 2023-08-26T11:54:23 | 2023-08-26T11:54:23 | 16,873,881 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | import socket
import time
ClientSocket = socket.socket()
host = '127.0.0.1'
#host = '192.168.200.138'
port = 1233
print('Waiting for connection')
try:
ClientSocket.connect((host, port))
except socket.error as e:
print(str(e))
Response = ClientSocket.recv(1024)
count = 0
while count < 31:
clientNumber = str(2)
ClientSocket.send(str.encode(clientNumber))
Response = ClientSocket.recv(1024)
responseString = Response.decode('utf-8')
print('Server Sends: '+ Response.decode('utf-8'))
time.sleep(1)
count += 1
ClientSocket.close()
| [
"[email protected]"
] | |
69713713ecf2bdecf41619c598395a574530077f | a174bf57c6d3b48f436402a3b825248c395934f4 | /accounts/urls.py | 1587ef94b90abd79cc47920f3cc5eacb95cd7425 | [] | no_license | pymq/ldap_change_password | b56cef07ecec4fb0b2967ebdd9b79ac455a55fb5 | 2db82b392713e6675f3f50aeca93c6666b880f27 | refs/heads/master | 2020-03-07T13:59:38.600728 | 2018-03-31T08:49:41 | 2018-03-31T08:49:41 | 127,515,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | from django.conf.urls import url
from django.views import generic
from .views import change_password
urlpatterns = [
url(r'^change-password/$', change_password),
url(r'^change-password/success/$', generic.TemplateView.as_view(template_name='accounts/success.html'), name='success'),
]
| [
"[email protected]"
] | |
38d0919f4d849377df4623f627e45260aa1d066d | d3ba68335668797d4d2afed4f7dd92c6210dbf87 | /tests/test_helpers.py | 8c1f88e7d5ad1000f3a5feefa18b6b1f818aea34 | [
"MIT"
] | permissive | tusharve/cellpy | 9dbd75c9e97893c1293808dd9dbcff8232f6b3cc | 554866ead452d9f96e6ff65172eb19ff68d39276 | refs/heads/master | 2023-07-31T12:57:52.216694 | 2021-08-29T10:42:28 | 2021-08-29T10:42:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,797 | py | import pytest
import logging
import pandas as pd
from cellpy import log
from cellpy.utils import helpers
from . import fdv
from cellpy.exceptions import NullData
log.setup_logging(default_level=logging.DEBUG)
@pytest.fixture
def cell():
from cellpy import cellreader
d = cellreader.CellpyData()
d.load(fdv.cellpy_file_path)
return d
def test_split_experiment(cell):
list_of_all_cycles = cell.get_cycle_numbers()
c1, c2 = helpers.split_experiment(cell, 10)
list_of_first_cycles = c1.get_cycle_numbers()
list_of_last_cycles = c2.get_cycle_numbers()
assert all(list_of_first_cycles == range(1, 10))
assert list_of_all_cycles[-1] == list_of_last_cycles[-1]
def test_split_experiment_new(cell):
list_of_all_cycles = cell.get_cycle_numbers()
c1, c2 = cell.split(10)
list_of_first_cycles = c1.get_cycle_numbers()
list_of_last_cycles = c2.get_cycle_numbers()
assert all(list_of_first_cycles == range(1, 10))
assert list_of_all_cycles[-1] == list_of_last_cycles[-1]
def test_select_summary_based_on_rate(cell):
cell.make_step_table(add_c_rate=True)
filtered_summary = helpers.select_summary_based_on_rate(cell, 0.04)
assert len(filtered_summary) == 3
def test_remove_outliers_on_index(cell):
last = cell.get_cycle_numbers()[-1]
s1 = helpers.remove_outliers_from_summary_on_index(cell.cell.summary, indexes=[15])
s2 = helpers.remove_outliers_from_summary_on_index(
cell.cell.summary, indexes=[15], remove_last=True
)
assert 14 in s1.index
assert 15 not in s1.index
assert last in s1.index
assert last not in s2.index
assert 15 not in s2.index
def test_concatenate_summaries(cell):
# the function should be moved to batch utils and the tests are in test_batch
pass
| [
"[email protected]"
] | |
7f047e70e45dbef17099c57e8dd19192383df9cf | e7669caf1a4ce9053bb720bcfa2d85a4ee35ea65 | /jade/rosetta_jade/SetupRosettaOptionsBenchmark.py | 653ddcca461dfbecc36094e66ec625695fb1a017 | [
"BSD-3-Clause"
] | permissive | SchiefLab/Jade | f81190321df061f8317db869a64a7b87a13d664a | 791d26892aacf21bca733aa9c1c6ea3f2be51958 | refs/heads/master | 2023-01-10T15:14:08.431470 | 2018-11-07T17:11:02 | 2018-11-07T17:11:02 | 41,381,258 | 7 | 2 | NOASSERTION | 2022-12-26T20:35:53 | 2015-08-25T18:39:20 | Python | UTF-8 | Python | false | false | 4,999 | py | import os
import sys
from jade.rosetta_jade.SetupRosettaOptionsGeneral import SetupRosettaOptionsGeneral
class SetupRosettaOptionsBenchmark(SetupRosettaOptionsGeneral):
"""
Class for setting up Rosetta Benchmarks. See database/rosetta/benchmark_jsons_rabd/nstruct_test.json for an example.
Basically, a set of benchmarks and rosetta options are given in the JSON.
Other keys can be specified for specific benchmarks (like the instructions file stuff in the above file.)
    This makes it possible to use a single JSON file and run RosettaMPI on ALL combinations of benchmarks given.
"""
def __init__(self, json_file):
SetupRosettaOptionsGeneral.__init__( self, json_file)
self.key_benchmarks = "benchmarks"
self.key_rosetta_option = "rosetta_option"
self.key_use_for_prefix = "use_for_prefix"
self.key_use_for_outdir = "use_for_outdir"
def get_exp(self):
"""
Get the benchmark name or fail.
:rtype: str
"""
if self.json_dict.has_key("exp"):
return self.json_dict["exp"]
else:
sys.exit("Please include the key 'exp' to define overall the name of the benchmarking experiment.")
def get_benchmark_names(self, only_rosetta = False):
"""
Get the names of all the benchmarks we will run.
Each benchmark must have a dictionary that defines 'benchmarks' as a list.
You may optionally give the rosetta_option.
Currently, your subclass of RunRosetta will need to code how all this is run. Hopefully, that will change.
If only_rosetta is true, will only give the benchmark names that are based on rosetta options.
For example:
"outer_cycle_rounds":{
"rosetta_option":"-outer_cycle_rounds",
"benchmarks":[ 25, 50, 75, 100]
},
:rtype: list
"""
benchmark_names = []
for key in self.json_dict:
if type(self.json_dict[key]) == dict and self.json_dict[key].has_key(self.key_benchmarks):
if only_rosetta and type(self.json_dict[key]) == dict and self.json_dict[key].has_key(self.key_rosetta_option):
benchmark_names.append(key)
elif not only_rosetta:
benchmark_names.append(key)
else:
continue
return benchmark_names
def get_non_rosetta_option_benchmark_names(self):
"""
Similar to get_benchmark_names, but only for options which do not have the tag rosetta_option
:rtype: list
"""
benchmark_names = []
for key in self.json_dict:
if type(self.json_dict[key]) == dict and self.json_dict[key].has_key(self.key_benchmarks):
if not self.json_dict[key].has_key(self.key_rosetta_option):
benchmark_names.append(key)
else:
continue
return benchmark_names
def get_benchmarks_of_key(self, benchmark_name):
"""
Get the list of benchmarks for a particular benchmark key.
:param benchmark_name: str
:rtype: list
"""
if not self.json_dict.has_key(benchmark_name):
sys.exit("Could not find benchmark name in json dict! "+benchmark_name)
else:
#In special circumstances, it may be a list (as for the CDRs), since they can both be a benchmark or all together.
try:
return self.json_dict[benchmark_name][self.key_benchmarks]
except TypeError:
return self.json_dict[benchmark_name]
def get_rosetta_option_of_key(self, benchmark_name):
"""
Get the Rosetta option
:param benchmark_name:
:rtype: str
"""
return self.json_dict[benchmark_name][self.key_rosetta_option]
def use_benchmark_for_outdir(self, benchmark):
"""
Should we use the benchmark name for output?
Specified by the 'use_for_outdir' in JSON.
If not specified, or benchmark not in list, we assume True!
:param benchmark: str
:rtype: bool
"""
if self.json_dict.has_key(benchmark):
if self.json_dict[benchmark].has_key(self.key_use_for_outdir):
if not self.json_dict[benchmark][self.key_use_for_outdir]:
return False
return True
def use_benchmark_for_prefix(self, benchmark):
"""
Should we use the benchmark name for prefix?
Specified by the 'use_for_prefix' in JSON.
If not specified, or benchmark not in list, we assume True!
:param benchmark: str
:rtype: bool
"""
if self.json_dict.has_key(benchmark):
if self.json_dict[benchmark].has_key(self.key_use_for_prefix):
if not self.json_dict[benchmark][self.key_use_for_prefix]:
return False
return True | [
"[email protected]"
] | |
083cf77136d7e18b14371c9361b4caf2bfe0d2b6 | fae30fe7f6b8a6db67b64b228847186ff96d3beb | /tests/test_task_2.py | 4c2f1caa9911ae44413de0b8d306756d18514e45 | [] | no_license | pshemekhinca/xyz-reqruitment_task | fbd108a519a262199bdf77453357a2521e2f054f | a4cc8ec1e643ab7503836c036f5fc7cb6cfa046b | refs/heads/master | 2023-07-10T21:03:54.890261 | 2021-08-16T11:19:03 | 2021-08-16T11:19:03 | 392,294,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | from task_2 import DigitsRangeError, combinations
import pytest
@pytest.fixture()
def sample_results():
results = combinations("23")
return results
def test_combination_function_result_type(sample_results):
assert type(sample_results) == list
@pytest.mark.parametrize('symbol, expected', [("1", []), ("*", []), ("#", []), ])
def test_return_empty_list_if_given_out_of_range(symbol, expected):
result = combinations(symbol)
assert result == expected
@pytest.mark.parametrize('digit, expected', [
("2", ['a', 'b', 'c']),
("9", ['w', 'x', 'y', 'z']),
("0", ['+']),
])
def test_return_digit_mapped_letters_list(digit, expected):
result = combinations(digit)
assert result == expected
@pytest.mark.parametrize('digit, expected', [("234", 27), ("5432", 81), ("33", 9), ])
def test_correct_result_list_according_to_given_number(digit, expected):
result = combinations(digit)
assert len(result) == expected
@pytest.mark.parametrize('number', ["23", "403", "5320"])
def test_if_result_list_contains_unique_elements(number):
results = combinations(number)
results_set = set(results)
assert len(results_set) == len(results)
@pytest.mark.parametrize('digit', ["123", "4*3", "5#"])
def test_error_raise_when_given_has_digits_or_signs_out_of_digits_range(digit):
with pytest.raises(DigitsRangeError):
combinations(digit) | [
"[email protected]"
] | |
9bbc917d6a737f2a2a556f04f038a3de181d1dac | 2bdc553a131a8ef08a6433150d1e853ed5a67d5f | /Services/ContentderAI.SimilarWord/similarword/asgi.py | 07957e59441dc9510b7eeedee60a2be2970545f9 | [] | no_license | Rushika193/Project | 3e31d8aa9705a524398991d7e93b177d7d427b3c | 3991d9fb1fb1e83cd144d6e0108107a975180a17 | refs/heads/master | 2023-02-27T14:02:00.625331 | 2021-02-09T12:08:12 | 2021-02-09T12:08:12 | 337,383,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
ASGI config for similarword project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'similarword.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
11c68b975c4b187dc8ea4b634c849a519d049d41 | 2e96336f7a17b63a4b5ceaef9b8a519732e7b9df | /my_website/canopy/Touchdowns.py | f5f0e547c8a76a827141b164480b56ee75422dc0 | [] | no_license | danechris/danechris.github.io | a67d4b35ada0d2f7906ea5a21e8665c0d67001a9 | 83f5e9d1feba6a8e2015242813619dbdc09577fd | refs/heads/master | 2021-09-15T02:58:09.819757 | 2018-05-24T15:17:19 | 2018-05-24T15:17:19 | 104,788,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | import matplotlib.pyplot as plt
import os.path
import os.path
directory = os.path.dirname(os.path.abspath(__file__))
#Open the file
filename = os.path.join(directory, 'touchdowns.csv')
datafile = open(filename,'r')
data = datafile.readlines()
#Creates empty list for weight values and touchdowns scored values
xvalues=[]
yvalues=[]
#Appends the values to the respective list
for line in data:
weight, touchdown = line.split(',')
xvalues.append(int(weight))
yvalues.append(int(touchdown))
#Creates an axis for the histogram
fig, ax = plt.subplots (1,1)
#Creates a bar graph with certain color and width
ax.bar(xvalues, yvalues, color='#3366FF', width=7)
#Sets axes titles and graph title
ax.set_title('Weight to Number of Touchdowns Scored (NFL Wide Receievers)')
ax.set_xlabel('Player Weight (Pounds)')
ax.set_ylabel('Number of Touchdowns Scored')
#Changes frequency of y ticks to more accurately read graph
ax.set_yticks([0,100,200,300,400,500,600,700,800,900,1000])
fig.show() | [
"[email protected]"
] | |
49f939b441f3b3012567786f0d0053ec92ddf2a1 | aa2b5edc23d5c888a90fb39a2f55bfa9498e3410 | /twentyfourw_dev_2116/settings.py | fbbbe62df409feac63f9bd25f03c583e1bd21244 | [] | no_license | crowdbotics-apps/twentyfourw-dev-2116 | d19c5c346f0e0f7f11265ccaab4553623658dc26 | afda378201bdab9c233c4168725162d9db6cc68f | refs/heads/master | 2022-12-14T13:43:31.071106 | 2020-03-24T09:53:58 | 2020-03-24T09:53:58 | 249,655,237 | 0 | 0 | null | 2022-12-08T03:53:27 | 2020-03-24T08:42:25 | Python | UTF-8 | Python | false | false | 5,496 | py | """
Django settings for twentyfourw_dev_2116 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "twentyfourw_dev_2116.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "twentyfourw_dev_2116.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
dff11dee1d715de04963bbea26440111eadd59a1 | 98ddd36de43e933e573dceaa33836ede0233d72a | /netgraph/graph.py | e9eadd11511962f10803dbe1240a07432c5f61fd | [] | no_license | Arseny-N/NetGraph | 8e029f5ce29fe119e665822537562fa52f5f745b | 9fbd742d22f2e1f75914baff61fb9bbc6ae15554 | refs/heads/master | 2020-08-29T18:22:14.535256 | 2019-10-28T20:31:43 | 2019-10-28T20:31:43 | 218,127,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,694 | py |
from pathlib import Path
class FileGraphBase:
def __init__(self):
self.stats = {
'nodes' : 0,
'edges' : 0
}
class GMLGraph(FileGraphBase):
def __init__(self, file):
super().__init__()
self.file = open(Path(file).expanduser(), 'w')
self.write( "graph [\n\tdirected 1\n")
def write(self, str):
self.file.write(str)
def close(self):
self.write("]\n")
self.file.close()
def add_nodes_from(self, nodes, **props):
props_str = '\n'.join( "\t\t" + key + " " + str(val) for key, val in props.items())
for node_id, attrs in nodes:
attrs_str = []
for key, val in attrs.items():
if key == 'pos':
attrs_str.append("\t\tgraphics [\n" + \
f"\t\t\tx {val[0]}\n" + \
f"\t\t\ty {val[1]}\n" + \
f"\t\t\tz {val[2]}\n" + \
"\t\t]"
)
else:
attrs_str.append("\t\t" + key + " " + str(val))
attrs_str = '\n'.join(attrs_str)
self.write(f"\tnode [\n\t\tid {node_id}\n{attrs_str}\n{props_str}\n\t]\n")
def add_edges_from(self, edges, **props):
props_str = '\n'.join( "\t\t" + key + " " + str(val) for key, val in props.items())
for source, target, attrs in edges:
attrs_str = '\n'.join( "\t\t" + key + " " + str(val) for key, val in attrs.items() )
self.write(f"\tedge [\n\t\tsource {source}\n\t\ttarget {target}\n{attrs_str}\n{props_str}\n\t]\n")
class CSVGraph(FileGraphBase):
def __init__(self, dir, prefix, node_props, edge_props):
super().__init__()
assert 'node_id' == node_props[0]
        assert 'Source' == edge_props[0] and 'Target' == edge_props[1]
        # open the CSV files for writing and emit the quoted header rows
        self.nodes_file = open(Path(dir).expanduser() / (prefix + 'nodes.csv'), 'w')
        self.nodes_file.write(','.join(map(lambda x: "'" + x + "'", node_props)))
        self.edges_file = open(Path(dir).expanduser() / (prefix + 'edges.csv'), 'w')
        self.edges_file.write(','.join(map(lambda x: "'" + x + "'", edge_props)))
def add_nodes_from(self, nodes, **props):
raise NotImplementedError()
def add_edges_from(self, edges, **props):
raise NotImplementedError()
import h5py
class HDF5Graph(FileGraphBase):
def __init__(self, file, node_attrs, edge_attrs):
self.f = h5py.File(Path(file).expanduser(), 'w')
self.edges_ds = self.f.create_dataset('edges', (10_000, 2), chunks=True, maxshape=(None, 2), dtype="i")
self.edges_attrs_ds = self.f.create_dataset('edges_attr', (10_000, len(edge_attrs)), chunks=True, maxshape=(None, len(edge_attrs)), dtype="f")
self.edges_num = 0
self.nodes_ds = self.f.create_dataset('nodes', (10_000, len(node_attrs)), chunks=True, maxshape=(None, len(node_attrs)), dtype="f")
self.node_attrs_to_ix = { attr : ix for ix, attr in enumerate(node_attrs) }
self.edge_attrs_to_ix = { attr : ix for ix, attr in enumerate(edge_attrs) }
self.tensor_name_to_ix = {}
def add_nodes_from(self, nodes, **props):
for node_id, attrs in nodes:
n, m = self.nodes_ds.shape
if n <= node_id:
self.nodes_ds.resize(n + 10_000, axis=0)
for name, attr in attrs.items():
self.nodes_ds[node_id, self.node_attrs_to_ix[name]] = attr
for name, attr in props.items():
if name == 'tensor_name':
if name in self.tensor_name_to_ix:
attr = self.tensor_name_to_ix[attr]
else:
attr = self.tensor_name_to_ix[attr] = len(self.tensor_name_to_ix)
self.nodes_ds[node_id, self.node_attrs_to_ix[name]] = attr
def add_edges_from(self, edges, **props):
for source, target, attrs in edges:
self.edges_num += 1
edge_id = self.edges_num
n, m = self.edges_ds.shape
if n <= edge_id:
self.edges_ds.resize(n + 10_000, axis=0)
self.edges_attrs_ds.resize(n + 10_000, axis=0)
self.edges_ds[edge_id, 0] = source
self.edges_ds[edge_id, 1] = target
for name, attr in attrs.items():
self.edges_attrs_ds[edge_id, self.edge_attrs_to_ix[name]] = attr
for name, attr in props.items():
self.edges_attrs_ds[edge_id, self.edge_attrs_to_ix[name]] = attr | [
"[email protected]"
] | |
729b9db599e1ace18b90c24f15c7030a86dbc982 | 4680ff36ddf42a999b87a3fe085b5ccd12ba98cc | /core/admin.py | 69cb450d88d2374dd81b7a97d4ecd059f0554108 | [] | no_license | 2-sha/back_atomhack | a8b43eaaa304b808bb341cb0d8f35445052ff480 | bae5c6ffcf209e093e024594e676d5f89ddcdaef | refs/heads/master | 2023-07-17T14:27:15.134528 | 2021-08-29T12:04:42 | 2021-08-29T12:04:42 | 400,773,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | from django.contrib import admin
from core.models import TagPerk, Perk, Tag
from user.models import UserPerk
@admin.register(Perk)
class PerkAdmin(admin.ModelAdmin):
list_display = ['name']
search_fields = ('name', )
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
list_display = ['name']
search_fields = ('name', )
@admin.register(TagPerk)
class TagPerkAdmin(admin.ModelAdmin):
list_display = ['tag', 'perk', 'level']
| [
"[email protected]"
] | |
49297290bb8dc8eb2343338ee4a3f84bf3797b9f | 75ad8b428e56306b54ee71ae203e9a45f8e3de29 | /tests/test_project.py | ac4d8c3d178ff2a7ce2ad8164827f461301c7d32 | [] | no_license | toolness/fleeting | 09b9d0c6ed0fea723bdeb855d5c986d7488056ce | 3c4b36d32226145ef65450e41c58ca8567cd41cb | refs/heads/master | 2020-04-01T12:43:36.441109 | 2013-06-15T23:33:20 | 2013-06-15T23:33:20 | 9,755,260 | 2 | 0 | null | 2013-06-15T11:37:29 | 2013-04-29T18:40:50 | JavaScript | UTF-8 | Python | false | false | 17,101 | py | import unittest
import json
from StringIO import StringIO
import mock
from boto.exception import BotoServerError
from fleeting import project
from fleeting.tempcache import DictTempCache
def create_mock_instance(ec2, ready_tag=True):
tags = {
'fleeting:openbadges': json.dumps(dict(
git_user='toolness',
git_branch='experimentz',
slug='sluggy'
))
}
if ready_tag:
tags['fleeting:openbadges:ready'] = 'http://foo/'
instance = mock.MagicMock(
tags=tags,
state='running',
launch_time='2013-04-29T11:53:42.000Z'
)
res = mock.MagicMock(instances=[instance])
ec2.return_value.get_all_instances.return_value = [res]
return instance
def create_mock_autoscale_group(asc, instance_id=None, **kwargs):
if not kwargs:
ag = []
else:
if instance_id:
kwargs['instances'] = [mock.MagicMock(instance_id=instance_id)]
ag = [mock.MagicMock(**kwargs)]
asc.return_value.get_all_groups.return_value = ag
if ag:
return ag[0]
def create_mock_launch_config(asc):
lc = mock.MagicMock()
asc.return_value.get_all_launch_configurations.return_value = [lc]
return lc
def create_mock_http_response(http, status, content=''):
res = mock.MagicMock(status=status)
http.return_value.request.return_value = (res, content)
return res
def create_server_error(code):
err = BotoServerError('', '', '')
err.error_code = code
return err
class ProjectTests(unittest.TestCase):
def setUp(self):
project._ec2_conn = None
project._ec2_autoscale_conn = None
project.cache = DictTempCache(project.DEFAULT_CACHE_TTL)
def test_get_project_map_works(self):
pmap = project.get_project_map()
self.assertTrue('openbadges' in pmap)
def test_project_reads_meta_info(self):
proj = project.Project('openbadges')
self.assertEqual(proj.meta['name'], 'Open Badges Backpack')
self.assertEqual(proj.meta['repo'], 'mozilla/openbadges')
def test_get_instance_ready_url_works(self):
proj = project.Project('openbadges')
url = proj._get_instance_ready_url('boop.org')
self.assertEqual(url, 'http://boop.org:8888/')
@mock.patch('httplib2.Http')
def test_does_url_404_returns_true(self, http):
create_mock_http_response(http, status=404)
self.assertEqual(project.does_url_404('http://foo.org/'), True)
@mock.patch('httplib2.Http')
def test_does_url_404_returns_false(self, http):
create_mock_http_response(http, status=200)
self.assertEqual(project.does_url_404('http://foo.org/'), False)
@mock.patch('fleeting.project.does_url_404')
def test_create_instance_returns_on_bad_github_info(self, does_url_404):
does_url_404.return_value = True
proj = project.Project('openbadges')
r = proj.create_instance('z', 'uzer', 'branchu', 'key', ['default'])
self.assertEqual(r, 'INVALID_GIT_INFO')
does_url_404.assert_called_once_with(
'https://github.com/uzer/openbadges/tree/branchu'
)
@mock.patch('fleeting.project.AutoScaleConnection')
def test_destroy_instance_shuts_down_instances(self, asc):
proj = project.Project('openbadges')
ag = create_mock_autoscale_group(asc, instance_id='k', min_size=1)
ag.delete.side_effect = create_server_error('ResourceInUse')
self.assertEqual(proj.destroy_instance('ded'), 'SHUTDOWN_IN_PROGRESS')
@mock.patch('fleeting.project.AutoScaleConnection')
def test_destroy_instance_catches_misc_autoscale_errors(self, asc):
proj = project.Project('openbadges')
ag = create_mock_autoscale_group(asc, instance_id='k', min_size=1)
ag.delete.side_effect = create_server_error('Oopsie')
self.assertEqual(proj.destroy_instance('ded'), 'ERROR:Oopsie')
@mock.patch('fleeting.project.AutoScaleConnection')
def test_destroy_instance_catches_misc_launch_config_errors(self, asc):
proj = project.Project('openbadges')
ag = create_mock_autoscale_group(asc, instance_id='k', min_size=1)
lc = create_mock_launch_config(asc)
lc.delete.side_effect = create_server_error('Ack')
self.assertEqual(proj.destroy_instance('ded'), 'ERROR:Ack')
@mock.patch('fleeting.project.AutoScaleConnection')
def test_destroy_instance_returns_not_found(self, asc):
proj = project.Project('openbadges')
ascrv = asc.return_value
ascrv.get_all_groups.return_value = []
ascrv.get_all_launch_configurations.return_value = []
self.assertEqual(proj.destroy_instance('ded'), 'NOT_FOUND')
@mock.patch('fleeting.project.AutoScaleConnection')
def test_destroy_instance_adds_cache_entry(self, asc):
proj = project.Project('openbadges')
ag = create_mock_autoscale_group(asc, instance_id='k', min_size=1)
lc = create_mock_launch_config(asc)
proj.destroy_instance('buk')
self.assertEqual(project.cache.find('fleeting:openbadges:buk'), [{
'slug': 'buk',
'state': 'terminated'
}])
@mock.patch('fleeting.project.AutoScaleConnection')
def test_destroy_instance_returns_done(self, asc):
proj = project.Project('openbadges')
ag = create_mock_autoscale_group(asc, instance_id='k', min_size=1)
lc = create_mock_launch_config(asc)
self.assertEqual(proj.destroy_instance('buk'), 'DONE')
ag.delete.assert_called_once_with()
lc.delete.assert_called_once_with()
ascrv = asc.return_value
ascrv.get_all_groups.assert_called_once_with(
names=['fleeting_autoscale_openbadges_buk']
)
ascrv.get_all_launch_configurations.assert_called_once_with(
names=['fleeting_launchconfig_openbadges_buk']
)
@mock.patch('fleeting.project.AutoScaleConnection')
@mock.patch('fleeting.project.does_url_404', lambda x: False)
def test_create_instance_returns_when_instance_exists(self, asc):
proj = project.Project('openbadges')
ag = create_mock_autoscale_group(asc, instance_id='q', min_size=1)
r = proj.create_instance('z', 'uzer', 'branchu', 'key', ['default'])
self.assertEqual(r, 'INSTANCE_ALREADY_EXISTS')
asc.return_value.get_all_groups.assert_called_once_with(
names=['fleeting_autoscale_openbadges_z']
)
@mock.patch('fleeting.project.AutoScaleConnection')
@mock.patch('fleeting.project.does_url_404', lambda x: False)
def test_create_instance_adds_cache_entry(self, asc):
proj = project.Project('openbadges')
ag = create_mock_autoscale_group(asc, instances=[], min_size=0)
with mock.patch.object(proj, 'cleanup_instances') as ci:
proj.create_instance('z', 'uzer', 'branchu', 'key',
security_groups=['default'])
self.assertEqual(
project.cache.find('fleeting:openbadges:z'),
[dict(slug='z',
state='pending',
lifetime=86400.0,
git_user='uzer',
git_branch='branchu')]
)
@mock.patch('fleeting.project.AutoScaleConnection')
@mock.patch('fleeting.project.does_url_404', lambda x: False)
def test_create_instance_works(self, asc):
proj = project.Project('openbadges')
ag = create_mock_autoscale_group(asc, instances=[], min_size=0)
with mock.patch.object(proj, 'cleanup_instances') as ci:
r = proj.create_instance('z', 'uzer', 'branchu', 'key',
security_groups=['defaultr'],
notify_topic='notifytopik')
self.assertEqual(r, 'DONE')
ci.assert_called_once_with()
ascrv = asc.return_value
lc = ascrv.create_launch_configuration.call_args[0][0]
self.assertEqual(lc.name, 'fleeting_launchconfig_openbadges_z')
self.assertEqual(lc.key_name, 'key')
self.assertEqual(lc.security_groups, ['defaultr'])
self.assertTrue('uzer' in lc.user_data)
self.assertTrue('branchu' in lc.user_data)
# TODO: Ensure autoscale group tag is ok
ag = ascrv.create_auto_scaling_group.call_args[0][0]
self.assertEqual(ag.name, 'fleeting_autoscale_openbadges_z')
# TODO: Ensure conn.create_scheduled_group_action() is ok
# TODO: Ensure conn.put_notification_configuration() is ok
@mock.patch('fleeting.project.AutoScaleConnection')
def test_get_instance_status_returns_not_found(self, asc):
proj = project.Project('openbadges')
ag = create_mock_autoscale_group(asc)
self.assertEqual(proj.get_instance_status('zzz'),
('NOT_FOUND', None))
asc.return_value.get_all_groups.assert_called_once_with(
names=['fleeting_autoscale_openbadges_zzz']
)
@mock.patch('fleeting.project.AutoScaleConnection')
def test_get_instance_status_returns_instance_not_yet_exists(self, asc):
proj = project.Project('openbadges')
ag = create_mock_autoscale_group(asc, instances=[], min_size=1)
self.assertEqual(proj.get_instance_status('zzz'),
('INSTANCE_DOES_NOT_YET_EXIST', None))
@mock.patch('fleeting.project.AutoScaleConnection')
def test_get_instance_status_returns_instance_does_not_exist(self, asc):
proj = project.Project('openbadges')
ag = create_mock_autoscale_group(asc, instances=[], min_size=0)
self.assertEqual(proj.get_instance_status('zzz'),
('INSTANCE_DOES_NOT_EXIST', None))
@mock.patch('boto.connect_ec2')
def test_get_instance_returns_instance(self, ec2):
proj = project.Project('openbadges')
inst = create_mock_instance(ec2)
self.assertEqual(proj.get_instance('sluggy'), inst)
@mock.patch('httplib2.Http')
def test_get_instance_authserver_log_returns_nonempty_str(self, http):
proj = project.Project('openbadges')
create_mock_http_response(http, status=200, content='blah')
inst = mock.MagicMock(state='running', public_dns_name='foo.org')
self.assertEqual(proj.get_instance_authserver_log(inst), 'blah')
h = http.return_value
h.add_credentials.assert_called_once_with('fleeting', 'fleeting')
h.request.assert_called_once_with('http://foo.org:9312/log.txt')
@mock.patch('httplib2.Http')
def test_get_instance_authserver_log_returns_empty_str(self, http):
proj = project.Project('openbadges')
http.return_value.request.side_effect = Exception('funky socket err')
inst = mock.MagicMock(state='running', public_dns_name='foo.org')
self.assertEqual(proj.get_instance_authserver_log(inst), '')
def test_get_instance_log_returns_nonempty_str(self):
proj = project.Project('openbadges')
inst = mock.MagicMock()
inst.get_console_output.return_value.output = "LOL"
self.assertEqual(proj.get_instance_log(inst), "LOL")
def test_get_instance_log_returns_empty_str(self):
proj = project.Project('openbadges')
inst = mock.MagicMock()
inst.get_console_output.return_value.output = None
self.assertEqual(proj.get_instance_log(inst), "")
@mock.patch('boto.connect_ec2')
def test_get_instance_returns_none(self, ec2):
proj = project.Project('openbadges')
inst = create_mock_instance(ec2)
self.assertEqual(proj.get_instance('blop'), None)
@mock.patch('boto.connect_ec2')
@mock.patch('fleeting.project.AutoScaleConnection')
def test_get_instance_status_returns_ready_tag(self, asc, ec2):
proj = project.Project('openbadges')
ag = create_mock_autoscale_group(asc, instance_id='z', min_size=0)
inst = create_mock_instance(ec2)
self.assertEqual(proj.get_instance_status('zzz'),
('READY', 'http://foo/'))
ec2.return_value.get_all_instances.assert_called_once_with(['z'])
@mock.patch('boto.connect_ec2')
@mock.patch('fleeting.project.AutoScaleConnection')
def test_get_instance_status_pings_ready_url(self, asc, ec2):
proj = project.Project('openbadges')
ag = create_mock_autoscale_group(asc, instance_id='z', min_size=0)
inst = create_mock_instance(ec2, ready_tag=False)
with mock.patch.object(proj, '_ping_ready_url') as ping:
ping.return_value = ('HOORAY', 'cool')
self.assertEqual(proj.get_instance_status('zzz'),
('HOORAY', 'cool'))
ping.assert_called_once_with(inst)
def test_ping_ready_url_handles_no_public_dns_name(self):
inst = mock.MagicMock(state='running', public_dns_name=None)
self.assertEqual(project.Project('openbadges')._ping_ready_url(inst),
('INSTANCE:running', None))
@mock.patch('httplib2.Http')
def test_ping_ready_url_handles_http_exceptions(self, http):
inst = mock.MagicMock(state='running', public_dns_name='u.org')
http.return_value.request.side_effect = Exception('funky socket err')
self.assertEqual(project.Project('openbadges')._ping_ready_url(inst),
('INSTANCE:running', 'funky socket err'))
@mock.patch('httplib2.Http')
def test_ping_ready_url_handles_bad_http_status(self, http):
inst = mock.MagicMock(state='running', public_dns_name='u.org')
create_mock_http_response(http, status=500)
self.assertEqual(project.Project('openbadges')._ping_ready_url(inst),
('INSTANCE:running', 'status 500'))
@mock.patch('httplib2.Http')
def test_ping_ready_url_handles_good_http_status(self, http):
inst = mock.MagicMock(state='running', public_dns_name='u.org')
create_mock_http_response(http, status=200)
self.assertEqual(project.Project('openbadges')._ping_ready_url(inst),
('READY', 'http://u.org:8888/'))
inst.add_tag.assert_called_once_with('fleeting:openbadges:ready',
'http://u.org:8888/')
@mock.patch('boto.connect_ec2')
@mock.patch('fleeting.project.AutoScaleConnection')
def test_project_cleanup_instances_works(self, asc, ec2):
proj = project.Project('openbadges')
create_mock_instance(ec2)
ag = create_mock_autoscale_group(asc, min_size=0, instances=[])
lc = create_mock_launch_config(asc)
lc.delete.side_effect = Exception()
deleted, errors = proj.cleanup_instances()
self.assertEqual(deleted, 1)
self.assertEqual(errors, 1)
ec2.assert_called_once_with()
ec2.return_value.get_all_instances.assert_called_once_with(
filters={'instance-state-name': ['terminated'],
'tag-key': 'fleeting:openbadges'}
)
asc.return_value.get_all_groups.assert_called_once_with(
names=[u'fleeting_autoscale_openbadges_sluggy']
)
asc.return_value.get_all_launch_configurations.assert_called_once_with(
names=[u'fleeting_launchconfig_openbadges_sluggy']
)
ag.delete.assert_called_once_with()
lc.delete.assert_called_once_with()
@mock.patch('boto.connect_ec2')
def test_project_get_instances_works(self, connect_ec2):
create_mock_instance(connect_ec2)
proj = project.Project('openbadges')
self.assertEqual(proj.get_instances(), [{
'url': 'http://foo/',
'state': 'running',
'git_user': 'toolness',
'git_branch': 'experimentz',
'slug': 'sluggy',
'git_branch_url': 'https://github.com/toolness/openbadges/tree/experimentz',
'launch_time': '2013-04-29T11:53:42.000Z'
}])
@mock.patch('boto.connect_ec2')
def test_project_get_instances_adds_cache_entries(self, connect_ec2):
proj = project.Project('openbadges')
project.cache['fleeting:openbadges:foo'] = dict(
slug='foo',
state='pending',
lifetime=86400.0,
git_user='uzer',
git_branch='branchu'
)
self.assertEqual(proj.get_instances(), [{
'state': 'pending',
'git_user': 'uzer',
'git_branch': 'branchu',
'lifetime': 86400.0,
'slug': 'foo',
'git_branch_url': 'https://github.com/uzer/openbadges/tree/branchu'
}])
@mock.patch('boto.connect_ec2')
def test_project_get_instances_removes_cache_entries(self, connect_ec2):
create_mock_instance(connect_ec2)
proj = project.Project('openbadges')
project.cache['fleeting:openbadges:sluggy'] = dict(
slug='sluggy',
state='terminated'
)
self.assertEqual(proj.get_instances(), [])
| [
"[email protected]"
] | |
73214de4e0d90d7fdc56f9a97deb0efeb602ec96 | 9be5e8d96f49b3accb5e1bca2ab3565252f7d6d6 | /womens_health/Users/utils.py | 217f4f0097982695547945a69c5919ed6a60ab77 | [] | no_license | KIngSimeone/womens-health | 6dbebd24f022d101ffc39cfa1ef00446f8ab583d | e458d7d92aa9b05b8565d2b30f3ce384c203e001 | refs/heads/main | 2023-06-22T09:10:33.510602 | 2021-07-22T14:01:54 | 2021-07-22T14:01:54 | 387,877,171 | 0 | 0 | null | 2021-07-22T12:33:29 | 2021-07-20T18:13:26 | Python | UTF-8 | Python | false | false | 5,579 | py | import logging
import secrets
import uuid
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.hashers import check_password, make_password
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.utils import timezone
from .models import Patient, UserAccessTokens
# Get an instance of a logger
logger = logging.getLogger(__name__)
def getExpiresAt(minutes=None):
if not minutes:
return (timezone.now() + timedelta(minutes=eval(settings.DURATION)))
return (timezone.now() + timedelta(minutes=minutes))
def getPatientById(patientId):
"""return patient by id"""
try:
patient = Patient.objects.get(id=patientId)
return patient, "success"
except ObjectDoesNotExist as e:
logger.error(f"Patient with ID: {patientId} does not exist")
logger.error(e)
return None, str(e)
def getPatientByPhone(phone):
"""retrieve patient by phone"""
try:
patient = Patient.objects.get(phone=phone)
return patient
except ObjectDoesNotExist as e:
logger.error(f"Patient with phone: {phone} does not exist")
logger.error(e)
return None
def getPatientByEmail(email):
"""retrieve patient by email"""
try:
patient = Patient.objects.get(email=email)
return patient
except ObjectDoesNotExist as e:
logger.error(f"Patient with email: {email} does not exist")
logger.error(e)
return None
def getPatientByInputs(phone, email):
"""retrieve single patient record"""
if getPatientByPhone(phone) is not None:
return False, f"Staff with same phone already exists: {phone}"
if getPatientByEmail(email) is not None:
return False, f"Staff with email already exists: {email}"
return True, "success"
def createPatient(firstname, lastname, email, phone, password, birthday):
"""creating new patient"""
try:
patient = Patient.objects.create(
id=int(str(uuid.uuid4().int)[::6]),
first_name=firstname,
last_name=lastname,
phone=phone,
email=email,
password=make_password(password),
birthday=birthday
)
return patient, "success"
except Exception as e:
logger.error(
"createPatient@Error :: Error occurred while creating the patient")
logger.error(e)
return None, str(e)
def authenticateUser(userIdentity, password):
"""Authenticate user"""
try:
# retrieve user
users = Patient.objects.filter(
Q(phone__iexact=userIdentity) | Q(email__iexact=userIdentity))
if users.count() > 0:
existingUser = users[0]
# compare if both hashes are the same
if check_password(password, existingUser.password):
existingUser.last_active_on = timezone.now()
existingUser.save()
return existingUser
return None
except Exception as e:
logger.error(
"authenticateUser@Error :: Error occurred while authenticating user")
logger.error(e)
return None
def generateUserAccessToken(user):
"""Generate new access token for user"""
try:
userId = user.id
existingAccessToken = None
try:
# retrieve user access token record if it exists
existingAccessToken = UserAccessTokens.objects.get(user_id=userId)
# check if existingAccessToken hasn't expired
if existingAccessToken.expires_at > timezone.now():
return existingAccessToken.access_token, "success"
except ObjectDoesNotExist:
pass
# enroute to create a new access token
if existingAccessToken:
# delete existingToken before creating a new
existingAccessToken.delete()
# create a new record
generatedToken = secrets.token_urlsafe()
userAccessTokenRecord = UserAccessTokens(user_id=userId,
expires_at=getExpiresAt(),
access_token=generatedToken
)
# commit to DB
userAccessTokenRecord.save()
return generatedToken, 'success'
except Exception as e:
logger.error(
"generateUserAccessToken@Error :: Error occurred while generating user access token")
logger.error(e)
return None, str(e)
def getUserByAccessToken(accessToken):
"""Get user by the access token"""
try:
filteredTokens = UserAccessTokens.objects.filter(
access_token=accessToken)
if filteredTokens.count() > 0:
accessTokenRecord = filteredTokens[0]
if accessTokenRecord.expires_at > timezone.now():
associatedUserId = accessTokenRecord.user_id
associatedUser, msg = getPatientById(associatedUserId)
if associatedUser is not None:
associatedUser.last_active_on = timezone.now()
# push token expiry date forward
minutes = 60
accessTokenRecord.expires_at = getExpiresAt(minutes)
accessTokenRecord.save()
return associatedUser
return None
except Exception as e:
logger.error('getUserByAccessToken@Error')
logger.error(e)
return None
| [
"[email protected]"
] | |
970c77b0a70cde03ffb2411a4d0c339de9a5d195 | 052a3e59d6d28ef2c0c7af16cb43ab5dd8a122ee | /excelbt/vbproject.py | 7f26661d22684f0bd6f380bbd765838e78dfeb6b | [] | no_license | feisuo/excelbt | fef48a70358963c21f229894a2734d358777b943 | 92b160aaa384de9b874a77a23cc879e91692bc77 | refs/heads/master | 2021-05-27T01:07:30.444975 | 2012-01-12T02:07:32 | 2012-01-12T02:07:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | import os
class Module(object):
extension = '.bas'
def __init__(self, name, code):
self.name = name
self.code = code
def export(self, path):
destination = os.path.join(path, self.filename)
        open(destination, 'w+').write(self.code)
return self.filename
@property
def filename(self):
return self.name + self.extension
class ClassModule(Module):
extension = '.cls'
class VBProject(object):
def __init__(self, modules=None):
self.modules = modules or []
self.references = []
def add_module(self, module):
assert module not in self.modules
self.modules.append(module)
def add_reference(self, guid, major, minor):
self.references.append((guid, major, minor))
def export(self, path):
assert os.path.exists(path) and os.path.isdir(path)
filenames = []
for module in self.modules:
filenames.append(module.export(path))
return filenames
| [
"[email protected]"
] | |
874f1c92431eb6ea589aecf23feb98ea1d7aa4a2 | 757360a33181882095a48598eab8ea16e4b7c909 | /bs4/__init__.py | 18cec686309165e8f65c05229c147d934213b712 | [] | no_license | ardentras/topwallpaper | e011688fdd0d9ad5da33bf7ebb35c46a452a3276 | c5c947f75ed746d538121abc95b6fa2af648c8ab | refs/heads/master | 2021-01-18T21:19:12.176814 | 2016-04-22T16:27:41 | 2016-04-22T16:27:41 | 34,496,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,878 | py | """Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson ([email protected])"
__version__ = "4.1.0"
__copyright__ = "Copyright (c) 2004-2012 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import re
import warnings
from .builder import builder_registry
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
syntax_error = 'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
"""
This class defines the basic interface called by the tree builders.
These methods will be called by the parser:
reset()
feed(markup)
The tree builder may call these methods from its feed() implementation:
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
ROOT_TAG_NAME = '[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = {9: None, 10: None, 12: None, 13: None, 32: None, }
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, **kwargs):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser."""
if 'convertEntities' in kwargs:
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. You can pass in features='html' "
"or features='xml' to get a builder capable of handling "
"one or the other.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if len(kwargs) > 0:
arg = list(kwargs.keys()).pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)
if builder is None:
if isinstance(features, str):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise ValueError(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
builder = builder_class()
self.builder = builder
self.is_xml = builder.is_xml
self.builder.soup = self
self.parse_only = parse_only
self.reset()
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
(self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) = (
self.builder.prepare_markup(markup, from_encoding))
try:
self._feed()
except StopParsing:
pass
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.currentData = []
self.currentTag = None
self.tagStack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s):
"""Create a new NavigableString associated with this soup."""
navigable = NavigableString(s)
navigable.setup()
return navigable
def insert_before(self, successor):
raise ValueError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise ValueError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
if self.currentData:
currentData = ''.join(self.currentData)
if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
not set([tag.name for tag in self.tagStack]).intersection(
self.builder.preserve_whitespace_tags)):
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
self.currentData = []
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(currentData)):
return
o = containerClass(currentData)
self.object_was_parsed(o)
def object_was_parsed(self, o):
"""Add an object to the parse tree."""
o.setup(self.currentTag, self.previous_element)
if self.previous_element:
self.previous_element.next_element = o
self.previous_element = o
self.currentTag.contents.append(o)
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in range(len(self.tagStack) - 1, 0, -1):
if (name == self.tagStack[i].name
                and nsprefix == self.tagStack[i].nsprefix):
numPops = len(self.tagStack) - i
break
if not inclusivePop:
numPops = numPops - 1
for i in range(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
        SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print "Start tag %s: %s" % (name, attrs)
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self.previous_element)
if tag is None:
return tag
if self.previous_element:
self.previous_element.next_element = tag
self.previous_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
#print "End tag: " + name
self.endData()
self._popToTag(name, nsprefix)
def handle_data(self, data):
self.currentData.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding != None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = '<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = ''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
class BeautifulStoneSoup(BeautifulSoup):
"""Deprecated interface to an XML parser."""
def __init__(self, *args, **kwargs):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.')
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
pass
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print((soup.prettify()))
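# Illustrative sketch (added commentary, not part of upstream Beautiful Soup):
# the BeautifulSoup docstring above describes the event interface a tree builder
# drives. For markup like "<p>data</p>" a builder ends up calling roughly:
#
#     soup = BeautifulSoup()            # acts as the root [document] tag
#     soup.handle_starttag('p', None, None, {})
#     soup.handle_data('data')
#     soup.handle_endtag('p')
#
# after which soup.p.string holds 'data'. Normal callers never do this by hand;
# they just construct BeautifulSoup(markup) and let the chosen builder feed it.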
| [
"[email protected]"
] | |
cef6f8e32412557bc2218e65e7bd40ed189bad42 | 15da50a471edf8517f5ddb34ee7ddec374ba2a19 | /LNC_fileproc.py | 8118e9643ad0e21dbffadd634c05faafba06c754 | [] | no_license | nchaparr/LNCcode | 4489f1333e53e0899c67417ca1e2e42ebbcd62d9 | e22c8d32f92523eed3f7aa3734e342e42ec3835f | refs/heads/master | 2021-01-24T02:34:12.002693 | 2013-08-16T20:24:51 | 2013-08-16T20:24:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,208 | py | import pandas as pan
import numpy as np
import os,sys
import LNC_tools as LNC
#----------------------------------------------------------------------------
#Uses tools created in LNC_tools to open all files in a folder and resample
#them to a regular spacing in altitude/date, then concatenates them into one
#pandas dataframe and plots it using LNC_plot
#July 05, 2012
#----------------------------------------------------------------------------
olddir = os.getcwd()
#os.chdir('K:\CORALNet\Data\ASCII Files')
newdir = LNC.set_dir('Select Event Folder')
os.chdir(newdir)
files = os.listdir(newdir)
maskfiles = []
datafiles = []
procfiles = []
rawfiles = []
#set altitude range and date step sizes
altrange = np.arange(10,10010,10)#meters
timestep = '120S' #seconds
#set buffer around backscatter ratio of 1 for mask
delta = 0.1
#check to see if each file has been processed before and separate processed
#files into a new list
for f in files:
    if '_proc' in f or '.pickle' in f:
        procfiles.append(f)
    elif '.txt' in f:
        rawfiles.append(f)
#search through list of files to separate fields to be used as a mask from those
#with data to be plotted
#initially, mask files are designated BR1064 for 1064nm Backscatter Ratio
for f in rawfiles:
    if 'BR' in f:
        maskfiles.append(f)
    else:
        datafiles.append(f)
#make sure the files are in a common order of ascending date (assuming they're all
#from the same station)
maskfiles.sort()
datafiles.sort()
#first check to make sure the same number of files in each list
if len(maskfiles) != len(datafiles):
    sys.exit("Error: Mask files don't match data files")
#double check to make sure the mask files match up with the data files
for d,m in zip(datafiles, maskfiles):
    [d_stat,d_date,d_type] = d.split('_')
    [m_stat,m_date,m_type] = m.split('_')
    print 'Checking mask/data match for %s'%(d_date)
    if d_date == m_date and d_stat == m_stat:
        print 'Check!'
        continue
    else:
        sys.exit("Error: Mask files don't match data files")
#open, altitude resample, and concatenate data and mask files
for d,m in zip(datafiles, maskfiles):
    d_temp, data_prod = LNC.lnc_reader(d)
    d_realt = LNC.alt_resample(d_temp,altrange)
    try:
        d_event = pan.concat([d_event,d_realt])
    except NameError:
        d_event = d_realt
    m_temp, data_prod = LNC.lnc_reader(m)
    m_realt = LNC.alt_resample(m_temp,altrange)
    try:
        m_event = pan.concat([m_event,m_realt])
    except NameError:
        m_event = m_realt
#sort by index to make certain data is in order then set date ranges to match
d_event = d_event.sort_index()
m_event = m_event.sort_index()
start = m_event.index[0]
end = m_event.index[-1]
d_event = LNC.time_resample(d_event,timestep, timerange = [start,end])
m_event = LNC.time_resample(m_event,timestep,timerange = [start,end])
dfmask = LNC.BR_mask(m_event,d_event, delta)
d_filename = datafiles[0].split('.')[0]+'-'+datafiles[-1].split('.')[0]
d_event.save(d_filename+'.pickle')
m_filename = maskfiles[0].split('.')[0]+'-'+maskfiles[-1].split('.')[0]
m_event.save(m_filename+'.pickle')
dfmask.save(d_filename+'_masked.pickle')
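# Hedged illustration (added note, not part of the original script): LNC.time_resample
# is assumed to behave roughly like the plain-pandas pattern below -- put every
# profile onto one fixed time grid so the data and mask frames line up sample for
# sample before BR_mask compares them.
#
#     grid = pan.date_range(start, end, freq=timestep)
#     d_regular = d_event.reindex(grid)
#     m_regular = m_event.reindex(grid)
#
# The real helper lives in LNC_tools and may fill or interpolate differently.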
| [
"[email protected]"
] | |
bdb0641a40b0b50cb222d0ca0f4a2167376a00fb | 90943dc7929bb1d194d7d669501602a6e63279c2 | /yourprojectname/app/__init__.py | d11e491b89315809fe7ee41bfc08334b21a210a1 | [] | no_license | Devical/Python3-Flask-Min-Setup | 151214647aa24714780436b1a3e564051e806080 | 819a03904c0e17ad48994baf869f6d970dbe5663 | refs/heads/master | 2020-06-01T10:59:36.315154 | 2014-12-31T18:44:55 | 2014-12-31T18:44:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | #! /usr/bin/python3
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
    return '<h1>Hello World!</h1>'

@app.route('/user/<name>')
def user(name):
    return '<h1>Hello, {0}!</h1>'.format(name)

if __name__ == "__main__":
    app.run(debug=True)
| [
"[email protected]"
] | |
c8c6e809b8392b49a2578267da1a5e6a9aceaec6 | 2fa471b4d7f2c7b0838dc8af4cc2611b6f244153 | /community/migrations/0003_communities_communitycreationdate.py | 62524730a2f11479d9f9e79bd736921b2156e5cc | [
"MIT"
] | permissive | akarakoc/Communityverse | c50973084c2afc43a64ea1f719375d4eb69317c4 | 73ecf51eae3f96cca865e0d7cc526b92c8ad6b5e | refs/heads/master | 2020-08-02T20:24:22.186385 | 2019-12-15T15:31:16 | 2019-12-15T15:31:16 | 211,496,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # Generated by Django 2.2.6 on 2019-11-07 09:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('community', '0002_communities_communitycreator'),
]
operations = [
migrations.AddField(
model_name='communities',
name='communityCreationDate',
field=models.DateField(null=True),
),
]
| [
"[email protected]"
] | |
ead4df863d115de2176526d1bd7a0cc164650f5c | 385005433e8a6fedc1114ea9a394842e9da89db7 | /airport_test/database/jokes/t.py | 86fa640c1d5d2e40064cbd61fc6fa1c66d350f37 | [] | no_license | oluwayetty/hri-for-airport | d2a575923d9568cb24efb9583a1bd34099862256 | fd3067e86a5e6f865e6294cccd832c9805051b60 | refs/heads/main | 2023-06-05T21:32:15.837106 | 2021-06-22T13:30:56 | 2021-06-22T13:30:56 | 379,263,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | with open('j1.txt','r') as f:
    jokes = []
    s=''
    for line in f:
        if line.startswith('----'):
            jokes.append(s)
            s=''
        else:
            s+=line
print(jokes)
print(len(jokes))
print('\n\n\n\n')
for i in jokes:
    print(i)
    print('-------------------------')
| [
"[email protected]"
] | |
b8f36b1cd13f973bdc227c130d5ce47cd7c258d5 | a9ae784983f4bd9599d522d5c93d3447081b75b4 | /Tic_Tac_Toe_Board.py | 8544ae0e97561cc9ff605fb6a0cccef2c1e18465 | [] | no_license | arielmil/MinMaxTicTacToe | 6745c241c9436beda4044897178403cc65de4ed2 | 30ef96ddf0e12a312677a1f765dacafb4b287335 | refs/heads/main | 2023-08-10T16:32:17.375116 | 2021-09-30T05:11:17 | 2021-09-30T05:11:17 | 411,929,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,259 | py | import copy
class Board():
def __init__(self):
self.board = []
self.rows, self.cols = (3, 3)
self.x = 1
self.O = 0
self.none = -1
self.createBoard()
def posIsValid(self, pos):
(row, col) = (pos["row"], pos["col"])
if (row >= 0 and row <= 2 and col >= 0 and col <= 2):
return True
return False
def createBoard(self):
self.board = [[-1 for i in range(self.rows)] for j in range(self.cols)]
    def checkWinner(self, symbol=None):  # symbol is ignored; kept optional so callers that pass one still work
winner = False
i, j = (0, 0)
rows = cols = [0,1,2]
for row in rows:
pos = {"row": row}
horizontalWin = self.horizontalWin(pos)
if (horizontalWin == 1):
return 1
elif (horizontalWin == 0):
return 0
        for col in cols:
            pos = {"col": col}
            verticalWin = self.verticalWin(pos)
            if (verticalWin == 1):
                return 1
            elif (verticalWin == 0):
                return 0
diagonalURDLWin = self.diagonalURDLWin()
diagonalULDRWin = self.diagonalULDRWin()
if (diagonalURDLWin == 1 or diagonalULDRWin == 1):
return 1
elif (diagonalURDLWin == 0 or diagonalULDRWin == 0):
return 0
return -1
    # board cells hold the integer symbols self.x (1), self.O (0) or -1 for empty
    def diagonalULDRWin(self):
        board = self.board
        if (board[0][0] == board[1][1] and board[1][1] == board[2][2] and board[2][2] == self.x):
            return 1
        elif (board[0][0] == board[1][1] and board[1][1] == board[2][2] and board[2][2] == self.O):
            return 0
        return -1
    def diagonalURDLWin(self):
        board = self.board
        if (board[0][2] == board[1][1] and board[1][1] == board[2][0] and board[2][0] == self.x):
            return 1
        elif (board[0][2] == board[1][1] and board[1][1] == board[2][0] and board[2][0] == self.O):
            return 0
        return -1
    def verticalWin(self, pos):
        board = self.board
        col = pos["col"]
        if (board[0][col] == board[1][col] and board[1][col] == board[2][col] and board[2][col] == self.x):
            return 1
        elif (board[0][col] == board[1][col] and board[1][col] == board[2][col] and board[2][col] == self.O):
            return 0
        return -1
    def horizontalWin(self, pos):
        board = self.board
        row = pos["row"]
        if (board[row][0] == board[row][1] and board[row][1] == board[row][2] and board[row][2] == self.x):
            return 1
        elif (board[row][0] == board[row][1] and board[row][1] == board[row][2] and board[row][2] == self.O):
            return 0
        return -1
def copyBoard(self, board):
self.board = board
def deepCopyBoard(self):
board = copy.deepcopy(self)
return board
    # TODO: review and fix
def potentialWins(self, symbol, pos):
potentialWins = 0
(row, col) = (pos["row"], pos["col"])
if (self.board[row][col] == symbol):
if (row, col) in [(0, 0), (2, 2)]:
potentialWins = diagonalULDRWin(symbol)
elif (row, col) in [(2, 0), (0, 2)]:
potentialWins = diagonalURDLWin(symbol)
elif (row, col) == (1 ,1):
potentialWins = diagonalULDRWin(symbol) + diagonalURDLWin(symbol)
potentialWins = potentialWins + verticalWins(symbol, pos) + horizontalWins(symbol, pos)
return potentialWins
def getEmptyPositions(self):
emptyPositions = []
i = 0
j = 0
for row in self.board:
for pos in row:
if pos == -1:
emptyPositions.append({"row": i, "col": j})
j += 1
i += 1
j = 0
return emptyPositions
def makeMove(self, pos, symbol, comPlays=True):
if comPlays:
self.board[pos["row"]][pos["col"]] = symbol
else:
if self.posIsValid(pos):
self.board[pos["row"]][pos["col"]] = symbol
else:
return -1
return symbol
def unMakeMove(self, pos, comPlays=True):
if comPlays:
self.board[pos["row"]][pos["col"]] = -1
else:
if self.posIsValid(pos):
self.board[pos["row"]][pos["col"]] = -1
else:
return -1
return 1
    def symbolToInt(self, symbol):
        if symbol == "X":
            return 1
        else:
            return 0
def printBoard(self):
for row in self.board:
print(row)
def testCheckWinner():
board = Board()
boardX = [[-1 for i in range(board.rows)] for j in range(board.cols)]
boardO = [[-1 for i in range(board.rows)] for j in range(board.cols)]
boardsX = []
boardsO = []
for i in range(8):
boardsX.append(copy.deepcopy(boardX))
boardsO.append(copy.deepcopy(boardO))
for i in range(3):
boardsX[i][i][0] = boardsX[i][i][1] = boardsX[i][i][2] = 1
boardsO[i][i][0] = boardsO[i][i][1] = boardsO[i][i][2] = 0
for i in range(3):
boardsX[i + 3][0][i] = boardsX[i + 3][1][i] = boardsX[i + 3][2][i] = 1
boardsO[i + 3][0][i] = boardsO[i + 3][1][i] = boardsO[i + 3][2][i] = 0
i = i + 3
boardsX[i + 1][0][0] = boardsX[i + 1][1][1] = boardsX[i + 1][2][2] = 1
boardsO[i + 1][0][0] = boardsO[i + 1][1][1] = boardsO[i + 1][2][2] = 0
boardsX[i + 2][0][2] = boardsX[i + 2][1][1] = boardsX[i + 2][2][0] = 1
boardsO[i + 2][0][2] = boardsO[i + 2][1][1] = boardsO[i + 2][2][0] = 0
for boardX in boardsX:
board.copyBoard(boardX)
i = 0
for row in boardX:
print("%d: " %i, row)
i = i + 1
print("winner's symbol:", board.checkWinner(board.x), "\n\n")
for boardO in boardsO:
board.copyBoard(boardO)
i = 0
for row in boardO:
print("%d: " %i, row)
i = i + 1
print("winner's symbol:", board.checkWinner(board.O), "\n\n")
| [
"[email protected]"
] | |
aa966b5418da98a57aaaac4b31513de16276c42f | f5129176c7e06f42821d9d8cefcb7223c0819d8e | /atma/web/migrations/0009_project_strategy.py | 517096d3ececede99c9083663eeef2c5de90a2cb | [] | no_license | Mumbai2016/team-12 | b6cceb35485eba497a4896f8ca64a12eaf41d2f1 | a986b5fa72a01304015fdc7f6810fb7d931d474e | refs/heads/master | 2020-12-25T20:20:38.536884 | 2016-07-24T05:57:48 | 2016-07-24T05:57:48 | 63,882,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-23 22:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0008_remove_project_strategy'),
]
operations = [
migrations.AddField(
model_name='project',
name='strategy',
field=models.TextField(default=''),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
328aee9cbe3266d0570cbf5c3316cc3652682c4f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/452/usersdata/295/103617/submittedfiles/avenida.py | 6649f5b8ca7a93bb93757f04563ecc17f38f6018 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # -*- coding: utf-8 -*-
matriz = []
m = int(input("Digite um numero de quadras n-s:"))
n = int(input("Digite um numero de quadras l-s:"))
i = 0
j = 0
if 0<= m <=1000:
    if 0<= i <=m-1:
        if 2<= n <=1000:
            if 0<= j <=n-1:
                print("")
for i in range(0,m,1):
    linha = []
    for j in range(0,n,1):
        linha.append(int(input("Digite o valor %dº elemento da %dª lista: "%((j+1),(i+1)))))
    matriz.append(linha)
print(matriz)
| [
"[email protected]"
] | |
b1a97c6931a958d1bf2737730543b6ae7c285b35 | 190784e8861610e45eece0d617b113bf40840f28 | /ros_wkspace_asgn2/build/assignment2/robot_sim/catkin_generated/pkg.installspace.context.pc.py | e31dc2befd983f304e4ec379ec24a344cb8831ce | [] | no_license | liulc006/Robotics | 2d3d55627c1a67377bc1bb2af47bdd4f9eaf629a | 02d3ef92fcdd649db96e6574e63f7644c90c9253 | refs/heads/main | 2023-02-11T05:15:58.452570 | 2021-01-08T17:41:54 | 2021-01-08T17:41:54 | 314,352,000 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;sensor_msgs;trajectory_msgs;urdf".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lrobot_sim".split(';') if "-lrobot_sim" != "" else []
PROJECT_NAME = "robot_sim"
PROJECT_SPACE_DIR = "/home/luca/ros_wkspace_asgn2/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
81327a2d889baeea4ad08944447ea99e61378163 | 7ff7b9f403f8c44588bc432c9cf81703dbadc149 | /flaskr/venv/bin/pyrsa-keygen | c7a1ba23b6c856ea5ee535bb602398e43fd31edb | [] | no_license | Dorisvdmeer/pythonAdventure | 32168b558208e4c9ebdac496a66fdb4d19e38999 | 29065ceacbf4b02cf5e01878a513e7dcf7f8cbe8 | refs/heads/master | 2020-06-05T04:10:59.562396 | 2019-06-26T07:41:54 | 2019-06-26T07:41:54 | 189,609,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | #!/Users/dorisvandermeer/Documents/pythonAdventure/flask-tutorial/flaskr/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import keygen
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(keygen())
| [
"[email protected]"
] | ||
fc5fb76db405b511c747bebda2cd8a8bdb7d96f9 | 416ea1127f3e3a1a8e64dd980e59c7bf585379a0 | /number_writer.py | 2414d38b37368645071209585e62b1ef0d871232 | [] | no_license | jocogum10/learning_python_crash_course | 6cf826e4324f91a49da579fb1fcd3ca623c20306 | c159d0b0de0be8e95eb8777a416e5010fbb9e2ca | refs/heads/master | 2020-12-10T02:55:40.757363 | 2020-01-13T01:22:44 | 2020-01-13T01:22:44 | 233,486,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | import json
numbers = [2,3,5,7,11,13]
filename = 'numbers.json'
with open(filename, 'w') as file_object:
    json.dump(numbers, file_object)
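# Companion sketch (added example, not from the original file): reading the list
# back out of the same numbers.json file with json.load.
with open(filename) as file_object:
    loaded_numbers = json.load(file_object)
print(loaded_numbers)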
| [
"[email protected]"
] | |
78f29afec01ed1c12af51264ebf5fbc122dbe04a | 9c70737e06c4da1aa541f3a0ef31a325881a3d9b | /DatabaseSyncAgent.py | edd1174ec25e28cc4105c2e7df65c3038b90468b | [] | no_license | zheksy/WFAssistant | a45001fae324b0bfee08695352d1f424918c8813 | c8d1923dd473048661e02fa8a57667543fa9d7b2 | refs/heads/main | 2023-06-04T06:18:25.274920 | 2021-04-13T22:06:24 | 2021-04-13T22:07:11 | 379,887,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | #!/usr/bin/python3
import time
import random
import datetime
import WayfarerDataManager as wfdm
import json
import requests
from datetime import date
WAYFARER_JSON_PATH = './reviews.json'
DATABASE_PATH = './database.json'
REMOTE_DB_URL = 'https://api.jsonbin.io/v3/b/604faf7e7ea6546cf3dee383'
def main():
dbUrl = REMOTE_DB_URL + "/latest"
wfdm.pull_database_json(dbUrl,DATABASE_PATH)
unprocessedData = wfdm.parse_json(WAYFARER_JSON_PATH)
print(unprocessedData)
print("\n===============================\n")
processedData = wfdm.update_local_database(unprocessedData,DATABASE_PATH)
#processedData = wfdm.remove_outdated_reviews(processedData,30)
print(processedData)
print("\n===============================\n")
wfdm.sync_remote_database(REMOTE_DB_URL,processedData)
if __name__ == "__main__":
main() | [
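# Hedged sketch (assumption, not the real WayfarerDataManager code): the two
# remote-database helpers used in main() are assumed to be thin requests wrappers,
# roughly like the lines below; authentication headers and the exact jsonbin.io
# response shape are deliberately left out because they are not shown here.
#
#     def pull_database_json(url, local_path):
#         payload = requests.get(url).json()
#         with open(local_path, 'w') as fh:
#             json.dump(payload, fh)
#
#     def sync_remote_database(url, data):
#         requests.put(url, json=data)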
"[email protected]"
] | |
d056d27f5ef63960f7fb05326219de4a8375c06e | eaa9dda8d05187ce29c6498c2b7da8679f98be95 | /resource_check.py | c7310f61e309699eca13fa13e340e9066e18e3ab | [] | no_license | bdeegan-r7/Divvy_scripts | 1fc96ec9488d0ef12af9d526f07d15dca21fb1d6 | 10f8a55d9b822e812dccdadb7ab0db336636faf3 | refs/heads/main | 2023-04-29T20:11:47.653596 | 2021-05-20T15:36:22 | 2021-05-20T15:36:22 | 384,163,904 | 0 | 1 | null | 2021-07-08T15:08:00 | 2021-07-08T15:08:00 | null | UTF-8 | Python | false | false | 3,110 | py | # Check if a resource has showed up in DivvyCloud. If it has, check to see if there are any compliance violations on it.
# Currently this looks for any insights but filtering in check_for_violations() can pull out specific insights if needed
import json
import requests
import getpass
import time
requests.packages.urllib3.disable_warnings() # verify=False throws warnings otherwise
# Instance ID and region for the instance
instance_id = "i-003014f07c517c6abc"
instance_region = "us-east-1"
# Username/password to authenticate against the API
username = ""
password = "" # Leave this blank if you don't want it in plaintext and it'll prompt you to input it when running the script.
# API URL
base_url = "https://sales-demo.divvycloud.com"
# Param validation
if not username:
username = input("Username: ")
if not password:
passwd = getpass.getpass('Password:')
else:
passwd = password
if not base_url:
base_url = input("Base URL (EX: http://localhost:8001 or http://45.59.252.4:8001): ")
# Full URL
login_url = base_url + '/v2/public/user/login'
# Shorthand helper function
def get_auth_token():
response = requests.post(
url=login_url,
verify=False,
data=json.dumps({"username": username, "password": passwd}),
headers={
'Content-Type': 'application/json;charset=UTF-8',
'Accept': 'application/json'
})
return response.json()['session_id']
auth_token = get_auth_token()
headers = {
'Content-Type': 'application/json;charset=UTF-8',
'Accept': 'application/json',
'X-Auth-Token': auth_token
}
# Get Reource info
def check_for_resource(resource_id):
data = {}
response = requests.get(
url=base_url + '/v2/public/resource/' + resource_id + '/detail',
data=json.dumps(data),
verify=False,
headers=headers
)
# If the resource isn't seen, it'll return a 404. If we see it (200), return success
if response.status_code != 200:
return
else:
return True
# Get Instance violation info. It'll be returned in an array if there are any
def check_for_violation(resource_id):
data = {}
response = requests.get(
url=base_url + '/v2/public/insights/' + resource_id + '/violations/get',
data=json.dumps(data),
verify=False,
headers=headers
)
return response.json()
resource_id = "instance:1:" + instance_region + ":" + instance_id + ":"
# Check if the resource is showing in divvycloud yet
while True:
found_resource = check_for_resource(resource_id)
if found_resource:
print("Found resource. Checking for compliance")
break
else:
print("Resource not found yet. Sleeping for 60")
time.sleep(60)
# Check for insight violations
instance_violations = check_for_violation(resource_id)
if instance_violations:
print("======== Violations found for the insight ========")
for violation in instance_violations:
print(violation['name'])
if not instance_violations:
print("No violations found on the instance") | [
"[email protected]"
] | |
4eacae07fc0db896acc89681bebd4f869bb7091c | 84179e28f7a8237ca109dfa725e75743227562b4 | /src/backend/analytics/MissionAnalysis.py | 347cd96d6f0f6d47d0f230d5a549a1e4d81acf6f | [] | no_license | O1sims/Pax | b65a3dc060c4676fdcefefb8cf2f49e718c64931 | aee9661c07429697b836f7140c76648eca7fa9d4 | refs/heads/master | 2022-12-12T09:16:32.024143 | 2018-10-19T07:55:52 | 2018-10-19T07:55:52 | 141,993,395 | 4 | 1 | null | 2022-11-22T16:47:33 | 2018-07-23T09:37:59 | Python | UTF-8 | Python | false | false | 3,093 | py | import os
import pandas as pd
import pymongo as pm
import datetime as datetime
import numpy.random as nprnd
from Risk import Risk
_author_ = 'Owen Sims ([email protected])'
"""
A set of functions used to infer information about the mission from the mission
network data, supplied by Risk Aware.
:Note: Vulnerabilities --> Software --> Hardware --> Networks
|---------- Actions ---------||-- Tasks --||--- Effects ---|
"""
class MissionAnalysis:
def __init__(self, network_data, current_risk_scores):
"""
Initiate the data to be passed on to functions within the class.
"""
self.current_risk_scores = current_risk_scores
self.node_list_df = pd.DataFrame(network_data['nodes'], columns=['id', 'name', 'type', 'facility', 'summary'])
self.arc_list_df = pd.DataFrame(network_data['edges'], columns=['source', 'target'])
def get_mission_objectives(self, mission_objective_label='Vignette'):
"""
A function used to gather all mission objectives. This will be given by
the mission graph, supplied by Risk Aware.
"""
objectives = []
mission_objectives = self.node_list_df[self.node_list_df['type'] == mission_objective_label]['name'].values
for i in range(len(mission_objectives)):
objective_information = {}
objective_information.update({
'missionObjective': mission_objectives[i],
'missionRiskTimeline': self.get_mission_objective_risk(time_periods=50)
})
objectives.append(objective_information)
return objectives
def get_mission_objective_risk(self, time_periods):
"""
Collect the historical risk profile attached to the mission objective.
"""
mission_objective_risk = []
for i in range(time_periods):
if i == 0:
risk_score = max(self.current_risk_scores)
else:
risk_score = nprnd.randint(100, size=1)[0]
mission_objective_risk.append({
'timestamp': int(datetime.date.today().strftime("%s")) - (60 * 60 * 24 * i),
'risk_score': risk_score,
'risk_label': Risk(risk_score = risk_score).get_risk_label()
})
return mission_objective_risk
def get_all_mission_data(self):
mission_data = self.get_mission_objectives()
return mission_data
def get_post_all_mission_data(self, mongo_port=int(os.environ.get('DB_PORT')), mongo_collecton="missions",
mongo_database=os.environ.get('DB_NAME'), mongo_host=os.environ.get('DB_HOSTNAME')):
mission_data = self.get_all_mission_data()
mongo_connection = pm.MongoClient('mongodb://' + mongo_host + ':' + str(mongo_port) + '/')[mongo_database][mongo_collecton]
mongo_connection.drop()
mission_data[0].update({
'missionId': 'M' + str(mongo_connection.count())
})
mongo_result = mongo_connection.insert(mission_data)
return mongo_result
| [
"[email protected]"
] | |
a8c02aff72c33917ded6e08109c456d97687e6ab | e68960a0924496da1b7999709d19bc93a0166f7e | /run_cqa.py | 4ea2863577d384f23a356e337f54d35a54fe9284 | [] | no_license | ahashisyuu/MFIN | ddc9f0f8c8d9f03d805cc1ee4b4759ec5392e34e | e4f84b733e6fe025cb6558f1829b9dcaec7c23e2 | refs/heads/master | 2022-04-17T11:47:48.655555 | 2020-04-14T06:22:14 | 2020-04-14T06:22:14 | 255,508,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,542 | py | import os
import numpy as np
import tensorflow as tf
import pickle as pkl
import modeling
import optimization
from tensorflow.contrib import tpu
from EvalHook import EvalHook
from run_classifier import FLAGS
from utils import PRF, eval_reranker, print_metrics
from CQAModel import DoubleModel, Baseline, DoubleModelUpGrade, DCMN, IBERT, GRUAttModel, MHGRUModel, MultiPool, HPool, \
LSTMModel, TripleModel, DoubleJointModel
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
CQAMODEL = DoubleJointModel
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_extract, input_mask, segment_ids, q_type, label_id):
self.input_ids = input_ids
self.input_extract = input_extract
self.input_mask = input_mask
self.segment_ids = segment_ids
self.q_type = q_type
self.label_id = label_id
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_extract = []
all_input_mask = []
all_segment_ids = []
all_q_type = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_extract.append(feature.input_extract)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_q_type.append(feature.q_type)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
print(batch_size)
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_extract":
tf.constant(
all_input_extract, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"q_type":
tf.constant(all_q_type, shape=[num_examples], dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def input_fn_builder_v2(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input1_extract = []
all_input2_extract = []
all_input_mask = []
all_segment_ids = []
all_q_type = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input1_extract.append(feature.input1_extract)
all_input2_extract.append(feature.input2_extract)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_q_type.append(feature.q_type)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
print(batch_size)
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input1_extract":
tf.constant(
all_input1_extract, shape=[num_examples, 110],
dtype=tf.int32),
"input2_extract":
tf.constant(
all_input2_extract, shape=[num_examples, 150],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"q_type":
tf.constant(all_q_type, shape=[num_examples], dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def input_fn_builder_v3(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input1_extract = []
all_input2_extract = []
all_input3_extract = []
all_input_mask = []
all_segment_ids = []
all_q_type = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input1_extract.append(feature.input1_extract)
all_input2_extract.append(feature.input2_extract)
all_input3_extract.append(feature.input3_extract)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_q_type.append(feature.q_type)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
print(batch_size)
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input1_extract":
tf.constant(
all_input1_extract, shape=[num_examples, 39],
dtype=tf.int32),
"input2_extract":
tf.constant(
all_input2_extract, shape=[num_examples, 110],
dtype=tf.int32),
"input3_extract":
tf.constant(
all_input3_extract, shape=[num_examples, 152],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"q_type":
tf.constant(all_q_type, shape=[num_examples], dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
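# Hedged sketch (added illustration, not used by the training loop): the builders
# above note that from_tensor_slices is demo-only and that TFRecord files are the
# scalable path. This shows one way the basic single-extract feature layout could
# be serialized; field names mirror InputFeatures, everything else is illustrative.
def write_features_to_tfrecord(features, output_file):
    def _int64_list(values):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
    with tf.python_io.TFRecordWriter(output_file) as writer:
        for feature in features:
            example = tf.train.Example(features=tf.train.Features(feature={
                "input_ids": _int64_list(feature.input_ids),
                "input_extract": _int64_list(feature.input_extract),
                "input_mask": _int64_list(feature.input_mask),
                "segment_ids": _int64_list(feature.segment_ids),
                "q_type": _int64_list([feature.q_type]),
                "label_ids": _int64_list([feature.label_id]),
            }))
            writer.write(example.SerializeToString())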
def _creat_bert(is_training, features, bert_config, use_one_hot_embeddings, init_checkpoint, layer_num, plus_position):
global initialized_variable_names
input_ids = features["input_ids"]
if "input_extract" in features:
input_extract = features["input_extract"]
input1_extract = None
input2_extract = None
input3_extract = None
elif "input3_extract" not in features:
input_extract = None
input1_extract = features["input1_extract"]
input2_extract = features["input2_extract"]
input3_extract = None
else:
input_extract = None
input1_extract = features["input1_extract"]
input2_extract = features["input2_extract"]
input3_extract = features["input3_extract"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
q_type = features["q_type"]
label_ids = features["label_ids"]
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
output_layer_index=layer_num,
plus_position=plus_position)
tvars = tf.trainable_variables()
scaffold_fn = None
if init_checkpoint:
(assignment_map,
initialized_variable_names) = modeling.get_assigment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
# print("initialing checkpoint finished")
# tf.logging.info("**** Trainable Variables ****")
# residue = []
# for var in tvars:
# init_string = ""
# if var.name in initialized_variable_names:
# init_string = ", *INIT_FROM_CKPT*"
# else:
# residue.append(var)
# tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
# init_string)
predictions = {"input_extract": input_extract,
"input1_extract": input1_extract,
"input2_extract": input2_extract,
"input3_extract": input3_extract,
"embedding": model.get_embedding_output(),
"input_mask": input_mask,
"q_type": q_type,
"label_ids": label_ids,
"output_layer": model.get_output_layer(),
"last_layer": model.get_sequence_output()}
return predictions
def _create_cqa_modules(is_training, predictions, update_num):
num_labels = 2
input_extract = predictions["input_extract"]
input1_extract = predictions["input1_extract"]
input2_extract = predictions["input2_extract"]
input3_extract = predictions["input3_extract"]
embedding = predictions["embedding"]
input_mask = predictions["input_mask"]
q_type = predictions["q_type"]
labels = predictions["label_ids"]
encoder_output1 = predictions["last_layer"]
# encoder_output = encoder_output1 + encoder_output2 + \
# encoder_output3 + encoder_output4
encoder_output = predictions["output_layer"]
sent1 = None
sent2 = None
sent3 = None
sent1_mask = None
sent2_mask = None
sent3_mask = None
mark0 = None
mark1 = None
mark2 = None
mark3 = None
if input_extract is None and input3_extract is None:
sent1_mask = tf.cast(tf.not_equal(input1_extract, 0), tf.float32)
sent2_mask = tf.cast(tf.not_equal(input2_extract, 0), tf.float32)
sent1 = tf.batch_gather(encoder_output, input1_extract)
sent2 = tf.batch_gather(encoder_output, input2_extract)
elif input3_extract is None:
sent1_mask = tf.cast(tf.equal(input_extract, 1), tf.float32)
sent2_mask = tf.cast(tf.equal(input_extract, 2), tf.float32)
sent1 = encoder_output * tf.expand_dims(sent1_mask, axis=-1)
sent2 = encoder_output * tf.expand_dims(sent2_mask, axis=-1)
else:
sent1_mask = tf.cast(tf.not_equal(input1_extract, 0), tf.float32)
sent2_mask = tf.cast(tf.not_equal(input2_extract, 0), tf.float32)
sent3_mask = tf.cast(tf.not_equal(input3_extract, 0), tf.float32)
sent1 = tf.batch_gather(encoder_output, input1_extract)
sent2 = tf.batch_gather(encoder_output, input2_extract)
sent3 = tf.batch_gather(encoder_output, input3_extract)
mark0 = tf.squeeze(encoder_output1[:, 0:1, :], axis=1)
model = CQAMODEL(is_training=is_training,
all_sent=encoder_output, input_mask=input_mask,
sent1=sent1, sent2=sent2, sent3=sent3,
sent1_mask=sent1_mask, sent2_mask=sent2_mask, sent3_mask=sent3_mask,
mark0=mark0, mark1=mark1, mark2=mark2, mark3=mark3,
embedding=embedding, update_num=update_num)
# model = Baseline(is_training=is_training,
# sent1=sent1, sent2=sent2, sent3=sent3,
# sent1_mask=sent1_mask, sent2_mask=sent2_mask, sent3_mask=sent3_mask,
# mark0=mark0, mark1=mark1, mark2=mark2, mark3=mark3)
result = model.get_output() # (B, dim)
# mark0 = tf.layers.dense(mark0, 768, activation=tf.tanh)
# result = mark0
hidden_size = result.shape[-1].value
output_weights = tf.get_variable(
"output_weights_v2", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias_v2", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
result = tf.nn.dropout(result, keep_prob=0.9)
logits = tf.matmul(result, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
prob = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
total_loss = tf.reduce_mean(per_example_loss)
return total_loss, logits, prob
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, update_num, layer_num,
plus_position):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
global initialized_variable_names
# tf.logging.info("*** Features ***")
# for name in sorted(features.keys()):
# tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
predictions = _creat_bert(is_training, features, bert_config, use_one_hot_embeddings, init_checkpoint, layer_num, plus_position)
        # the concatenation of `predictions` is the output of the BERT encoder
        # and is used as the input to the other modules
total_loss, logits, prob = _create_cqa_modules(is_training, predictions, update_num)
scaffold_fn = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op, grade = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.PREDICT:
output_spec = tpu.TPUEstimatorSpec(
mode=mode,
predictions={"logits": logits, "prob": prob},
scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def main():
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tpu.InputPipelineConfig.PER_HOST_V2
run_config = tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
session_config = tf.ConfigProto(log_device_placement=True)
session_config.gpu_options.allow_growth = True
run_config.replace(session_config=session_config)
num_train_steps = None
num_warmup_steps = None
with open('dataset.pkl', 'rb') as fr:
train_features, dev_cid, dev_features, test_cid, test_features = pkl.load(fr)
dev_label = [feature.label_id for feature in dev_features]
test_label = [feature.label_id for feature in test_features]
if FLAGS.do_train:
num_train_steps = int(
len(train_features) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=2,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
# params={'batch_size': FLAGS.train_batch_size},
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.eval_batch_size)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_features))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = input_fn_builder(
features=train_features,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn,
max_steps=num_train_steps,
hooks=[EvalHook(estimator=estimator,
dev_features=dev_features,
dev_label=dev_label,
dev_cid=dev_cid,
max_seq_length=FLAGS.max_seq_length,
eval_steps=FLAGS.save_checkpoints_steps,
checkpoint_dir=FLAGS.output_dir)])
if FLAGS.do_eval:
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d", len(test_features))
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
test_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
# Eval will be slightly WRONG on the TPU because it will truncate
# the last batch.
test_steps = int(len(test_features) / FLAGS.eval_batch_size)
test_drop_remainder = True if FLAGS.use_tpu else False
test_input_fn = input_fn_builder(
features=test_features,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=test_drop_remainder)
predictions = estimator.predict(test_input_fn, yield_single_examples=False)
        res = np.concatenate([a["prob"] for a in predictions], axis=0)
        print(res.shape, np.array(test_label).shape)
        metrics = PRF(np.array(test_label), res.argmax(axis=-1))
        # print((np.array(test_label) != res.argmax(axis=-1))[:1000])
        MAP, AvgRec, MRR = eval_reranker(test_cid, test_label, res[:, 0])
metrics['MAP'] = MAP
metrics['AvgRec'] = AvgRec
metrics['MRR'] = MRR
print_metrics(metrics, 'test')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
7ae21a1edd8b22d7ce236af170136a4abff5837b | 3f67ad2617457e3a164bdaae9b1e911760a25141 | /Data Structures/findMergePointofTwoLists.py | 747328a3cddd08867229fc406a3803ce6b6fd1f1 | [] | no_license | arkwl/HackerRank | 834bdc9885237392ee77c3a0cd571a2c769f6a45 | 9b9cc80ced4b38e83bdd1a6fceb1bc70f8b70faf | refs/heads/master | 2021-01-12T01:24:39.777288 | 2017-12-28T18:46:05 | 2017-12-28T18:46:05 | 78,382,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | """
Find the node at which both lists merge and return the data of that node.
head could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
"""
class Stack():
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def push(self, item):
return self.items.append(item)
def pop(self):
return self.items.pop()
def FindMergeNode(headA, headB):
stackA = Stack()
stackB = Stack()
while(headA != None):
stackA.push(headA)
headA = headA.next
while(headB != None):
stackB.push(headB)
headB = headB.next
valueA = stackA.pop()
valueB = stackB.pop()
while(valueA.data == valueB.data):
if stackA.isEmpty() or stackB.isEmpty():
return None
valueA = stackA.pop()
valueB = stackB.pop()
return valueA.next.data
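# Hedged alternative (not required by the exercise above): the classic O(1)-space
# two-pointer version of the same idea -- switch each cursor to the other list's
# head when it runs off the end, so both traverse len(A) + len(B) nodes and meet
# at the merge node. It assumes the lists do intersect, as the problem guarantees.
def FindMergeNodeTwoPointer(headA, headB):
    a, b = headA, headB
    while a is not b:
        a = a.next if a is not None else headB
        b = b.next if b is not None else headA
    return a.data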
| [
"[email protected]"
] | |
e94d1664d63695dcb5284d9aacacb921ad6d29d7 | 6fdf57b6e90c97543f270cba68edd5171525e101 | /manage.py | 7b2fcd89c6994ed554f733c8f41d4e275f3c44e3 | [] | no_license | coreyadkins/goalsaver | abc34978d2a8c2e63d27c7d8a2c951874bee0736 | acdc91abd937660c5b2f0fa43619f4aa5162c573 | refs/heads/master | 2020-06-27T07:03:58.451048 | 2017-06-08T22:16:11 | 2017-06-08T22:16:11 | 74,530,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "goalsaver.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
954401ce5ac31f8047d2cd278b7a98196ac303f9 | b7f78fd9769d47aaea08e008052ae24bb8bce893 | /model.py | 765bd42018f0a0bb44dc8fce6a6d1cf533d7afad | [] | no_license | wanghan0501/DeepBC | f2fbff95bcea99174b10c4c9a363af4ee01bce79 | 5e4485981596ecdca6ba3f10eeda6ab884f9047a | refs/heads/master | 2021-09-05T07:31:24.379604 | 2018-01-25T08:07:16 | 2018-01-25T08:07:16 | 109,246,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,403 | py | # -*- coding: utf-8 -*-
"""
Created by Wang Han on 2017/11/2 17:01.
E-mail address is [email protected].
Copyright © 2017 Wang Han. SCU. All Rights Reserved.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets.inception_resnet_v2 import inception_resnet_v2, inception_resnet_v2_arg_scope
from nets.inception_v2 import inception_v2, inception_v2_arg_scope
class InceptionResnetV2Model(object):
def __init__(self, config):
self._config = config
self._input_shape = (config.batch_size,) + config.img_shape
self._output_shape = (config.batch_size,)
self._use_tensorboard = self._config.use_tensorboard
self._create_placeholders()
with slim.arg_scope(inception_resnet_v2_arg_scope(batch_norm_decay=0.99)):
self._create_train_model()
self._create_test_model()
def _create_placeholders(self):
self.input_data = tf.placeholder(tf.float32, shape=self._input_shape, name='input_data')
self.label = tf.placeholder(tf.float32, shape=self._output_shape, name='label')
def _create_train_model(self):
train_logits, train_end_points = inception_resnet_v2(self.input_data,
is_training=True,
dropout_keep_prob=self._config.dropout_keep_prob,
num_classes=self._config.num_classes,
)
train_predictions = train_end_points['Predictions']
train_one_hot_labels = tf.one_hot(indices=tf.cast(self.label, tf.int32), depth=self._config.num_classes,
name='train_one_hot_labels')
# set loss
train_loss = tf.losses.softmax_cross_entropy(onehot_labels=train_one_hot_labels, logits=train_logits)
# set optimizer
optimizer = tf.train.AdadeltaOptimizer(learning_rate=1)
# set train_op
train_op = slim.learning.create_train_op(train_loss, optimizer)
# get curr classes
train_classes = tf.argmax(input=train_predictions, axis=1)
# get curr accuracy
train_accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(self.label, tf.int64), train_classes), tf.float32),
name='train_accuracy')
train_confusion_matrix = tf.confusion_matrix(self.label, train_classes, num_classes=self._config.num_classes)
self.train_loss = train_loss
self.train_op = train_op
self.train_accuracy = train_accuracy
self.train_classes = train_classes
self.train_logits = train_logits
self.train_predictions = train_predictions
self.train_confusion_matrix = train_confusion_matrix
def _create_test_model(self):
test_logits, test_end_points = inception_resnet_v2(self.input_data,
is_training=False,
dropout_keep_prob=1,
num_classes=self._config.num_classes,
reuse=True,
)
test_predictions = test_end_points['Predictions']
test_one_hot_labels = tf.one_hot(indices=tf.cast(self.label, tf.int32), depth=self._config.num_classes,
name='test_one_hot_labels')
# set loss
test_loss = tf.losses.softmax_cross_entropy(onehot_labels=test_one_hot_labels, logits=test_logits)
# get curr classes
test_classes = tf.argmax(input=test_predictions, axis=1)
# get curr accuracy
test_accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(self.label, tf.int64), test_classes), tf.float32),
name='test_accuracy')
test_confusion_matrix = tf.confusion_matrix(self.label, test_classes, num_classes=self._config.num_classes)
self.test_loss = test_loss
self.test_accuracy = test_accuracy
self.test_classes = test_classes
self.test_logits = test_logits
self.test_predictions = test_predictions
self.test_confusion_matrix = test_confusion_matrix
class InceptionV2Model(object):
    def __init__(self, config):
        self._config = config
        self._input_shape = (config.batch_size,) + config.img_shape
        self._output_shape = (config.batch_size,)
        self._create_placeholders()
        self._use_tensorboard = self._config.use_tensorboard
        with slim.arg_scope(inception_v2_arg_scope(batch_norm_decay=0.99)):
            self._create_train_model()
            self._create_test_model()

    def _create_placeholders(self):
        self.input_data = tf.placeholder(tf.float32, shape=self._input_shape, name='input_data')
        self.label = tf.placeholder(tf.float32, shape=self._output_shape, name='label')

    def _create_train_model(self):
        train_logits, train_end_points = inception_v2(self.input_data,
                                                      is_training=True,
                                                      dropout_keep_prob=self._config.dropout_keep_prob,
                                                      num_classes=self._config.num_classes,
                                                      )
        train_predictions = train_end_points['Predictions']
        train_one_hot_labels = tf.one_hot(indices=tf.cast(self.label, tf.int32), depth=self._config.num_classes,
                                          name='train_one_hot_labels')
        # set loss
        train_loss = tf.losses.softmax_cross_entropy(onehot_labels=train_one_hot_labels, logits=train_logits)
        # set optimizer
        optimizer = tf.train.AdadeltaOptimizer(learning_rate=1)
        # set train_op
        train_op = slim.learning.create_train_op(train_loss, optimizer)
        # get curr classes
        train_classes = tf.argmax(input=train_predictions, axis=1)
        # get curr accuracy
        train_accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(self.label, tf.int64), train_classes), tf.float32),
                                        name='train_accuracy')
        train_confusion_matrix = tf.confusion_matrix(self.label, train_classes, num_classes=self._config.num_classes)
        self.train_loss = train_loss
        self.train_op = train_op
        self.train_accuracy = train_accuracy
        self.train_classes = train_classes
        self.train_logits = train_logits
        self.train_predictions = train_predictions
        self.train_confusion_matrix = train_confusion_matrix

    def _create_test_model(self):
        test_logits, test_end_points = inception_v2(self.input_data,
                                                    is_training=False,
                                                    dropout_keep_prob=1,
                                                    num_classes=self._config.num_classes,
                                                    reuse=True,
                                                    )
        test_predictions = test_end_points['Predictions']
        test_one_hot_labels = tf.one_hot(indices=tf.cast(self.label, tf.int32), depth=self._config.num_classes,
                                         name='test_one_hot_labels')
        # set loss
        test_loss = tf.losses.softmax_cross_entropy(onehot_labels=test_one_hot_labels, logits=test_logits)
        # get curr classes
        test_classes = tf.argmax(input=test_predictions, axis=1)
        # get curr accuracy
        test_accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(self.label, tf.int64), test_classes), tf.float32),
                                       name='test_accuracy')
        test_confusion_matrix = tf.confusion_matrix(self.label, test_classes, num_classes=self._config.num_classes)
        self.test_loss = test_loss
        self.test_accuracy = test_accuracy
        self.test_classes = test_classes
        self.test_logits = test_logits
        self.test_predictions = test_predictions
        self.test_confusion_matrix = test_confusion_matrix
| [
"[email protected]"
] | |
516b9ee99a10824a1b7b9dde46b23c80a5075468 | 332ba17ddffba48343334663d1be36aaa9cbdf10 | /IC_engine/angle.py | 0944d444f53b5c5595c0bd93c1eaab6a7dae2f6d | [] | no_license | nivartfu/CFD-using-OpenFOAM | e1956f182c5d3318f81257221198b79552dfbdac | 90ea127801373e3877e32d64431cac3341896595 | refs/heads/master | 2023-03-29T06:48:48.040219 | 2021-04-06T20:10:31 | 2021-04-06T20:10:31 | 286,535,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | import math
import numpy as np
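# Decompose each radius into x- and z-components for a rotation of theta degrees (x = r*cos(theta), z = r*sin(theta))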
c = np.array([-37.5, -37.5, -22.8, -20.8, -20.8, -33, -3, -3, -6, -12, -16.8, -30.5])
d = np.array([0,90,90,96.5,106.5,130,130,100,98,97,90,0])
theta = 45
#tx = c * math.cos(math.radians(theta))
#tz = c * math.sin(math.radians(theta))
#for i in range(len(tx)):
# print(tx[i], d[i] , tz[i])
e = np.array([-20.8])
f = e * math.cos(math.radians(theta))
g = e * math.sin(math.radians(theta))
for i in range(len(f)):
print(f[i],g[i])
| [
"[email protected]"
] | |
57aebdb2f7a88a25d8d2a4e49efb730394d6b804 | b7ac3795f6282d6eba6e220a101365874d9a6dba | /node_modules/fsevents/build/config.gypi | a5ead9e014ffb26de1996bb059b96bf21e2c7d8c | [
"MIT"
] | permissive | ITalik-gr/loramedical | 8072d966d43ad0b4955d07da91b8e69121ebfad5 | 2283151d4ec219bf2f8f40075916f084709d0234 | refs/heads/main | 2023-07-03T20:53:37.237570 | 2021-08-05T15:46:17 | 2021-08-05T15:46:17 | 392,381,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,086 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt69l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "69",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "0",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/h/Library/Caches/node-gyp/14.17.4",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/Users/h/Documents/frontend/Lodamed/node_modules/fsevents/lib/binding/Release/node-v83-darwin-x64/fse.node",
"module_name": "fse",
"module_path": "/Users/h/Documents/frontend/Lodamed/node_modules/fsevents/lib/binding/Release/node-v83-darwin-x64",
"napi_version": "8",
"node_abi_napi": "napi",
"node_napi_label": "node-v83",
"save_dev": "",
"legacy_bundling": "",
"dry_run": "",
"viewer": "man",
"only": "",
"commit_hooks": "true",
"browser": "",
"also": "",
"sign_git_commit": "",
"rollback": "true",
"usage": "",
"audit": "true",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/zsh",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"metrics_registry": "https://registry.npmjs.org/",
"timing": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"preid": "",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/usr/local/etc/npmrc",
"prefer_online": "",
"logs_max": "10",
"always_auth": "",
"global_style": "",
"cache_lock_retries": "10",
"update_notifier": "true",
"heading": "npm",
"audit_level": "low",
"searchlimit": "20",
"read_only": "",
"offline": "",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"allow_same_version": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/h/.npmrc",
"init_module": "/Users/h/.npm-init.js",
"cidr": "",
"user": "",
"node_version": "14.17.4",
"save": "true",
"ignore_prepublish": "",
"editor": "vi",
"auth_type": "legacy",
"tag": "latest",
"script_shell": "",
"progress": "true",
"global": "",
"before": "",
"searchstaleness": "900",
"optional": "true",
"ham_it_up": "",
"save_prod": "",
"force": "",
"bin_links": "true",
"searchopts": "",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"depth": "Infinity",
"sso_poll_frequency": "500",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"sso_type": "oauth",
"scripts_prepend_node_path": "warn-only",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"prefer_offline": "",
"cache_lock_stale": "60000",
"otp": "",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/h/.npm",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"fund": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/6.14.14 node/v14.17.4 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"send_metrics": "",
"save_bundle": "",
"umask": "0022",
"node_options": "",
"init_version": "1.0.0",
"init_author_name": "",
"git": "git",
"scope": "",
"unsafe_perm": "true",
"tmp": "/var/folders/cl/bcflz8td5mbg__8yvhtly70c0000gn/T",
"onload_script": "",
"prefix": "/usr/local",
"link": "",
"format_package_lock": "true"
}
}
| [
"[email protected]"
] | |
1401a014d3574f222a7fc2e289fafa0714a025b6 | 45a93186c2c3d1f6da3b7b162874a07433963702 | /app/auth/forms.py | 41a3b0ceb142f7375534def9142aff6320f33884 | [] | no_license | kpglide/blog | ae0897c5b93e4799725dadf66886f18ac17aa792 | a755c16351d448d2d408a5ae40ebb2e820126647 | refs/heads/master | 2021-01-20T10:35:34.293486 | 2015-03-29T14:48:40 | 2015-03-29T14:48:40 | 19,876,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | from flask.ext.wtf import Form
from wtforms import BooleanField, TextField, TextAreaField, PasswordField, \
validators, ValidationError
#Represents a form for logging in
class LoginForm(Form):
username = TextField('username', [validators.InputRequired()])
password = PasswordField('password', [validators.InputRequired()])
remember_me = BooleanField('Keep me logged in')
| [
"[email protected]"
] | |
ccf98e88db167a2dfc0b6cb385dc4990eebccba7 | a1b375c3e98fe059dafc4d74cbcbcb99a0571e44 | /social_app/settings.py | 5de5aa7207f386de8a90e8dc7415e0d11491db46 | [
"MIT"
] | permissive | mohsenamoon1160417237/Social_app | 478a73552ceed8001c167be6caaf550cd58626bd | 79fa0871f7b83648894941f9010f1d99f1b27ab3 | refs/heads/master | 2022-12-09T16:03:53.623506 | 2020-09-21T05:59:22 | 2020-09-21T06:02:03 | 297,242,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,168 | py | """
Django settings for social_app project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from django.urls import reverse_lazy
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(=sye3fmr^ytn#61wx5qhcs$x3%8c1%pyuf4o_z3_1lw)&$^9f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'actions',
'sorl.thumbnail',
'images.apps.ImagesConfig',
'social_django', # pip install social-auth-app-django
'accounts',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'social_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'social_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
MEDIA_URL = '/media/'
MEDIA_ROOT = 'media'
STATICFILES_DIRS = [
os.path.join(BASE_DIR , 'social_app/static')
]
STATIC_URL = '/static/'
STATIC_ROOT = 'static'
LOGIN_REDIRECT_URL = 'dashboard'
LOGOUT_URL = 'logout'
LOGIN_URL = 'login'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 25
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'mohsen1160417237'
EMAIL_USE_TLS = True
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'accounts.authentication.EmailAuthentication',
'social_core.backends.google.GoogleOAuth2',
]
'''https://console.developers.google.com/'''
'''https://developers.facebook.com/apps/'''
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '904785179412-vlnsvbvn4cqhcha73t0nt1ohghpsq8bd.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'NaejMsS-Rh_z51Vz5XqRZiax'
ABSOLUTE_URL_OVERRIDES = {
'auth.user' : lambda u: reverse_lazy('user_detail' , args=[u.username])
} | [
"[email protected]"
] | |
377389f938f9c8a011bcdcffac812946bcae9096 | 6249b52f4713adf032dce1aa495ce5e8d0714309 | /p2/financial_series/aux_fun.py | 517b259e9f2b1b61f4c6e1a5867bac7750060e5e | [] | no_license | RHDZMOTA/simulacion_riesgos | 5e86b39e3a95bdc5d414c251ad6c0d81b7f68cd2 | 7fecd738c9b726e22dea2361c8983387bb5597fd | refs/heads/master | 2020-05-22T05:57:05.956435 | 2017-04-22T18:02:32 | 2017-04-22T18:02:32 | 84,675,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | # -*- coding: utf-8 -*-
"""
Auxiliary functions
"""
import pandas as pd
import numpy as np
# Function to calculate returns:
def calc_rtns(prices, warning = True, met_log = True):
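    """Compute period-over-period returns from a price series.
    prices  : numpy.ndarray of prices (pandas Series/DataFrame inputs are converted or rejected when warning is True)
    warning : if True, validate the input type before computing returns
    met_log : if True, return log returns; otherwise return simple returns
    """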
# Warning function
def warn(sti = 0):
w0 = 'Error: datatype must be numpy.ndarray'
if sti == 0:
sti = w0
print(sti)
return None
# identify variable type for prices
ty = type(prices)
if warning:
# DataFrame
if ty == type(pd.DataFrame([])):
if np.shape(prices)[1] == 1:
prices = prices.values
else:
sti = 'Error: variable is a DataFrame, must be numpy.ndarray'
return warn(sti)
# Series
elif ty == type(pd.Series([])):
prices = prices.values
# None
elif ty == type(None):
            sti = 'Error: The variable does not contain data (None)'
return warn(sti)
        # Anything other than numpy.ndarray
elif ty != type(np.array([])):
return warn()
# Calculate the returns
if met_log:
returns = np.log(prices[1:] / prices[0:-1])
else:
returns = prices[1:] / prices[0:-1] - 1
return returns | [
"[email protected]"
] | |
457b32b447be9a4857f5feaddd8c09c5f625f917 | d6a8e9e44884a8d57e85c44c117f7e89faaae2db | /python/django/words_in_session/words_in_session/wsgi.py | c30669ea345f04691f2c91c0b039b970427a684f | [] | no_license | johnahnz0rs/CodingDojoAssignments | 2710cc2722f26ddce4cbd4c2433b2a089122ad37 | 1ef3b76fb3306b1bad3905358d7366d1764bbdd3 | refs/heads/master | 2021-05-12T05:44:38.807753 | 2018-07-02T16:32:30 | 2018-07-02T16:32:30 | 117,198,001 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | """
WSGI config for words_in_session project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "words_in_session.settings")
application = get_wsgi_application()
| [
"[email protected]"
] |